blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4ca2b8a5a951cd95edbffbe7f9abcd534e2a26a7 | 3ab617b5ef9cd9ff1c2842be763bf1d10c6649b2 | /app1.R | 73d8c3749282eb9f49594bce5bc677d1690c44ed | [] | no_license | ganeshshinde1986/R-Basics | c1e712f14f36e5ff4a547dc53debff2aa9759e8d | e452d6cd90c108eebf25751153b93e7d87bb9b7c | refs/heads/master | 2021-01-22T01:51:29.084989 | 2019-01-24T19:27:20 | 2019-01-24T19:27:20 | 81,014,806 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,178 | r | app1.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
source("helper.R")
source("global.R")
#Loading all required package using helper function check.packages.
packages.needed <- c("shiny","shinydashboard","httr","jsonlite","xlsx","dplyr",
"DT","Hmisc","rsconnect","RMariaDB","plotly")
check.packages(packages.needed)
ui <- dashboardPage( skin = "blue",
# Application title
dashboardHeader(title = "Lead Management"),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
menuItem("Lead Allocation", tabName = "ldAllocate", icon = icon("address-card")),
menuItem("Upload File", tabName = "ldUploadFile", icon = icon("upload"))
)),
dashboardBody(
tabItems(
# First tab content
tabItem(tabName = "dashboard",
fluidRow(
# A static infoBox
valueBoxOutput("TotalLeads"),
# Dynamic infoBoxes
valueBoxOutput("ConvertedLeads"),
valueBoxOutput("OpenLeads")
),#fluid row ends here
fluidRow(
box(plotOutput("histogram")),
box(
fluidRow(
valueBoxOutput("HotLeads")),
fluidRow(
valueBoxOutput("WarmLeads")),
fluidRow(
valueBoxOutput("ColdLeads"))
)
)
),
tabItem(tabName = "ldAllocate",
fluidRow(
column(10),
column(2,
submitButton("Allocate Leads"))
),
br(),
hr(),
DT::dataTableOutput("leadData")
),
tabItem(tabName = "ldUploadFile",
fluidRow(
column(4,
fileInput("file","Upload the file"),
h5("Max file size to upload is 50 MB")),
column(4,
selectInput("selInput",label = "Select The Source System",
c("ABC Source System - CSV" = "csvSource","XYZ System - Excel" = "exlSource"))
),
column(4,
br(),submitButton("Add File to Lead Store"))
),
fluidRow(
tabBox()
)
)#fluid row ends here
)# tab item ends here
) # tab items ends here
)# tab items ends here
# dashboardBody ends here
# Define server logic required to draw a histogram
server <- function(input, output,session) {
fetchData <- reactive( {
allLeads = get.data()
if(nrow(allLeads)==0){
return(NULL)}
else{return(allLeads)}
})
valueCnt <- get.data()
assignedLeads <- valueCnt[which(valueCnt$Assigned_Agent_Code < 99999999),]
output$TotalLeads <- renderValueBox({
valueBox(
paste0(nrow(valueCnt)),"Total Leads",icon = icon("list")
)
}
)
output$ConvertedLeads <- renderValueBox({
valueBox(
paste0(nrow(assignedLeads)),"Assigned Leads",icon = icon("thumbs-up")
)})
output$OpenLeads <- renderValueBox({
valueBox(
paste0(nrow(valueCnt)- nrow(assignedLeads)),"Open Leads",icon = icon("tasks")
)})
output$HotLeads <- renderValueBox({
valueBox(
paste0(nrow(valueCnt[valueCnt$lead_score==90,])),"Hot Leads",icon = icon("fire")
)
}
)
output$WarmLeads <- renderValueBox({
valueBox(
paste0(nrow(valueCnt[valueCnt$lead_score==75,])),"Warm Leads",icon = icon("bandcamp")
)})
output$ColdLeads <- renderValueBox({
valueBox(
paste0(nrow(valueCnt[valueCnt$lead_score==60,])),"Cold Leads",icon = icon("cloud")
)})
#valueBoxOutput$OpenLeads
output$histogram <- renderPlot({hist(valueCnt$lead_score, main = "Lead Qulification Distribution",col = "sky blue" ,border = "sky blue",
xlab = "Lead Qualiification Score", ylab = "Frequncy")})
output$leadData <- DT::renderDataTable(fetchData(),
extensions = 'Buttons',
selection = "multiple",
options = list(scrollX = TRUE,dom = 'Blfrtip',buttons = list(buttons =c('csv','excel'),text = 'Download'),lengthMenu = list(c(10,50,100,-1),c(10,50,100,'All'))),
rownames = FALSE,
filter = 'top')
}
# Run the application
shinyApp(ui = ui, server = server)
|
2321bc5ae555bbeb11694eb4dd352f4b74785917 | a97ccb7a9444c2fdc07de379214fced22104a216 | /R/02_ECLIPSE_cleaning.R | ad93517b30bd4b7450c010590f9887b9f3768807 | [
"MIT"
] | permissive | yoffeash/baselineLH | a27347110a2dac7cdd8fc1e804474761be21bce1 | dae5bf8c7819cbf5e3f8ac5efeb9ebc057a6e6a4 | refs/heads/master | 2020-03-27T01:26:15.110290 | 2018-08-22T17:59:25 | 2018-08-22T17:59:25 | 145,710,625 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,634 | r | 02_ECLIPSE_cleaning.R | ### clean baseline local histogram data from ECLIPSE and create
## 1) file with summary whole lung LH values
## 2) file with subtype whole lung LH values
### import ECLIPSE baseline LH dataset ###
ECLIPSE_raw <- read_csv("data/raw_data/ECLIPSE_L1_localHistogram_parenchymaPhenotypes_20180305_wideFormat.csv")
ECLIPSE_pre1 <- clean_names(ECLIPSE_raw)
###################################### file with summary whole lung LH values ######################################
eclipse_LH_summary_whole_pre1 <- ECLIPSE_pre1 %>% select(starts_with("whole")) %>% select(-contains("wild")) %>% select(contains("type_frac"))
eclipse_CID <- ECLIPSE_pre1 %>% select(contains("cid")) %>% mutate(sid=str_sub(cid,start=1L,end=12L))
eclipse_LH_summary_whole_pre2 <- bind_cols(eclipse_LH_summary_whole_pre1,eclipse_CID)
eclipse_LH_summary_whole <- eclipse_LH_summary_whole_pre2 %>%
mutate(percent_normal = whole_lung_normal_parenchyma_type_frac) %>%
mutate(percent_emphysema = whole_lung_centrilobular_emphysema_type_frac + whole_lung_paraseptal_emphysema_type_frac) %>%
mutate(percent_interstitial = whole_lung_reticular_type_frac + whole_lung_subpleural_line_type_frac) %>%
select(cid, sid, percent_normal, percent_emphysema, percent_interstitial)
write_csv(eclipse_LH_summary_whole, "data/clean_data/ECLIPSE_L1_localHistogram_parenchymaPhenotypes_20180305_summary_wholelung.csv")
###################################### file with subtype whole lung LH values ######################################
write_csv(eclipse_LH_summary_whole_pre2, "data/clean_data/ECLIPSE_L1_localHistogram_parenchymaPhenotypes_20180305_subtype_wholelung.csv")
|
ab17d82b940c96a4e9a7d11cfa584d2372324c15 | 9ab25c9161cbb7d3d6f1068fcbc01449ba6baa0b | /RVS0.0.0/man/calc_EG_Var.Rd | 987d4f3b29d7ee5c15e9d3a9847a831171ed89ee | [] | no_license | jiafen/RVS | 5b9d4a42f4684d6589bbbc4167849e509b3c001a | f1c1ba78ec3983dd3f90bc09f1496d75d0f8b7dd | refs/heads/master | 2021-01-10T09:10:58.940526 | 2016-01-27T20:37:48 | 2016-01-27T20:37:48 | 49,983,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 796 | rd | calc_EG_Var.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/help_gener_seqdata.R
\name{calc_EG_Var_general}
\alias{calc_EG_Var_general}
\title{Andriy's original cal_EG2}
\usage{
calc_EG_Var_general(M, p, rdv)
}
\arguments{
\item{M:}{genotype likelihoods AA, Aa, aa, matrix sum(rdv) by 3 (double);
#' uses output from \code{calc_pobs_ndepth} function for simulation data and output from \code{getgenexp} or \code{getMAF} for VCF input}
\item{p:}{genotype frequencies AA, Aa, aa (double); output from \code{calc_EM} function.}
\item{rdv:}{read depth (vector of integers) for all samples}
}
\value{
the variance of E(G_ij|D_ij)
}
\description{
Andriy's original cal_EG2
}
\section{Functions}{
\itemize{
\item \code{calc_EG_Var_general}: also, see \code{calc_EG_Var},
}}
|
c3955c41e37ea96cda6cf80d51c20d15cb3611b1 | 9ecae552dd81259e5d086bcd2b2a62808d4906fc | /man/LogrankA.Rd | 61ef9d3e701b1d62821e1edb6d4f98e53e2f4406 | [] | no_license | jschoeley/LogrankA | 190817c61be8b73759073615da3eb4dd19f3dd7f | 8a4cbe8a12be8f6c706201ca831996fa2af0bba2 | refs/heads/master | 2020-04-05T22:50:01.466668 | 2015-03-06T16:43:23 | 2015-03-06T16:43:23 | 31,775,251 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,573 | rd | LogrankA.Rd | \name{LogrankA}
\alias{LogrankA}
\title{
Logrank Test for Aggregated Survival Data
}
\description{
\code{LogrankA} provides a logrank test across unlimited groups with the
possibility to input aggregated survival data.
}
\usage{
LogrankA(surv, group, weight)
}
\arguments{
\item{surv}{
An object of type \code{survival} is expected as input argument \code{surv}.
This object is generated with the function \code{Surv} of the package
\code{survival} and holds information about the survival time and censoring
status of each observation.
}
\item{group}{
Argument \code{group} provides the group affiliation of each observation in
the survival argument.
}
\item{weight}{
The argument \code{weight} is optional. It specifies the number of occurrences
for each value combination in an aggregated dataset. Expected is a
non-negative numeric vector.
}
}
\details{
The \code{group} and \code{weight} arguments must correspond to the entries in
the \code{surv} argument. Therefore the \code{group} and \code{weight} vectors
must be equal in length to the time and status columns in the survival object
of \code{surv}
If the weight argument is not specified it is assumed that the input data is
not aggregated.
More than a single group must be specified.
}
\value{
\item{p.chi2}{P-value of chi-squared test of logrank test statistic.}
\item{df}{Degrees of freedom used for chi-squared test.}
\item{LR}{Value of logrank test statistic.}
\item{lr.parameter}{Number of observations, observed events, expected events,
(O-E)^2/E for each group.}
In addition a short text summary of the logrank test is printed to the console.
}
\references{
Peto, R. et al. (1977). "Design and analysis of randomized clinical trials
requiring prolonged observation of each patient". II. analysis and examples. In:
British journal of cancer 35.1, pp. 1-39.
Ziegler, A., S. Lange, and R. Bender (2007). "Ueberlebenszeitanalyse: Der
Log-Rang-Test". In: Deutsche Medizinische Wochenschrift 132, pp. 39-41.
}
\author{
Jonas Richter-Dumke and Roland Rau
Maintainer: Jonas Richter-Dumke <jrd.r.project@gmail.com>
}
\note{
For an in-depth explanation of \code{LogrankA} please see the package vignette.
}
\seealso{
\code{\link{Surv}}, \code{\link{survdiff}}
}
\examples{
library(survival)
library(MASS)
## data: survival of australian aids patients (individual and aggregated)
aids2.ind <- Aids2 # import australian aids data
aids2.ind$status <- as.numeric(aids2.ind$status) - 1 # recode status to 0/1
stime.days <- aids2.ind$death - aids2.ind$diag # generate survival time in weeks
aids2.ind$stime <- round(stime.days / 7, 0)
aids2.ind$agegr <- cut(aids2.ind$age, # generate age groups
c(0, 20, 40, 60, 100), right = FALSE)
aids2.ind <- aids2.ind[ , c(5, 8, 9)] # keep only important columns
aids2.aggr <- aggregate(aids2.ind$stime, # transform to aggregated data
by = list(aids2.ind$status, aids2.ind$stime,
aids2.ind$agegr),
FUN = length)
colnames(aids2.aggr) <- c("status", "stime", "agegr", "n")
# generate survival objects for individual and aggregated data
surv.ind <- Surv(aids2.ind$stime, aids2.ind$status)
surv.aggr <- Surv(aids2.aggr$stime, aids2.aggr$status)
## logrank test on individual and aggregated data
# logrank on individual data
LogrankA(surv = surv.ind,
group = aids2.ind$agegr)
# logrank on aggregated data
LogrankA(surv = surv.aggr,
group = aids2.aggr$agegr,
weight = aids2.aggr$n)
}
\keyword{ survival }
|
dbb086873b55b96b4dd177bb3ab79b1f38853f01 | b785666dfb9dab6f462f0a16b07187d7bd11886a | /comet_code.r | bcd1a3af411c21c9a5e25bf0b444be29651bd13a | [] | no_license | ktorresSD/Rcode | d3cb3ab530419a91df61a791ab84961e52940989 | b91b123f051cb0e04212d9b7b5d3094cdde3a9c8 | refs/heads/master | 2022-11-07T16:53:50.191708 | 2022-10-28T22:53:48 | 2022-10-28T22:53:48 | 121,809,412 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 140 | r | comet_code.r | library(rgl)
open3d()
comet <- readOBJ(url("http://sci.esa.int/science-e/www/object/doc.cfm?fobjectid=54726"))
shade3d(comet, col="gray") |
749c6e60475033318f89d87f396557fa5d232298 | bca0b07ac982392423dcd7df6ff9f38d65dd81a7 | /code/generating_metadata/RPackage_bibliography.R | 7e1d4017d8e186f053b6e9a9dd043458687f7384 | [] | no_license | rory-spurr/ESAPermitsCapstone | 241221ed031f0af2089c8cd65e1c4c87ec244114 | 558af14aad09bacd181156cc93a69704461cdac9 | refs/heads/main | 2023-04-16T14:39:52.411064 | 2023-03-21T17:09:38 | 2023-03-21T17:09:38 | 510,441,404 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 220 | r | RPackage_bibliography.R |
# Function writes a BibTex file which can then be thrown into
# a citation manager to quickly cite packages used.
write_bib(
x = .packages(),
file = "code/creating_metadata/packages.bib",
tweak = T,
width = 60
) |
906471bf62053b50f83bb63fb235dcd9d788f9d9 | 1bcd87514ea143f57f5f4b338ad50f2a8d148134 | /R/llnhlogit.R | 4f23a06539987716eea5cceecbda95ed2b91bbb1 | [] | no_license | cran/bayesm | b491d7f87740082488c8695293f3565b2929f984 | 8a7211ff5287c42d5bc5cc60406351d97f030bcf | refs/heads/master | 2022-12-10T10:51:14.191052 | 2022-12-02T09:10:02 | 2022-12-02T09:10:02 | 17,694,644 | 19 | 15 | null | null | null | null | UTF-8 | R | false | false | 1,119 | r | llnhlogit.R | llnhlogit=function(theta,choice,lnprices,Xexpend)
{
# function to evaluate non-homothetic logit likelihood
# choice is a n x 1 vector with indicator of choice (1,...,m)
# lnprices is n x m array of log-prices faced
# Xexpend is n x d array of variables predicting expenditure
#
# non-homothetic model specifies ln(psi_i(u))= alpha_i - exp(k_i)u
#
# structure of theta vector:
# alpha (m x 1)
# k (m x 1)
# gamma (k x 1) expenditure function coefficients
# tau scaling of v
#
m=ncol(lnprices)
n=length(choice)
d=ncol(Xexpend)
alpha=theta[1:m]
k=theta[(m+1):(2*m)]
gamma=theta[(2*m+1):(2*m+d)]
tau=theta[length(theta)]
iotam=c(rep(1,m))
c1=as.vector(Xexpend%*%gamma)%x%iotam-as.vector(t(lnprices))+alpha
c2=c(rep(exp(k),n))
u=callroot(c1,c2,.0000001,20)
v=alpha - u*exp(k)-as.vector(t(lnprices))
vmat=matrix(v,ncol=m,byrow=TRUE)
vmat=tau*vmat
ind=seq(1,n)
vchosen=vmat[cbind(ind,choice)]
lnprob=vchosen-log((exp(vmat))%*%iotam)
return(sum(lnprob))
}
|
7b296e10bf73aa45098bc915f3dd53562d9114fd | eb95ca11c50c8ac556fd5f54fde9878b99b196a5 | /Figure_script/Motif_pvalue_scatter.r | 690bb7329dd7722d60f78ee664eb56507c8d15e3 | [] | no_license | adamtongji/Enhancer_pred_supple | 3b6df2dd2297633fc8d2024a8bc571c20b934cdd | df2c2cf59cd5a48d43c2428d4121f348dc4c5169 | refs/heads/master | 2021-05-09T11:15:59.211481 | 2018-08-20T07:03:44 | 2018-08-20T07:03:44 | 118,986,263 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,317 | r | Motif_pvalue_scatter.r | library(ggplot2)
boxcol<-function(n){
colall<-c('#1a9850','#984ea3','#fc8d59')
return(colall[c(1:n)])
}
mytemp<-theme(panel.background=element_rect(fill="white",colour=NA), panel.grid.major =element_blank(), panel.grid.minor = element_blank(),axis.line = element_line(colour = "black",size=0.6),axis.text.x = element_text(face = "bold"),axis.text.y =element_text(face = "bold"),
axis.title.y=element_text(face="bold"),axis.title.x=element_text(face="bold"),legend.text=element_text(face="bold"),legend.title=element_text(face="bold"))
mytab<-read.table("./summary.txt",sep='\t')
colnames(mytab)<-c("Motif","Seq","Pval","Count","Soft","Tissue")
for (tis in c("cranioface","limb","neural_tube","hindbrain","midbrain","forebrain","heart")){
for (soft in c("HOMER","Hotspot2","DFilter")){
sub1<-subset(mytab, Soft==soft & Tissue==tis)
if (nrow(sub1)>0){
sub2<-subset(mytab, Soft==paste(soft,"_weighted",sep='') & Tissue==tis)
subp<-merge(sub1,sub2,by=c("Tissue","Motif","Seq"))
subplots<-subp[,c(1,4,5,7,8)]
colnames(subplots)<-c("Tissue","RawP","RawC","WeightP","WeightC")
subplots$RawP[subplots$RawP>100] <- 100
subplots$WeightP[subplots$WeightP>100] <- 100
subplots2<-subset(subplots,subplots$WeightP>2 | subplots$RawP>2)
p<-ggplot(subplots2)+geom_point(aes(x=RawP,y=WeightP))+mytemp+xlab(paste(soft," origin peak -log10 pvalue",sep=''))+ylab(paste(soft," differential signal peak -log10 pvalue",sep=''))+geom_abline()+xlim(c(0,100))+ylim(c(0,100))
mytext <- paste("italic(p)"," == ", format(t.test(subplots2$WeightP,subplots2$RawP,paried=T)$p.value, scientific=T, digits = 3),sep='')
q<-p+annotate(geom="text",label=mytext,x = 50,y=30,fontface="bold",size=4.5,parse=T)
print(paste(soft,tis))
print(t.test(subplots2$WeightP,subplots2$RawP,paried=T)$p.value)
#ggsave(q,filename = paste("./figures/pval/",soft,"_",tis,"_Pval.pdf",sep=''), useDingbats=FALSE, height = 5,width=6)
# p<-ggplot(subplots2)+geom_point(aes(x=RawC,y=WeightC))+mytemp+xlab(paste("Number of ",soft," origin peak TF bings",sep=''))+ylab(paste("Number of ",soft," differential signal peak TF bindings",sep=''))+geom_abline()
#ggsave(p,filename = paste("./figures/count/",soft,"_",tis,"_Count.pdf",sep = ""))
}
}
}
|
d124fe2d6f01e5b3188dc552c47a138dca71dd0c | 674f7384bfc540b0e0df2736d284b44edbbe7338 | /01-scraper.R | 0e1df198267969a3371d4fd54a0f04e7119e7aeb | [] | no_license | acastroaraujo/Cuentas-Claras | 689b5de6f0a29ea808ef25766ba1d64f0913f369 | 3a8949363348f1fd189c5e7995e8430c4a9a8429 | refs/heads/master | 2023-02-19T17:57:43.676493 | 2021-01-19T16:45:34 | 2021-01-19T16:45:34 | 211,405,185 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,323 | r | 01-scraper.R |
library(tidyverse)
library(rvest)
library(readxl)
cc_buscador_territorial <- function(id, year = c("2015", "2019")) {
year <- match.arg(year)
url <- paste0(
url <- "https://www5.registraduria.gov.co/CuentasClarasPublicoTer",
year,
"/Consultas/Candidato/Reporte/",
id
)
message(url)
obj <- httr::RETRY("GET", url)
stopifnot(httr::status_code(obj) == 200)
website <- httr::content(obj)
info <- website %>%
rvest::html_nodes("#form #centro .fuente") %>%
rvest::html_text()
href <- website %>%
rvest::html_nodes(".enlacexls") %>%
rvest::html_attr("href") %>%
paste0("https://www5.registraduria.gov.co", .)
formulario <- website %>%
rvest::html_nodes(".rounded-cornerform strong span") %>%
rvest::html_text()
tibble::tibble(nombre = info[[1]], corporacion = info[[2]], year, formulario, href)
}
cc_buscador_legislativo <- function(id, year = c("2014", "2018")) {
year <- match.arg(year)
url <- paste0(
url <- "https://www5.registraduria.gov.co/CuentasClarasPublicoCon",
year,
"/Consultas/Candidato/Reporte/",
id
)
message(url)
obj <- httr::RETRY("GET", url)
stopifnot(httr::status_code(obj) == 200)
website <- httr::content(obj)
info <- website %>%
rvest::html_nodes("#form #centro .fuente") %>%
rvest::html_text()
href <- website %>%
rvest::html_nodes(".enlacexls") %>%
rvest::html_attr("href") %>%
paste0("https://www5.registraduria.gov.co", .)
formulario <- website %>%
rvest::html_nodes(".rounded-cornerform strong span") %>%
rvest::html_text()
tibble::tibble(nombre = info[[1]], corporacion = info[[2]], year, formulario, href)
}
download_excel <- function(href) {
temp <- tempfile()
download.file(href, temp, quiet = TRUE)
return(temp)
}
# Formulario 5.1B ---------------------------------------------------------
ingresos_familia <- function(x) {
suppressMessages({
data <- read_excel(x, skip = 11)
candidato <- read_excel(x, range = "F10", col_names = "") %>% pull()
})
output <- data %>%
rename(de = matches("Nombre de"), de_id = matches("Cédula"), valor = Valor, parentesco = Parentesco) %>%
mutate(para = candidato, formulario = "5.1B") %>%
select(de, para, valor, formulario, parentesco, de_id) %>%
drop_na(de) %>%
filter(de != "TOTAL")
if (is.logical(output$de_id)) {
output$de_id <- as.character(output$de_id)
}
if (nrow(output) == 0) {
message(candidato, ": formulario 5.1B", " vacío!")
output <- mutate_if(output, is.logical, as.character) %>%
mutate(valor = numeric())
}
if (class(output$valor) == "character") stop("Hay algo raro en el formulario", call. = FALSE)
return(output)
}
# Formulario 5.2B ---------------------------------------------------------
ingresos_particulares <- function(x) {
suppressMessages({
data <- read_excel(x, skip = 11)
candidato <- read_excel(x, range = "F10", col_names = "") %>% pull()
})
output <- data %>%
rename(de = matches("Nombre de"), valor = Valor, de_id = matches("Cédula")) %>%
mutate(para = candidato, parentesco = "otro", formulario = "5.2B") %>%
select(de, para, valor, formulario, parentesco, de_id) %>%
drop_na(de) %>%
filter(de != "TOTAL")
if (is.logical(output$de_id)) {
output$de_id <- as.character(output$de_id)
}
if (nrow(output) == 0) {
message(candidato, ": formulario 5.2B", " vacío!")
output <- mutate_if(output, is.logical, as.character) %>%
mutate(valor = numeric())
}
if (class(output$valor) == "character") stop("Hay algo raro en el formulario", call. = FALSE)
return(output)
}
descargar <- function(datos_buscador) {
if (any(class(datos_buscador) == "try-error")) stop("Los meta-datos no existen", call. = FALSE)
out1 <- download_excel(datos_buscador$href[[2]]) %>%
ingresos_familia()
out1$href <- datos_buscador$href[[2]]
out2 <- download_excel(datos_buscador$href[[3]]) %>%
ingresos_particulares()
out2$href <- datos_buscador$href[[3]]
el <- bind_rows(out1, out2)
el$year <- unique(datos_buscador$year)
el$corporacion <- unique(datos_buscador$corporacion)
return(el) # edge list
} |
7d996210a848fcc610823c4a7fd0f32c1389714c | f398cdb4bebfb081669f75ee50756d9b35722e35 | /R/objects3d.R | a487321e9dea84bd5cb1ddf718abeec785342cc6 | [] | no_license | Tomomahoney/pca3d | e9c9dd1ebcc669da2af5aac4ebd12394377cf873 | f28f049dd7f97c52cb8dbcb6017d0a7cf5249f41 | refs/heads/master | 2023-05-26T09:06:24.928751 | 2020-10-02T14:10:06 | 2020-10-02T14:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,379 | r | objects3d.R | # draw a series of tetrahedrons
tetrahedrons3d <- function( coords, radius= c( 1, 1, 1 ), col= "grey", ... ) {
coords.n <- NULL
r <- 2 * radius / 3
for( i in 1:nrow( coords ) ) {
shade3d(translate3d(
scale3d(
rotate3d(tetrahedron3d(col=col, ...), 2, 0, 1, 1),
r[1], r[2], r[3]),
coords[i,1], coords[i,2], coords[i,3]))
}
# p <- coords[ r, ]
#
# # ABC
# coords.n <- rbind( coords.n, p + c( -radius[1], 0, -radius[3]/sqrt(2) ) ) # A
# coords.n <- rbind( coords.n, p + c( radius[1], 0, -radius[3]/sqrt(2) ) ) # B
# coords.n <- rbind( coords.n, p + c( 0, radius[2], radius[3]/sqrt(2) ) ) # C
#
# # ABD
# coords.n <- rbind( coords.n, p + c( -radius[1], 0, -radius[3]/sqrt(2) ) ) # A
# coords.n <- rbind( coords.n, p + c( radius[1], 0, -radius[3]/sqrt(2) ) ) # B
# coords.n <- rbind( coords.n, p + c( 0, -radius[2], radius[3]/sqrt(2) ) ) # D
#
# # ACD
# coords.n <- rbind( coords.n, p + c( -radius[1], 0, -radius[3]/sqrt(2) ) ) # A
# coords.n <- rbind( coords.n, p + c( 0, radius[2], radius[3]/sqrt(2) ) ) # C
# coords.n <- rbind( coords.n, p + c( 0, -radius[2], radius[3]/sqrt(2) ) ) # D
#
# # BCD
# coords.n <- rbind( coords.n, p + c( radius[1], 0, -radius[3]/sqrt(2) ) ) # B
# coords.n <- rbind( coords.n, p + c( 0, radius[2], radius[3]/sqrt(2) ) ) # C
# coords.n <- rbind( coords.n, p + c( 0, -radius[2], radius[3]/sqrt(2) ) ) # D
# }
#
# triangles3d( coords.n, col= col, ... )
#
}
## construct octahedrons
octahedrons3d <- function( coords, radius= c( 1, 1, 1), col= "grey", ... ) {
coords.n <- NULL
r <- radius
for( i in 1:nrow( coords ) ) {
shade3d(translate3d(
scale3d(
octahedron3d(col=col, ...),
r[1], r[2], r[3]),
coords[i,1], coords[i,2], coords[i,3]))
}
}
## construct cubes
cubes3d <- function( coords, radius= c( 1, 1, 1), col= "grey", ... ) {
coords.n <- NULL
r <- 2 * radius / 3
for( i in 1:nrow( coords ) ) {
shade3d(translate3d(
scale3d(
cube3d(col=col, ...),
r[1], r[2], r[3]),
coords[i,1], coords[i,2], coords[i,3]))
}
}
# return the basic cone mesh
# scale is necessary because of the dependence on the aspect ratio
.getcone <- function( r, h, scale= NULL ) {
n <- length( .sin.t )
xv <- r * .sin.t
yv <- rep( 0, n )
zv <- r * .cos.t
if( missing( scale ) ) scale <- rep( 1, 3 )
scale <- 1 / scale
sx <- scale[1]
sy <- scale[2]
sz <- scale[3]
tmp <- NULL
for( i in 1:(n-1) ) {
tmp <- rbind( tmp,
c( 0, 0, 0 ),
scale3d( c( xv[i], yv[i], zv[i] ), sx, sy, sz ),
scale3d( c( xv[i+1], yv[i+1], zv[i+1] ), sx, sy, sz ) )
}
for( i in 1:(n-1) ) {
tmp <- rbind( tmp,
c( 0, h, 0 ),
scale3d( c( xv[i], yv[i], zv[i] ), sx, sy, sz ),
scale3d( c( xv[i+1], yv[i+1], zv[i+1] ), sx, sy, sz ) )
}
tmp
}
# vector cross product
.cross3 <- function(a,b) {
c(a[2]*b[3]-a[3]*b[2], -a[1]*b[3]+a[3]*b[1], a[1]*b[2]-a[2]*b[1])
}
# draw a cone (e.g. tip of an arrow)
cone3d <- function( base, tip, radius= 10, col= "grey", scale= NULL, ... ) {
start <- rep( 0, 3 )
if( missing( scale ) ) scale= rep( 1, 0 )
else scale <- max( scale ) / scale
tip <- as.vector( tip ) * scale
base <- as.vector( base ) * scale
v1 <- tip
v2 <- c( 0, 100, 0 )
o <- .cross3( v1, v2 )
theta <- acos( sum( v1 * v2 ) / ( sqrt(sum( v1 * v1 )) * sqrt(sum( v2 * v2 )) ) )
vl <- sqrt( sum( tip^2 ) )
tmp <- .getcone( radius, vl )
tmp <- translate3d( rotate3d( tmp, theta, o[1], o[2], o[3] ), base[1], base[2], base[3] )
scale <- 1 / scale
tmp <- t( apply( tmp, 1, function( x ) x * scale ) )
triangles3d( tmp, col= col, ... )
}
arrows3d <- function( coords, headlength= 0.035, head= "end", scale= NULL, radius = NULL, ... ) {
head <- match.arg( head, c( "start", "end", "both" ) )
narr <- nrow( coords ) / 2
n <- nrow( coords )
starts <- coords[ seq( 1, n, by= 2 ), ]
ends <- coords[ seq( 2, n, by= 2 ), ]
if( missing( radius ) ) radius <- ( max( coords ) - min( coords ) ) / 50
segments3d( coords, ... )
if( head == "end" | head == "both" ) {
for( i in 1:narr ) {
s <- starts[i,]
e <- ends[i,]
base <- e - ( e - s ) * headlength
tip <- ( e - s ) * headlength
cone3d( base, tip, radius= radius, scale= scale, ... )
}
}
}
|
526fe33a8d9128a310d155ab3449e34260d703c5 | 8b50fb5214b066784b1c39072b98ae1752b43bb5 | /1-read_countstats.R | 13ba9f4d296f512878331f5171e5ae57481bfa32 | [] | no_license | hbenbow/tafrog | b50abd879806a72fa88dfc7513f1d9fd4a523dd6 | bbf1a7a8d1b8c91394178721b138fd757e2f4c1f | refs/heads/main | 2023-02-10T07:30:39.826198 | 2021-01-05T09:48:10 | 2021-01-05T09:48:10 | 309,426,679 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,759 | r | 1-read_countstats.R | library(WGCNA)
library(tximport)
library(DESeq2)
library(ggplot2)
library(dplyr)
library(tidyr)
library(plyr)
library(stringr)
library(gplots)
library(tidyr)
library(Hmisc)
library(corrplot)
allowWGCNAThreads()
setwd("~/Documents/FROG/")
# ==================================================================================
# if already have a txi object, load it with the metadata (colData)
colData<-read.csv("~/Documents/FROG/metadata.csv")
load("~/Documents/FROG/txi.RData")
colData$Timepoint<-as.factor(colData$Timepoint)
# check that order of samples in metadata and txi object are the same
# ==================================================================================
# read count stats chunk starts here
expressed_genes<-txi.kallisto.tsv$abundance
expressed_genes<-as.data.frame(expressed_genes)
expressed_genes$GeneID<-row.names(expressed_genes)
expressed_genes<- expressed_genes[- grep("LC", expressed_genes$GeneID),]
expressed_genes<-expressed_genes[,c(19, 1:18)]
expressed_genes_long<-expressed_genes %>% gather(Sample, TPM, 2:19)
all_wheat_genes<-merge(expressed_genes_long, colData, by="Sample")
sub<-all_wheat_genes[,c(8, 2, 3, 4)]
rep_wise<-spread(sub, key = Rep, value=TPM)
rep_wise$Sum<-rep_wise$`1` + rep_wise$`2` + rep_wise$`3`
rep_wise$test1<-ifelse(rep_wise$`1`>=0.5, 1,0)
rep_wise$test2<-ifelse(rep_wise$`2`>=0.5, 1,0)
rep_wise$test3<-ifelse(rep_wise$`3`>=0.5, 1,0)
rep_wise$Sum<-rep_wise$test1 + rep_wise$test2 + rep_wise$test3
expressed<-rep_wise[(rep_wise$Sum >=2),]
for(i in unique(expressed$Factor)){
data<-expressed[(expressed$Factor==i),]
factor<-paste(i)
write.csv(data, file=paste("~/Documents/FROG/", factor, ".csv", sep=""))
assign(factor, data)
}
# N1O vs N1W comparison is called "N1"
# M2O vs M2W comparison is called "M2"
# F2O vs F2W comparison is called "F2"
# F2O vs M2O comparison is called "FO"
# F2W vs M2W comparison is called "W2"
N1<-rbind(N1O,N1W)
M2<-rbind(M2O,M2W)
F2<-rbind(F2O,F2W)
FO<-rbind(F2O,M2O)
W2<-rbind(F2W,M2W)
N1<-N1[!(duplicated(N1$GeneID)),]
M2<-M2[!(duplicated(M2$GeneID)),]
F2<-F2[!(duplicated(F2$GeneID)),]
FO<-FO[!(duplicated(FO$GeneID)),]
W2<-W2[!(duplicated(W2$GeneID)),]
N1$Comparison<-"N1"
M2$Comparison<-"M2"
F2$Comparison<-"F2"
FO$Comparison<-"FO"
W2$Comparison<-"W2"
N1<-N1[,c(2, 10)]
M2<-M2[,c(2, 10)]
F2<-F2[,c(2, 10)]
FO<-FO[,c(2, 10)]
W2<-W2[,c(2, 10)]
all_filtered_lists<-rbind(N1,
M2,
F2,
FO,
W2)
write.csv(all_filtered_lists, file="~/Documents/FROG/all_lists_filtered.csv", row.names = F)
write.csv(expressed_genes, file="~/Documents/FROG/all_gene_counts.csv")
write.csv(tpm, file="~/Documents/FROG/all_gene_tpm.csv", row.names=T)
# check correlation of reps
cor<-as.matrix(rep_wise[,c(3,4,5)])
cor<-rcorr(cor)
corrplot(cor$r, type="lower", order="original",p.mat = cor$P,
sig.level = 0.05, insig = "blank", tl.col="black", tl.cex = 2,
tl.srt = 0, tl.offset = 1, method="color", addCoef.col = "white")
# colData is metadata with factor column. Make dds object with deseq
dds <- DESeqDataSetFromTximport(txi.kallisto.tsv, colData, ~ Treatment + Genotype)
# transform using variance stabilising transformation
vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
# generate PC1 and PC2 for visualisation
pcaData <- plotPCA(vsd, intgroup=c("Treatment", "Genotype"), returnData=TRUE)
# plot PC1 vs PC2
ggplot(pcaData, aes(x=PC1, y=PC2)) + geom_point(aes(colour=Genotype, shape=Treatment), size=4, alpha=0.7) +
theme_classic() +
theme(text = element_text(size=20, colour="black"),
axis.text.x = element_text(colour="black"))
write.csv(pcaData, file="~/Documents/FROG/pcadata.csv")
vst_counts<-assay(vsd)
|
4823d94050b2b36786d80f0e360ff1481ecd8f30 | 0893501b88126c3cd818b0ef0c2727b755d673ab | /getData.R | f11fed717a3ce332b668175acc6e953dd9dda75b | [] | no_license | TheScientistBr/geoMap | c2bcf4459bf78efc686d0fe49b7dfb1c429ee131 | b9ad51dc9e750b988db2145cf0734041c3b10ff7 | refs/heads/master | 2020-03-19T12:44:41.462995 | 2018-06-11T21:23:06 | 2018-06-11T21:23:06 | 136,537,420 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,254 | r | getData.R | library("readxl")
library("placement")
library("devtools")
library("RCurl")
df <- read_excel("endereco.xlsx",sheet = 1)
endereco <- "Av. Cesar Hilal, 700 Vitoria ES CEP: 29.052-232 Brasil"
ll <- geocode_url(address = endereco, clean = T,auth="standard_api", privkey="AIzaSyBY20H089DMPHABXcPhty3HFGgsXmrVQw0",messages = T)
print(ll[ , 1:5])
url <- function(address) {
privKey <- "&key=AIzaSyBY20H089DMPHABXcPhty3HFGgsXmrVQw0"
root <- "https://maps.googleapis.com/maps/api/geocode/json?address="
u <- paste(root, address,privKey)
return(URLencode(u))
}
# Geocode a street address via the Google Geocoding API.
#
# Args:
#   address: character scalar, the address to geocode.
#   verbose: if TRUE, echo the address before issuing the request.
# Returns: on API status "OK", a character vector
#   c(lat, lng, location_type, formatted_address) for the FIRST match only;
#   otherwise a string pasting the API status and the address.
# NOTE(review): fromJSON() is called but no JSON package (RJSONIO/rjson)
# is attached at the top of this script — confirm which library is
# expected to provide it.
geoCode <- function(address,verbose=FALSE) {
  if(verbose) cat(address,"\n")
  u <- url(address)
  # Fetch the raw JSON response text with RCurl.
  doc <- getURL(u)
  x <- fromJSON(doc,simplify = FALSE)
  if(x$status=="OK") {
    # Additional matches beyond results[[1]] are ignored.
    lat <- x$results[[1]]$geometry$location$lat
    lng <- x$results[[1]]$geometry$location$lng
    location_type <- x$results[[1]]$geometry$location_type
    formatted_address <- x$results[[1]]$formatted_address
    return(c(lat, lng, location_type, formatted_address))
  } else {
    return(paste(x$status,address))
  }
}
# Ad-hoc smoke tests of the helpers defined above.
geoCode(endereco)
url(endereco)
# Direct RCurl request with a pre-encoded address.
# NOTE(review): a second, different API key is hard-coded in this URL.
getURL("https://maps.googleapis.com/maps/api/geocode/json?address=%20Rua%20Joaquim%20Lirio,%2096,%20Brasil%20&key=AIzaSyDP0mn4Ja3N0cU2gGJUTtxKgPFZrxOeRw4")
|
94e5cb0bbf4aa2013b9dcd12c185bc7a1a7435ff | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Ramble/examples/grapes-thentree-grapes.Rd.R | 0ca53e9c6471b15e2febc08b6c356d59d9cce365 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 264 | r | grapes-thentree-grapes.Rd.R | library(Ramble)
### Name: %thentree%
### Title: '%thentree%' is the infix operator for the then combinator, and
###   it is the preferred way to use the 'thentree' operator.
### Aliases: %thentree%
### ** Examples
# Sequence two parsers on the input "abc": item() consumes one character,
# then succeed("123") succeeds without consuming input; the combined
# parser keeps both results as a tree.
(item() %thentree% succeed("123")) ("abc")
|
0b4d4aad058facf3a7b935d8aae071dfd3c0c7dd | 29585dff702209dd446c0ab52ceea046c58e384e | /survJamda/R/eval.merge.simulate.R | 32e837939c937c299ab9b9f78dd56ccc299b10cb | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 242 | r | eval.merge.simulate.R | eval.merge.simulate <-
## Evaluate survival prediction on the merged data of two simulated data
## sets: row-binds their expression matrices, concatenates their survival
## times and censoring indicators, and runs 10-fold cross-validation.
##
## Args:
##   d1, d2:    lists with components ds1 (expression matrix, samples in
##              rows), T (survival times) and censor (censoring status).
##   tot.genes: total gene count (currently unused; kept so the existing
##              call signature is unchanged).
##   gene.nb:   number of genes selected per cross-validation fold.
##   zscore:    z-score standardization flag forwarded to iter.crossval.
function(d1,d2,tot.genes, gene.nb, zscore)
{
	mat = rbind(d1$ds1,d2$ds1)
	cat("\nMerged data set\n")
	## BUG FIX: zscore was previously hard-coded to 1 here, silently
	## ignoring the function's zscore argument; it is now passed through.
	iter.crossval(mat, c(d1$T,d2$T), c(d1$censor,d2$censor),ngroup = 10,zscore = zscore,gn.nb =gene.nb,gn.nb.display = 0)
}
|
1747dd57c7fcddea1c171ba97bc2a53dcb07f746 | 17073e3fcc303d51ac3254273ab104a0fe241e18 | /.Rproj.user/0AF359E7/sources/s-A38B8BE3/E75B2FEF-contents | 4bdcf3f69dfb2820e2de606d3be1f30d2db34fd9 | [] | no_license | jacobostergaard/bootha | 6a4054730c418fd7fcfabe9859b552c817ed6c71 | 8f3ac5b43203be68775302347094e7792a98d625 | refs/heads/main | 2023-02-16T22:17:39.589968 | 2021-01-15T11:22:54 | 2021-01-15T11:22:54 | 329,871,491 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 828 | E75B2FEF-contents |
# Cluster the processes of a connectivity matrix M:
#  1. flag "unconnected" processes whose diagonal entry is an outlier
#     (approximately zero), using a leave-one-out estimate of the mean,
#  2. run igraph's fast-greedy community detection on the remaining
#     weighted, undirected graph,
#  3. return the group assignments (outliers appended as singleton
#     clusters) together with the modularity of the detected communities.
# NOTE(review): if no outliers are found, r.idx is integer(0) and both
# M[-r.idx, -r.idx] and (1:p)[-r.idx] select NOTHING (negative indexing
# with an empty vector keeps no elements) — this edge case needs a guard.
cluster <- function(M){
  tmp = diag(M) # extract diagonal, unconnected processes should have approx. zero
  p = length(tmp)
  # find outliers, i.e. entries approx zero using "leave-one-out" estimation of the mean, analogous to the dfbetas measure
  m = numeric(length(tmp))
  for(i in 1:length(tmp)){
    m[i] = mean(tmp)-mean(tmp[-i])
  }
  # 3-sigma rule on the leave-one-out mean shifts
  r.idx = which(m > 3*sd(m) | m < -3*sd(m))
  # r.idx = which(tmp > mean(tmp)+2*sd(tmp) | tmp < mean(tmp)-2*sd(tmp) )
  # weighted, undirected graph on the non-outlier processes only
  G = graph.adjacency(abs(M[-r.idx,-r.idx]), mode="undirected", weighted=TRUE)
  cluster = igraph::fastgreedy.community(G)
  # outliers receive fresh cluster ids after the detected communities
  # (note `:` binds tighter than `+`, so this is max(...) + (1:n)).
  grps = data.frame( idx = c( (1:p)[-r.idx], r.idx ), cluster= c(cluster$membership, max(cluster$membership)+1:length(r.idx)) )
  grps = grps[order(grps$cluster, grps$idx),]
  return(list(grps=grps, mod=cluster$modularity))
}
| |
16f7db4ebb7b0376c4f9592bb26c8f52ee2baae1 | 6b4ef35c61782b88e392126be8fc80c4b9e2ad29 | /man/getCapability.Rd | af8f82cf309cedd339e5a0e1c43efcf543695429 | [
"MIT"
] | permissive | mattia6690/CubeR | f2a74d335cbdfdf8ee6fb1b2f87e83caebf518fe | 07d311db513631f5a00453e8d05deabd277674ce | refs/heads/master | 2020-06-04T00:40:27.674952 | 2019-06-21T12:30:30 | 2019-06-21T12:30:30 | 191,798,466 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 397 | rd | getCapability.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coverage_metadata.R
\name{getCapability}
\alias{getCapability}
\title{Returns the Capabilities}
\usage{
getCapability(url = NULL)
}
\arguments{
\item{url}{The central URL leading to the 'ows' page of the data cube}
}
\description{
Returns the capabilities of a data cube, listing all of the coverages it makes available.
}
|
8f0533b41b7468451da265d43273c65474e7f2f9 | f5224269ceced4aaeb094a2a16096794c9ce2761 | /SARS-CoV-2/scripts/2_zscores.R | 31d60c2dec66772b8d3ebeb8dea92f6d47531a9d | [
"MIT"
] | permissive | jilimcaoco/MPProjects | 2842e7c3c358aa1c4a5d3f0a734bb51046016058 | 5b930ce2fdf5def49444f1953457745af964efe9 | refs/heads/main | 2023-06-15T04:00:46.546689 | 2021-06-29T02:57:46 | 2021-06-29T02:57:46 | 376,943,636 | 0 | 0 | MIT | 2021-06-29T02:57:47 | 2021-06-14T20:08:32 | null | UTF-8 | R | false | false | 11,538 | r | 2_zscores.R | library(plyr)
library(tidyverse)
library(fuzzyjoin)
library(ggplot2)
library(readxl)
library(MPStats)
# Z' (Z-prime) screening-assay quality factor:
#   1 - 3 * (sd_pos + sd_neg) / |mean_pos - mean_neg|
# Values near 1 indicate well-separated control distributions; values at
# or below 0 indicate overlapping positive/negative controls.
Zprime <- function(positives, negatives, na.rm=TRUE) {
    spread <- sd(positives, na.rm = na.rm) + sd(negatives, na.rm = na.rm)
    separation <- abs(mean(positives, na.rm = na.rm) - mean(negatives, na.rm = na.rm))
    1 - 3 * spread / separation
}
# Feature metadata: one row per CellProfiler feature (name + transform).
feature_columns <- readr::read_tsv("raw_data/cell_feature_columns.tsv")
# Image-level scores for the 20XX plate series.
image_scores_20XX <- arrow::read_parquet("product/image_scores_20XX.parquet")
# One row per (plate, compound, dose) combination.
meta_well_scores <- image_scores_20XX %>%
    dplyr::distinct(
        master_plate_id,
        Compound,
        dose_nM)
plate_id <- "2006A"
# Per-cell features for the chosen plate, restricted to positive (PC) and
# negative (NC) control wells, plus a derived infectivity score that is a
# linear combination of virus-channel intensity features.
# NOTE(review): the coefficients are hard-coded — presumably from a model
# fit elsewhere in this project; confirm their provenance.
cell_features <- arrow::read_parquet(
    file=paste0("product/SARS_", plate_id, "_Cell_MasterDataTable.parquet"),
    col_select=c("plate_id", "Condition", "Image_Metadata_WellID", feature_columns$feature)) %>%
    dplyr::filter(Condition %in% c("PC", "NC")) %>%
    dplyr::mutate(
        infectivity_score =
            -5.064328 +
            Cells_Intensity_IntegratedIntensityEdge_Virus * 1.487025e-01 +
            Cells_Intensity_MeanIntensityEdge_Virus * -3.840196e+01 +
            Cells_Intensity_MaxIntensityEdge_Virus * 4.270269e+01 +
            Cells_Intensity_MaxIntensity_Virus * 4.254849e+01)
# Register the derived score so downstream loops treat it as a feature.
feature_columns <- feature_columns %>%
    dplyr::bind_rows(
        data.frame(feature="infectivity_score", transform="identity"))
# Z' (Z-prime) assay-quality factor:
#   1 - 3 * (sd_pos + sd_neg) / |mean_pos - mean_neg|
#
# Args:
#   positives, negatives: numeric vectors of control measurements.
#   na.rm: drop NA values before computing means/sds (default TRUE).
# CONSISTENCY FIX: this redefinition shadowed the identical helper defined
# earlier in this script but silently dropped its na.rm handling (NA input
# produced NA instead of a usable Z'); the two definitions now agree.
Zprime <- function(positives, negatives, na.rm = TRUE) {
    1 - 3 * (sd(positives, na.rm = na.rm) + sd(negatives, na.rm = na.rm)) /
        abs(mean(positives, na.rm = na.rm) - mean(negatives, na.rm = na.rm))
}
# Per-feature Z' across all individual control cells (no well-level
# aggregation): for each feature, compare its distribution in PC vs NC
# cells. Cell-to-cell variance makes these values strongly negative.
zprime_scores <- plyr::ldply(feature_columns$feature, function(feature_id){
    positives <- cell_features %>%
        dplyr::filter(Condition == "PC") %>%
        magrittr::extract2(feature_id)
    negatives <- cell_features %>%
        dplyr::filter(Condition == "NC") %>%
        magrittr::extract2(feature_id)
    data.frame(
        feature_id=feature_id,
        Zprime=Zprime(positives, negatives))
})
# Pasted console output from printing zprime_scores, commented out so the
# script still parses (these bare lines were previously syntax errors):
# feature_id Zprime
# 1 Nuclei_Intensity_MinIntensity_CMO -3.344305
# 2 Nuclei_Intensity_MinIntensityEdge_CMO -3.402473
# 3 Nuclei_Intensity_MinIntensity_Lipids -3.477715
# 4 Nuclei_Intensity_MinIntensityEdge_Lipids -3.592840
# 5 Nuclei_Intensity_LowerQuartileIntensity_Lipids -3.878905
# 6 Nuclei_Intensity_MeanIntensityEdge_CMO -4.005551
# 7 Nuclei_Intensity_LowerQuartileIntensity_CMO -4.141382
# 8 Nuclei_Intensity_MedianIntensity_Lipids -4.240294
# 9 Nuclei_Intensity_MeanIntensity_Lipids -4.310019
# 10 Cells_Intensity_MinIntensity_Virus -4.343327
# 11 Cytoplasm_Intensity_MeanIntensityEdge_CMO -4.368224
# 12 Nuclei_Intensity_MeanIntensityEdge_Lipids -4.432672
# 13 Nuclei_Intensity_MeanIntensity_CMO -4.456870
# 14 Nuclei_Intensity_MedianIntensity_CMO -4.574998
# 15 Cytoplasm_Intensity_MeanIntensity_CMO -4.669878
# 16 Cytoplasm_Intensity_MinIntensityEdge_Virus -4.717068
# 17 Cytoplasm_Intensity_MinIntensity_Virus -4.738681
# ...
# infectivity_score -6.718468
## averaging to the well level first
# Per-feature Z' computed on per-well mean feature values rather than on
# single cells; aggregating first removes most of the cell-to-cell
# variance, giving far less negative Z' values (see output below).
zprime_scores_well_mean <- plyr::ldply(feature_columns$feature, function(feature_id){
    positives <- cell_features %>%
        dplyr::filter(Condition == "PC") %>%
        dplyr::group_by(Image_Metadata_WellID) %>%
        dplyr::summarize(mean_feature_value=mean(!!sym(feature_id))) %>%
        magrittr::extract2("mean_feature_value")
    negatives <- cell_features %>%
        dplyr::filter(Condition == "NC") %>%
        dplyr::group_by(Image_Metadata_WellID) %>%
        dplyr::summarize(mean_feature_value=mean(!!sym(feature_id))) %>%
        magrittr::extract2("mean_feature_value")
    data.frame(
        feature_id=feature_id,
        Zprime=Zprime(positives, negatives))
})
# feature_id Zprime
# 1 Cells_Intensity_LowerQuartileIntensity_CMO -0.1013488
# 2 Cells_Intensity_MedianIntensity_CMO -0.1127203
# 3 Cytoplasm_Intensity_MinIntensity_CMO -0.1537897
# 4 Cells_Intensity_MinIntensity_CMO -0.1538760
# 5 Cytoplasm_Intensity_MedianIntensity_CMO -0.1548385
# 6 Cells_Intensity_MeanIntensityEdge_Lipids -0.1548388
# 7 Cells_Intensity_MeanIntensity_CMO -0.1561189
# 8 Cells_Intensity_MeanIntensityEdge_CMO -0.1598360
# 9 Cytoplasm_Intensity_MeanIntensity_CMO -0.1628205
# 10 Cytoplasm_Intensity_MeanIntensity_Lipids -0.1643084
# ...
# 142 infectivity_score -0.9883708
##########################################
## Zprime for RF Scores for 10XX Series ##
##########################################
# (Header previously said "20XX" although this section processes the 10XX
# series.) Each variant below aggregates the per-field random-forest
# infectivity scores differently, overwrites zprime_plate_10XX with the
# per-plate Z' values, and prints the mean Z' across plates so the
# aggregation strategies can be compared.
load("intermediate_data/rf_scores_field_10XX.Rdata")
# straight fraction with viral intensity > .01
# Per-plate Z' on the raw per-field fraction of objects classified
# positive (uses the script-local Zprime; later variants use MPStats::Zprime).
zprime_plate_10XX <- rf_scores_field_10XX %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("Image_Classify_Positive_PctObjectsPerBin"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("Image_Classify_Positive_PctObjectsPerBin")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_10XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# mean over fields
# Average the per-field classifier fraction to one value per well first.
zprime_plate_10XX <- rf_scores_field_10XX %>%
    dplyr::filter(Compound %in% c("PC", "NC")) %>%
    dplyr::group_by(Plate_Name, Compound, Well_ID) %>%
    dplyr::summarize(infectivity_probpos_well = mean(Image_Classify_Positive_PctObjectsPerBin)) %>%
    dplyr::ungroup() %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_well"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_well")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_10XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# RF stabilized model over frames
# Z' computed directly on the stabilized per-field positive probability.
zprime_plate_10XX <- rf_scores_field_10XX %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_field"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_field")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_10XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# mean over fields
# As above, but aggregating the stabilized field probabilities by well mean.
zprime_plate_10XX <- rf_scores_field_10XX %>%
    dplyr::filter(Compound %in% c("PC", "NC")) %>%
    dplyr::group_by(Plate_Name, Compound, Well_ID) %>%
    dplyr::summarize(infectivity_probpos_well = mean(infectivity_probpos_field)) %>%
    dplyr::ungroup() %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_well"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_well")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_10XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# median over fields
zprime_plate_10XX <- rf_scores_field_10XX %>%
    dplyr::filter(Compound %in% c("PC", "NC")) %>%
    dplyr::group_by(Plate_Name, Compound, Well_ID) %>%
    dplyr::summarize(infectivity_probpos_well = median(infectivity_probpos_field)) %>%
    dplyr::ungroup() %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_well"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_well")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_10XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# max over fields
zprime_plate_10XX <- rf_scores_field_10XX %>%
    dplyr::filter(Compound %in% c("PC", "NC")) %>%
    dplyr::group_by(Plate_Name, Compound, Well_ID) %>%
    dplyr::summarize(infectivity_probpos_well = max(infectivity_probpos_field)) %>%
    dplyr::ungroup() %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_well"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_well")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_10XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
##########################################
## Zprime for RF Scores for 20XX Series ##
##########################################
# Same aggregation comparison as the 10XX section above, for the 20XX
# plate series.
# NOTE(review): rf_scores_field_20XX is used below but is never loaded in
# this script (only the 10XX .Rdata is loaded) — confirm where it is
# expected to come from.
zprime_plate_20XX <- rf_scores_field_20XX %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_field"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_field")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_20XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# mean over fields
zprime_plate_20XX <- rf_scores_field_20XX %>%
    dplyr::filter(Compound %in% c("PC", "NC")) %>%
    dplyr::group_by(Plate_Name, Compound, Well_ID) %>%
    dplyr::summarize(infectivity_probpos_well = mean(infectivity_probpos_field)) %>%
    dplyr::ungroup() %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_well"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_well")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_20XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
# median over fields
zprime_plate_20XX <- rf_scores_field_20XX %>%
    dplyr::filter(Compound %in% c("PC", "NC")) %>%
    dplyr::group_by(Plate_Name, Compound, Well_ID) %>%
    dplyr::summarize(infectivity_probpos_well = median(infectivity_probpos_field)) %>%
    dplyr::ungroup() %>%
    plyr::ddply(c("Plate_Name"), function(rf_scores){
        MPStats::Zprime(
            positives = rf_scores %>%
                dplyr::filter(Compound == "PC") %>%
                magrittr::extract2("infectivity_probpos_well"),
            negatives = rf_scores %>%
                dplyr::filter(Compound == "NC") %>%
                magrittr::extract2("infectivity_probpos_well")) %>%
        data.frame(Zprime=.)
    })
zprime_plate_20XX %>% dplyr::summarize(mean(Zprime, na.rm=TRUE))
|
03b574fb7b3c8b5b9a2ea56a1a98d7497393ada2 | 2c38fc71287efd16e70eb69cf44127a5f5604a81 | /R/class_nonexportable.R | 4608965918808b191f459f1660b0df8f04307552 | [
"MIT",
"Apache-2.0"
] | permissive | ropensci/targets | 4ceef4b2a3cf7305972c171227852338dd4f7a09 | a906886874bc891cfb71700397eb9c29a2e1859c | refs/heads/main | 2023-09-04T02:27:37.366455 | 2023-09-01T15:18:21 | 2023-09-01T15:18:21 | 200,093,430 | 612 | 57 | NOASSERTION | 2023-08-28T16:24:07 | 2019-08-01T17:33:25 | R | UTF-8 | R | false | false | 453 | r | class_nonexportable.R | #' @export
# S3 method: convert a target's in-memory value into a serializable
# ("marshalled") representation — presumably before the value crosses a
# process boundary (confirm against the targets package documentation).
# The store argument is used only for dispatch; the conversion itself is
# delegated to store_marshal_object() for the target's own store.
store_marshal_value.tar_nonexportable <- function(store, target) {
  # Marshal the raw object held by the target's value wrapper.
  object <- store_marshal_object(target$store, target$value$object)
  # Re-wrap the marshalled object, preserving the configured iteration mode.
  target$value <- value_init(object, iteration = target$settings$iteration)
}
#' @export
# S3 method: inverse of store_marshal_value.tar_nonexportable() — restore
# the live in-memory object from its marshalled representation.
store_unmarshal_value.tar_nonexportable <- function(store, target) {
  # Unmarshal the stored representation back into a usable object.
  object <- store_unmarshal_object(target$store, target$value$object)
  # Re-wrap the restored object, preserving the configured iteration mode.
  target$value <- value_init(object, iteration = target$settings$iteration)
}
|
de56a933dbaf8579d0c84fc81c1010ba49b7a5b4 | 8759bff5309aa4052142433d6fcf1d941e4bcf69 | /myfunction.R | b6347f0c55ee27a46574f5540f563393d004bfeb | [
"MIT"
] | permissive | rstokes/datasciencecoursera | 2a0bc53f61715292c06de54b0ae98d164e5b3ae2 | 8b99142953a26ff87ab11d986dc29779a92c900c | refs/heads/master | 2021-01-21T13:25:18.063433 | 2016-06-04T03:53:00 | 2016-06-04T03:53:00 | 51,267,574 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 142 | r | myfunction.R | myfunction <- function(x){
y <- rnorm(100)
mean(y)
}
# Add independent standard-normal noise to each element of x.
# Not deterministic: the result depends on the current RNG state.
second <- function(x){
        x + rnorm(length(x))
}
# Return x cubed.
# NOTE(review): the argument n is accepted but never used — the exponent
# is fixed at 3 regardless of n.
cube <- function(x, n) {
        x^3
}
6c97900ccea184a8b740c1022cea3219acf4907d | 0807e7506199d730a49c909d6c2267cede51d114 | /1.Simple Linear Regression/Sol_calories_consumed.R | 78de395c95d8196b8c4701697e914398ac2a75bd | [] | no_license | barodiaanjali/MachineLearning | c090dbe1bce507d78184afa438d70f4b1673ebb5 | 44b5d43e642aea0100d28652c2f59db35575509e | refs/heads/master | 2021-09-04T11:26:58.294188 | 2018-01-18T08:36:33 | 2018-01-18T08:36:33 | 117,952,305 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 984 | r | Sol_calories_consumed.R | library(readr)
# Load the calories data set; expected columns include Cal (calories
# consumed) and Wgt (weight gained) — see the attach() note below.
calories_consumed <- read_csv("D:/ALL Assignments/3.Simple Linear Regression/calories_consumed.csv")
View(calories_consumed)
# NOTE(review): attach() is an anti-pattern (it masks names silently);
# prefer calories_consumed$Cal / dplyr verbs instead.
attach(calories_consumed)
head(calories_consumed)
# EDA
# 1. Scatter diagram: scatterplot
scatter.smooth(x=Cal,
               y=Wgt,
               main="Wgt ~ Cal")
#-----------------------------------------------------------
plot(Cal,Wgt)
cor(Cal,Wgt)
# Simple linear regression of weight gained on calories consumed.
lm(Wgt~Cal)
calories_model <-lm(Wgt~Cal)
summary(calories_model)
# 95% confidence intervals for the intercept and slope.
confint(calories_model,level = 0.95)
# Confidence vs prediction interval for a new observation at Cal = 1800.
predict(calories_model,data.frame(Cal=1800),interval="confidence")
predict(calories_model,data.frame(Cal=1800),interval = "prediction")
#predict(calories_model, data.frame(1800))
View(calories_model$residuals)
# Root-mean-square error; note the denominator is n - 1, not n.
sqrt(sum(calories_model$residuals^2)/(nrow(calories_consumed)-1)) #RMSE
# --------------------------------------
# Same RMSE computed from predictions (residual = fitted - observed).
predicted_val <- predict(calories_model)
residual = predicted_val - Wgt
sqrt(sum(residual^2)/(nrow(calories_consumed) -1 ))
|
72acce99bceea5e967317ed710ae41cc34d5ae40 | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.cloudhsmv2/man/delete_backup.Rd | c2c22f5386dea15814aa3bc31614e9410e79b89a | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 651 | rd | delete_backup.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudhsmv2_operations.R
\name{delete_backup}
\alias{delete_backup}
\title{Deletes a specified AWS CloudHSM backup}
\usage{
delete_backup(BackupId)
}
\arguments{
\item{BackupId}{[required] The ID of the backup to be deleted. To find the ID of a backup, use the DescribeBackups operation.}
}
\description{
Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request. For more information on restoring a backup, see the RestoreBackup operation.
}
\section{Accepted Parameters}{
\preformatted{delete_backup(
BackupId = "string"
)
}
}
|
61f932113e3160293829ec2e2d55d8e37c4058bf | ead003c677aef91563ed95f10023c60d3f716d06 | /hw-rstudio-project.R | c1d9b8587e3c4e0bc58b824872779dc6e5a97103 | [] | no_license | analise-viz-dados-1sem-2020/hw-rstudio-project-amandasalvador | d101d304313a34520f54bedc14c5b5e45f81cbea | f1aaf3b857dc24613dd2698413e71161c3658bd4 | refs/heads/master | 2022-09-19T10:14:17.831870 | 2020-06-01T21:47:39 | 2020-06-01T21:47:39 | 266,749,267 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 353 | r | hw-rstudio-project.R | library(magrittr); library(ggplot2)
# Load the project's helper functions (defines get_freq_nome()).
source("R/utils.R")

# Name to look up, and its per-decade birth frequencies.
x <- "Francisco"
df <- get_freq_nome(x)

# Build the plot once, then display and save it.
# BUG FIX: ggsave() is not a ggplot layer; chaining it onto the plot with
# `+` relied on undefined behaviour and fails on current ggplot2 versions.
# Call it separately with an explicit plot argument instead.
p <- df %>%
  ggplot(aes(x = decada, y = freq)) +
  geom_point() +
  geom_line(aes(group = 1)) +
  labs(title = paste("Nascimentos por década", x, sep = " - ")) +
  xlab("Década de nascimento") +
  ylab("Pessoas")
print(p)
ggsave("figures/nomes-decada.pdf", plot = p)
|
e94d5ded9d16fb3e236715bd4d419cdc77bcd826 | b2692cad2f83c97518acade33ef2e03b74b6e0df | /R/fastTransformWorldToVoxel.R | d5e7e36540acc9417bad32a71dcc59ba0d278f0a | [] | no_license | neuroimaginador/ni.quantification | 03e55e6f1eaaacebaa008ca3b045a307b78b9cfb | 5a3495c1b685eb573aa3f14d5c52129d3be69003 | refs/heads/master | 2020-04-04T19:14:51.144154 | 2018-10-22T09:01:01 | 2018-10-22T09:01:01 | 156,198,036 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 288 | r | fastTransformWorldToVoxel.R | ## TransformWorldToVoxel 2
# Convert world-space coordinates to 1-based voxel indices: each of the
# first three columns of `points` is divided by the matching voxel
# dimension, then 1 is added to every column (including any extra columns
# beyond the first three, which are otherwise left unscaled).
# NOTE(review): a commented-out earlier version divided by abs(voxelDims);
# this implementation does not take the absolute value — confirm intended
# behaviour for negative voxel dimensions.
fastTransformWorldToVoxel <- function(points, voxelDims) {
  voxel_points <- points
  # Divide column j (j = 1..3) elementwise by voxelDims[j].
  voxel_points[, 1:3] <- sweep(points[, 1:3, drop = FALSE], 2, voxelDims[1:3], "/")
  # Shift from the 0-based world origin to 1-based voxel indexing.
  return(voxel_points + 1)
}
c85173e4bc6e9acf718b8269ee9b27348549378d | e5f80ac83b9f4dd2006276972b76326cb0e24566 | /tests/testthat/testthat_uniformCrossover.R | bc568fa11cc373e0e1844a489d6d315b6f2be5b6 | [] | no_license | xdavidlin94/GA | 7fdb1ae945a6b1ffa2e9607a05bc21aa3f416bd3 | 072f0c0ba1af4b1e482272570f1a9fd2b96c7e0e | refs/heads/master | 2021-05-06T10:23:03.146760 | 2017-12-14T01:40:24 | 2017-12-14T01:40:24 | 114,168,336 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,200 | r | testthat_uniformCrossover.R | library(testthat)
# Unit tests for the uniformCrossover() genetic operator.
# source("geneticOperators.R")
context("uniformCrossover()")

# Calling with no arguments must fail on the missing required argument.
test_that("Invalid input", {
  expect_error(uniformCrossover(), 'argument "nFeatures" is missing, with no default')
})

# Crossover must return a list of two chromosomes, each the same length
# as its parents.
test_that("Offspring's length stays the same after Crossover",{
  parent1 <- sample(0:1, 10, replace = TRUE)
  parent2 <- sample(0:1, 10, replace = TRUE)
  nFeatures <- length(parent1)
  children <- uniformCrossover(parent1, parent2, nFeatures)
  expect_equal(class(children),"list")
  expect_equal(lengths(children),c(length(parent1),length(parent2)))
})

# With 20 loci the probability of two independent crossovers (or a child
# and its parent) coinciding is ~2^-20, so these checks are effectively
# deterministic.
test_that("Crossover produced different offsprinngs and is random",{
  parent1 <- sample(0:1, 20, replace = TRUE)
  parent2 <- sample(0:1, 20, replace = TRUE)
  nFeatures <- length(parent1)
  children1 <- uniformCrossover(parent1, parent2, nFeatures)
  children2 <- uniformCrossover(parent1, parent2, nFeatures)
  expect_false(identical(children1[[1]],children2[[1]]))
  expect_false(identical(children1[[2]],children2[[2]]))
  # BUG FIX: these previously compared the scalar parent1[[1]] / parent2[[1]]
  # against whole child vectors, which can never be identical, making the
  # assertions vacuously true. Compare the full parent chromosomes instead.
  expect_false(identical(parent1, children1[[1]]))
  expect_false(identical(parent1, children1[[2]]))
  expect_false(identical(parent2, children2[[1]]))
  expect_false(identical(parent2, children2[[2]]))
})
|
c435cb664b4c2724f8c17da9d1503430f43c552c | 78aa2a91d46ef0f030f30bc5adf3bddf100416cd | /man/printWithFootnote.Rd | 67fc43678f8aa06ee4d794ec1af553b561acec20 | [] | no_license | jrthompson54/DGE.Tools2 | c7239ee27c1d6c328d2c860618fbd09147ae5fc5 | f7f9badef7d94a7e637ca3716a073a1ddbf4f5d2 | refs/heads/master | 2021-08-06T04:30:18.547140 | 2021-05-12T13:15:13 | 2021-05-12T13:15:13 | 250,071,780 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,282 | rd | printWithFootnote.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printAndSave.R
\name{printWithFootnote}
\alias{printWithFootnote}
\title{Function printWithFootnote}
\usage{
printWithFootnote(
plotObject,
footnote,
fontface = "plain",
fontsize = 10,
hjust = -0.1
)
}
\arguments{
\item{plotObject}{A ggplot2 plotobject}
\item{footnote}{Text to print as a footnote beneath the plot}

\item{fontface}{fontface for the footnote (default = "plain")}
\item{fontsize}{size of the footnote font (default = 10)}
\item{hjust}{Specify horizontal justification (Default = -0.1)}
}
\value{
Prints the graphic object to the console
}
\description{
Print a ggplot2 object to the console/knitr report adding footnote text
under the plot. Use when you want the footnote to be underneath the plot
labels. Only prints the footnote once on a faceted plot.
}
\examples{
#Write to the console or knitr report
printWithFootnote(Myggplot, footnote = "Footnote Text")
#Capture to a file
png ("myplot.png", width=5, height=4, unit="in")
printWithFootnote(Myggplot, footnote = "Footnote Text")
invisible(dev.off())
}
\author{
John Thompson, \email{jrt@thompsonclan.org}
}
\keyword{bmp,}
\keyword{ggplot2,}
\keyword{jpeg,}
\keyword{pdf}
\keyword{png,}
\keyword{tiff,}
|
bbd73bcbb3dc7e7797675d1f2163fefd938625f1 | fdb15794952b1dcd74d4fc71b60b1eb1fb107106 | /modules/splitModelPlot/R/groupSplitRaster.R | 5bddcc4dc0a99943263db36e5e6a4f3f74d88e06 | [] | no_license | ianmseddy/borealBirdsAndForestry | 76f73e52fd22e56745f94398ce46a684579bc326 | b50c7ce48f85864d7b693664a11542e99a8058a2 | refs/heads/master | 2020-03-20T02:00:56.151676 | 2018-06-12T16:09:13 | 2018-06-12T16:09:13 | 123,337,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,548 | r | groupSplitRaster.R | groupSplitRaster <- function(spec, mod, abund, sim) {
rasterList <- list("distType" = sim$disturbanceType, "distYear" = sim$disturbanceYear, "land" = sim$landCover)
rasterList[["inputAbundances"]] <- abund
newlist <- Cache(Map, rasterList, path = file.path(dataPath(sim), names(rasterList)), f = splitRaster,
MoreArgs = list(nx = P(sim)$nx, ny = P(sim)$ny, buffer = P(sim)$buffer, rType = P(sim)$rType))
lengthvect <- 1:(P(sim)$nx*P(sim)$ny)
outList <- lapply(lengthvect, FUN = tileReorder, inList = newlist, origList = rasterList, sim = sim, passedModel = mod)
#Merge will not work if the list is named. Investigate why one day.
rasList <- lapply(outList, function(s) s[[1]])
mergePlot <- mergeRaster(rasList) #recombine tiles into single raster layer
#Make time series from sum
timeSums <- lapply(outList, function(s) s[[2]]) %>%
list.stack(.) %>%
apply(., MARGIN = 2, FUN = sum)
sumTS <- ts(timeSums, start = start(sim) + 1900, end = end(sim) + 1900, frequency = 1)
timePlot <- ggplot2::autoplot(sumTS)+
ggplot2::theme_bw() +
ggplot2::labs(title = paste(spec, "population"),
y = "Predicted population",
x = "year") +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, vjust = 0.5),
panel.grid.minor = ggplot2::element_blank())
#Return time series and raster as list
outList <- list("trendRas" = mergePlot, "timePlot" = timePlot, "population" = sumTS)
return(outList)
}
|
fd771c91de1f397a9c5e05cde3180d853e5339ae | ba570d3aa3231f58989a83b9fc57ccd232c98b3a | /R/Define.R | 0262ef3d17a6d7cb16f93d3c1ca1cf4e1557d79c | [
"MIT"
] | permissive | armbrustlab/flowPhyto | 4662446253ed6dd621dd6796aa24ec4d533b44c0 | d527eac8a7aa3e0370e70809dbb885e44901717a | refs/heads/master | 2021-01-19T22:10:58.695221 | 2018-03-27T18:18:23 | 2018-03-27T18:18:23 | 8,489,994 | 2 | 0 | null | 2018-03-27T18:18:24 | 2013-02-28T22:56:24 | R | UTF-8 | R | false | false | 2,576 | r | Define.R |
## DEFINE DEFAULT POPULATION DEFINITION PARAMETERS ##
# Default population-definition table: one row per phytoplankton population
# (plus beads), giving its abbreviation, display title, gating rectangle
# (xmin/ymin/xmax/ymax on the channels named by xvar/yvar), plot color,
# clustering cutoff (u.co) and detection limit (lim; NA = none).
# I() keeps the character columns as-is (no factor conversion).
POP.DEF <- data.frame(
	abrev = I(c('beads','synecho','crypto','cocco','diatoms','prochloro','ultra','nano','pico')),
	title = I(c('Beads','Synechococcus','Cryptophyte','Coccolithophores','Elongated','Prochlorococcus','Ultraplankton','Nanoplankton','Picoplankton')),
	xmin = c(0.5, 0.5, 3.5, 2.0, 2.0, 1.0, 2.0, 3.5, 1.0) * 10^4,
	ymin = c(2.0, 0.5, 3.5, 2.0, 3.0, 1.0, 2.0, 3.0, 1.5) * 10^4,
	xmax = c(6.5, 3.5, 6.5, 4.5, 6.5, 2.0, 4.5, 6.5, 3.0) * 10^4,
	ymax = c(6.5, 3.0, 6.5, 4.5, 6.5, 2.0, 4.5, 6.5, 3.5) * 10^4,
	color = I(c('gray40','tan2','tomato3','blueviolet','gold','violetred4','palegreen3','darkcyan','lightseagreen')),
	xvar = I(c('chl_small', rep('pe', 2),rep('fsc_small', 6))),
	yvar = I(c('pe',rep('chl_small', 2), 'fsc_perp', 'chl_big', rep('chl_small', 2), 'chl_big', 'chl_small')),
	u.co = c(0.05, 0.25, 0.25, 0.5, 0.25, 0.50, 0.50, 0.50, 0.50),
	lim = c(0.5, 0.5, 0.5, 0.5, 0.5, NA, NA, NA, NA) * 10^4
	)
# Index the table by population abbreviation.
row.names(POP.DEF) <- as.character(POP.DEF$abrev)
## Read a population-definition table from disk, falling back to the
## hard-coded default POP.DEF when no file exists (and persisting the
## default to the requested path).
##
## Args:
##   pop.def.tab.path: path to a pop.def.tab file, or to a directory that
##                     should contain one.
## Returns: the population definition data frame, with row names set to
##   the population abbreviations.
readPopDef <- function(pop.def.tab.path){
	## check to see if there is an externally defined pop definition table
	## BUG FIX: isTRUE() guards against file.info() returning NA for a
	## nonexistent path — the old code crashed with "missing value where
	## TRUE/FALSE needed" instead of falling through to the default.
	if(isTRUE(file.info(pop.def.tab.path)$isdir))
		pop.def.tab.path <- paste(pop.def.tab.path,'/pop.def.tab',sep='')
	if(file.exists(pop.def.tab.path)){
		pop.def <- read.delim(pop.def.tab.path, as.is=TRUE)
		if(!validatePopDef(pop.def))
			stop('This is not a valid pop.def file. please read the documentation concerning proper format')
		rownames(pop.def) <- as.character(pop.def$abrev)
		return(pop.def)
	}else{ # no file found: persist the hard-coded default and return it
		warning('No pop.def.tab file found. Writing hardcoded one into specified directory')
		write.table(POP.DEF, pop.def.tab.path, quote=FALSE, sep='\t', row.names=TRUE)
		return(POP.DEF)
	}
}
## Check that a user-supplied pop.def table has (at least) the columns of
## the default POP.DEF and that its x/y variables are known channel names.
##
## Args:
##   pop.def: a candidate population-definition data frame.
## Returns: TRUE if the table passes both checks, FALSE otherwise (with a
##   warning describing the failure).
validatePopDef <- function(pop.def){
	valid <- TRUE
	if(!all(names(POP.DEF) %in% names(pop.def))){
		warning("not all of the names in the default pop def match those in your custom one")
		valid <- FALSE
	}
	## BUG FIX: pop.def is read with as.is=TRUE, so xvar/yvar are character
	## vectors and levels() returned NULL, making this check always pass
	## silently. Compare the actual values instead.
	if(!all(unique(c(pop.def$xvar, pop.def$yvar)) %in% CHANNEL.CLMNS)){
		warning("not all of the levels of your x & y var columns of pop def match the global CHANNEL.CLMNS")
		valid <- FALSE
	}
	return(valid)
}
a94f7f873fe0ebf16babfea827dbc309ec0cc620 | f4b3038c65c88be7c460ae7d1ef73fab7e3221fa | /man/check_rhat.Rd | 46e45702b3f93e9a78fd452b0e4ebfe87209f85d | [] | no_license | cran/BayesianFROC | 8d74c554f82adbf3f73667a5b7eebb5174b4dbad | 9abf77e16c31284547960b17d77f53c30ea0d327 | refs/heads/master | 2022-02-04T14:54:01.788256 | 2022-01-23T06:22:43 | 2022-01-23T06:22:43 | 185,827,147 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,522 | rd | check_rhat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_rhat.R
\name{check_rhat}
\alias{check_rhat}
\title{Diagnosis of MCMC sampling}
\usage{
check_rhat(StanS4class, summary = FALSE, digits = 3)
}
\arguments{
\item{StanS4class}{An S4 object of class \emph{\code{ \link{stanfitExtended}}} which is an inherited class from the S4 class \code{stanfit}.
This \R object is a fitted model object
as a return value of the function \code{\link{fit_Bayesian_FROC}()}.
To be passed to \code{\link{DrawCurves}()} ... etc}
\item{summary}{Logical: \code{TRUE} or \code{FALSE}. Whether to print a verbose summary. If \code{TRUE}, a verbose summary is printed in the \R console; if \code{FALSE}, the output is minimal. (In hindsight, this argument would have been better named \code{verbose}.)}
\item{digits}{a positive integer, indicating the digit of R hat printed in R/R-studio console}
}
\value{
Logical, that is \code{TRUE} or \code{FALSE}. If model converges then \code{TRUE}, and if not \code{FALSE}.
}
\description{
This function evaluate \eqn{R} hat statistics for any fitted model object of class \code{stanfit}.
}
\details{
It evaluates whether or not r hat statistics are close to 1.
}
\references{
Gelman A. \& Rubin, D.B. (1992). Inference from Iterative Simulation Using Multiple Sequences, Statistical Science, Volume 7, Number 4, 457-472.
}
\author{
\strong{betanalpha} (one of the Stan developers), whose widely used diagnostic function is adapted here with minor modifications.
}
|
b5bf8a5a16874bfa7bdafb3806a8b2e51afd9b3c | b7247d3a1ba418359e48f787b26700beca0c04be | /man/valorate.p.value.Rd | 9ca5ba1064d476b07621ec49d60a20ea0f3b04ef | [] | no_license | cran/valorate | fd564908042a6bd9a66b912afa8ec7b7740cffd8 | bd40ce2d0f20e5e4e9063b540c88a34206e361cb | refs/heads/master | 2021-01-21T20:53:04.294429 | 2016-10-09T23:23:03 | 2016-10-09T23:23:03 | 69,889,282 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,421 | rd | valorate.p.value.Rd | % File man/valorate.p.value.Rd
\name{valorate.p.value}
\alias{valorate.p.value}
\alias{valorate.p.value.sampling}
\alias{valorate.p.value.normal}
\alias{valorate.p.value.chisq}
\alias{valorate.p.value.gaussian}
\alias{valorate.p.value.weibull}
\alias{valorate.p.value.beta}
\alias{valorate.p.value.all}
\title{ESTIMATES THE P-VALUE OF THE LOG-RANK TEST}
\description{
Estimates the p-value using specific approximations to the log-rank.
}
\usage{
valorate.p.value.sampling(vro, vrsubo, lrv, z)
valorate.p.value.chisq(vro, vrsubo, lrv, z)
valorate.p.value.normal(vro, vrsubo, lrv, z)
valorate.p.value.gaussian(vro, vrsubo, lrv, z)
valorate.p.value.weibull(vro, vrsubo, lrv, z)
valorate.p.value.beta(vro, vrsubo, lrv, z)
valorate.p.value.all(vro, vrsubo, lrv, z)
}
\arguments{
\item{vro}{the valorate object.}
\item{vrsubo}{the subpop list object (see \link{prepare.n1}) or a numeric value representing n1 used to obtain the subpop.}
\item{lrv}{if provided, the log-rank value. It is needed for .sampling, .gaussian, .weibull, .beta, .normal, and .all .}
\item{z}{if provided, the log-rank value in z-score (divided by the approximated standard deviation). It is needed for .normal, .chisq, optionally to .all if normal and chisq are required.}
}
\details{
This family of functions estimates the p-value of the log-rank test using specific approximations.
The intended 'user' function in VALORATE is valorate.p.value.sampling, which is the function that is described in the publications. The rest of the functions are complementary for comparison with the classical approximations (chisq and normal) and for experimental purposes fitting each conditional log-rank distribution sampled (conditioned on k co-occurrences) with the specified distribution (gaussian, weibull, and beta). The function valorate.p.value.all is just a proxy to all calculations in the same function.
}
\value{the estimated p-value (times tails).
}
\references{
Trevino et al. 2016 \url{http://bioinformatica.mty.itesm.mx/valorateR}
}
\author{Victor Trevino \email{vtrevino@itesm.mx}}
\seealso{
\code{\link{new.valorate}}.
\code{\link{valorate.survdiff}}.
\code{\link{valorate.plot.empirical}}.
}
\examples{
## Create a random population of 100 subjects
## having 20 events
subjects <- numeric(100)
subjects[sample(100,20)] <- 1
vo <- new.valorate(rank=subjects, sampling.size=100000, verbose=TRUE)
groups <- numeric(100)
groups[sample(100,4)] <- 1 # only 4 subjects are within the 'mutated' group
pvr <- valorate.survdiff(vo, groups)
print(pvr)
# the same than the value of pvr
valorate.p.value.sampling(vo, vo@subpop[["subpop4"]], attributes(pvr)[[1]]["LR"])
# the same than the value of pvr
valorate.p.value.sampling(vo, 4, attributes(pvr)[[1]]["LR"])
#classical approximations:
valorate.p.value.normal(vo, 4, attributes(pvr)[[1]]["LR"], attributes(pvr)[[1]]["Z"])
valorate.p.value.chisq(vo, 4, attributes(pvr)[[1]]["LR"], attributes(pvr)[[1]]["Z"])
# approximations of the conditional log-rank sampled density
valorate.p.value.gaussian(vo, 4, attributes(pvr)[[1]]["LR"])
valorate.p.value.beta(vo, 4, attributes(pvr)[[1]]["LR"])
valorate.p.value.weibull(vo, 4, attributes(pvr)[[1]]["LR"])
# all above can be get by:
valorate.p.value.all(vo, 4, attributes(pvr)[[1]]["LR"], attributes(pvr)[[1]]["Z"])
# Estimate a p-value a given log-rank
prepare.n1(vo, 50)
valorate.p.value.all(vo, 50, 0, 0) # 0 log-rank, 0 z-score
}
|
8a71c0f650144a14c2314c47ee2ad8ab6754bee2 | 207341f87a5c7663caac36de2b58b6ccf1aff370 | /man/rand_meta.Rd | 9ae9c19a098d5fadf239f552e3e51280c08783d5 | [] | no_license | trevorjwilli/CommSimABCR | fa770ebd4938cf3f665f52d759deef4945750bc9 | 3ec64319b9e9b2b4ccf091f84905dbc3e9cb4469 | refs/heads/master | 2023-03-13T20:23:31.158520 | 2021-03-08T22:20:22 | 2021-03-08T22:20:22 | 269,144,264 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,099 | rd | rand_meta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utility_functions.R
\name{rand_meta}
\alias{rand_meta}
\title{Random metacommunity matrix.}
\usage{
rand_meta(N, S, J, min.spec = 2)
}
\arguments{
\item{N}{Numeric, the number of communities.}
\item{S}{Numeric, the number of species.}
\item{J}{Numeric vector, the number of individuals within each community. If
length = 1 gives all communities the same number of individuals.}
\item{min.spec}{Numeric, the minimum number of species within communities.}
}
\value{
Returns a numeric matrix where rows are communities, columns are species,
and cell ij is the count of species j in community i.
}
\description{
Creates a random metacommunity matrix where cell ij is the number of species j in
community i.
}
\details{
This function creates a random metacommunity matrix in which species
absolute abundances (counts) within communities are recorded.
}
\examples{
rand_meta(N = 5, S = 5, J = c(100, 100, 200, 40, 300), min.spec = 2)
rand_meta(N = 5, S = 5, J = 500)
\dontrun{
rand_meta(N = 5, S = 5, J = c(10, 10))
}
}
|
081a5f2ceaef5044c9296755c99e5e6cb7318605 | ad2f9444f76517f404238878fe7f788dc8c11197 | /R/plotting.R | 7423caa2cd64f8808c48d4493e467e7c282212a5 | [] | no_license | davidtgonzales/Bayesian-Fitting | 5a90e928d01f6b7577186a509bc6a4991cb0e591 | ea18f3ab444340fdadc17d9c01d33d74cb5f0f2c | refs/heads/master | 2016-08-11T12:50:18.827792 | 2016-01-22T03:37:41 | 2016-01-22T03:37:41 | 50,153,856 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,013 | r | plotting.R | PlotHist<-function(data,group,factor,units,range,side,colors){
  #Plots overlaid density curves (of log10(value)) for each level of one
  #group x factor combination on a single set of axes; one curve per
  #replicate, one color per level.
  #
  # Args (documented here because the function header sits on the line above):
  #   data   : data frame with columns group, factor, level, replicate, value
  #   group, factor : values used to subset `data`
  #   units  : unit string used between level and factor in the legend labels
  #   range  : length-4 numeric c(xmin, xmax, ymin, ymax)
  #   side   : 'right' or 'left' -- legend corner (any other value = no legend)
  #   colors : vector of colors, one per level
  subset<-data[data$group==group & data$factor==factor,]
  #colors<-rev(heat.colors(length(unique(subset$level))))
  # Plot limits come packed in `range`
  xrange<-c(range[1],range[2])
  yrange<-c(range[3],range[4])
  # border: 1 until the first curve has drawn the axes; color: index into `colors`
  border<-c(1)
  color<-c(1)
  # NOTE(review): levels() already returns unique values, and it returns the
  # factor's *full* level set (not just the levels present in this subset) --
  # confirm that is intended, since an absent level would yield an empty density.
  levels<-sort(unique(levels(subset$level)))
  for (i in levels){
    for (j in unique(subset$replicate)[1:length(unique(subset$replicate))]){
      # Kernel density of log10(value) for this level/replicate combination.
      # NOTE(review): density() has no 'log' argument; log='x' appears to be
      # silently ignored by density.default -- confirm.
      density<-density(log10(subset[subset$level==i & subset$replicate==j,]$value),log='x',na.rm=TRUE)
      if (border==1){
        # First curve: draw a fresh plot with axes, labels and grid ...
        plot(density,main='',xlab='log10(RFU)',ylab='Density',lwd=2,ylim=c(yrange[1],yrange[2]),xlim=c(xrange[1],xrange[2]),col=colors[color],cex.lab=1.7,cex.axis=1.5,cex=1.3)
        grid(NULL,NULL,lwd=1)
        border<-c(2)
        par(new=TRUE)
      }
      if (border!=1){
        # ... subsequent curves: overlay without axes, box or labels.
        plot(density,xaxt='n',yaxt='n',bty='n',xlab='',ylab='',main='',lwd=2,ylim=c(yrange[1],yrange[2]),xlim=c(xrange[1],xrange[2]),col=colors[color])
        par(new=TRUE)
      }
    }
    par(new=TRUE)
    color<-c(color+1)
  }
  # Legend: one entry per level, labelled "<level><units> <factor>"
  if (side=='right'){
    legend('topright',yrange[2],paste(levels,factor,sep=paste(units,' ',sep='')),col=colors,box.lty=0,bty='n',lty=1,lwd=3)
  }
  else if (side=='left'){
    legend('topleft',yrange[2],paste(levels,factor,sep=paste(units,' ',sep='')),col=colors,box.lty=0,bty='n',lty=1,lwd=3)
  }
}
library(scales)
PlotCurve<-function(data,times,color,limits,borders,xlabel,ylabel,mainlabel){
  # Scatter-plots replicate measurements against time, either as a fresh plot
  # (borders == 'yes') or overlaid on the current device (borders == 'no').
  #
  # Args:
  #   data    : replicate x timepoint table; rep(times, 3) implies 3 replicate
  #             rows are assumed -- TODO confirm
  #   times   : x-axis time points
  #   color   : point color (run through scales::alpha with alpha = 1, i.e.
  #             fully opaque)
  #   limits  : length-4 numeric c(xmin, xmax, ymin, ymax)
  #   borders : 'yes' or 'no' (see above; any other value plots nothing but
  #             still draws the grid)
  #   xlabel, ylabel : axis labels, used only when borders == 'yes'
  #   mainlabel      : NOTE(review): accepted but never used
  lightcolor<-alpha(color,1)
  if (borders=='no'){
    # Overlay on the existing plot without axes, box or labels.
    par(new=TRUE)
    plot(rep(times,3),t(data),pch=1,xlim=c(limits[1],limits[2]),ylim=c(limits[3],limits[4]),col=lightcolor,xaxt='n',yaxt='n',bty='n',xlab='',ylab='',main='',cex=0.8)
    par(new=FALSE)
  }
  else if (borders=='yes'){
    plot(rep(times,3),t(data),pch=1,xlim=c(limits[1],limits[2]),ylim=c(limits[3],limits[4]),col=lightcolor,xlab=xlabel,ylab=ylabel,main='',cex.lab=1.7,cex.axis=1.5,cex=0.8)
  }
  grid(NULL, NULL, lwd = 1)
}
|
91a30e80d0479ec4c4468ab60099e459c7415f15 | d8ad6a7bf9c6a19dbbe22f594f217e771400959f | /PredictionModel/generatingSocialModelPredictions.R | 9bdf55720c519ce5513e2cf66b1056d477b72012 | [] | no_license | jernlab/social-prediction | 4aa21cf1b029a4a9146a3671785aac4a600264ee | 46d6e8d70aea3ac0b66d862893d12500818255e6 | refs/heads/master | 2022-08-06T07:27:28.241082 | 2022-07-28T03:58:35 | 2022-07-28T03:58:35 | 250,059,121 | 1 | 0 | null | 2022-05-09T19:32:59 | 2020-03-25T18:29:45 | R | UTF-8 | R | false | false | 5,501 | r | generatingSocialModelPredictions.R | #Non-Social Model Predictions
library(purrr)
library(ggplot2)
computeModelPosterior_social <- function(t_total, t, t_total_info, b0, b1){
  # Posterior P(t_total | t) for the social prediction model.
  #
  # Args:
  #   t_total      : candidate total duration in P(t_total | t)
  #   t            : observed elapsed duration in P(t_total | t)
  #   t_total_info : (domain size x 2) table; column 1 holds t_total values
  #                  (assumed sorted ascending), column 2 holds P(t_total)
  #   b0, b1       : logistic coefficients of the likelihood term
  #
  # Cleaned up relative to the original: removed dead locals (num_rows,
  # t_prior, given_t_total_prior, p_totals) and the fragile
  # `-c(1:startIndex-1)` subsetting, which silently relied on `:` binding
  # tighter than `-` and on negative indexing ignoring the 0 produced.
  t_total_vals <- t_total_info[[1]]
  t_total_probs <- t_total_info[[2]]

  # First index whose value is >= t; candidates below t are infeasible.
  # NOTE: assumes t <= max(t_total_vals); otherwise this is NA (as before).
  startIndex <- which(t_total_vals >= t)[1]

  # Likelihood of t given t_total: logistic in utility = 1 / (t_total - t),
  # with the utility capped at 1000 when t_total == t (avoids division by 0).
  utility <- if (t_total == t) 1000 else 1 / (t_total - t)
  likelihood <- 1 / (1 + exp(-(b0 + b1 * utility)))

  # Prior P(t_total): 0 when the value is absent from the table.
  t_total_idx <- which(t_total_vals == t_total)[1]
  t_total_prior <- if (is.na(t_total_idx)) 0 else t_total_probs[t_total_idx]

  # Normalizing constant: sum over all feasible totals >= t of
  # P(t_total) / t_total * likelihood(t_total); the likelihood terms come
  # from getRegressFunc so numerator and denominator share one definition.
  feasible <- seq(startIndex, length(t_total_vals))
  t_totals <- t_total_vals[feasible]
  p_t_totals <- t_total_probs[feasible]
  likelihoodTerms <- getRegressFunc(t, b0, b1)(t_totals)
  p_t_and_o <- sum((p_t_totals / t_totals) * likelihoodTerms)

  # Bayes' rule
  likelihood * t_total_prior / p_t_and_o
}
# getSocialPosteriorFunc <- function(t_total_info, b0, b1){
# return (function (t_))
# }
getRegressFunc <- function(t, b0, b1){
  # Build a vectorized likelihood function over candidate totals t_total:
  # logistic in the utility 1 / (t_total - t), with the special value 1000
  # returned where t_total equals the observed t.
  function(t_total) {
    raw_utility <- 1 / (t_total - t)
    logistic_term <- 1 / (1 + exp(-(b0 + b1 * raw_utility)))
    ifelse(t_total == t, 1000, logistic_term)
  }
}
##Generate a single Social prediction
## Generate a single social prediction: the posterior median of t_total given t.
generateSocialPrediction <- function(t, b0, b1, t_total_info = probs){
  # Args:
  #   t            : observed elapsed duration
  #   b0, b1       : logistic likelihood coefficients
  #   t_total_info : prior table (values + probabilities). Defaults to the
  #                  global `probs` for backward compatibility, but can now
  #                  be supplied explicitly instead of relying on global state.
  dataP <- t_total_info
  maxTtotal <- max(dataP[[1]])
  x_space <- c(t:maxTtotal)

  # Posterior mass at every candidate t_total in [t, maxTtotal].
  # vapply (not sapply) guarantees a numeric vector.
  ttotalPosts <- vapply(x_space, function(x) {
    computeModelPosterior_social(x, t, dataP, b0, b1)
  }, numeric(1))
  allTtotalProbsGivenT <- data.frame(Ttotal = x_space, pTtotalGivenT = ttotalPosts)

  # Walk the cumulative mass until half of the total is covered; the value
  # just reached is the (unnormalized) posterior median.
  pTtotalGivenT <- allTtotalProbsGivenT$pTtotalGivenT
  medi <- sum(pTtotalGivenT) / 2
  cum <- 0
  idx <- 1
  lenpTtotal <- length(pTtotalGivenT)
  while (cum < medi && idx < lenpTtotal) {
    cum <- cum + pTtotalGivenT[idx]
    idx <- idx + 1
  }
  # NOTE: when x_space has length 1 this indexes position 0 and returns
  # numeric(0), exactly as the original did -- callers should pass
  # t < max(t_total_info[[1]]).
  pred <- allTtotalProbsGivenT$Ttotal[idx - 1]
  return(pred)
}
## Generate multiple social predictions for t's in a certain range
## Generate multiple social predictions for t's in a certain range.
## Predicts for every elapsed time from tMin (or t, when tMin is NULL) up to
## tMax, returning a data frame with columns t and pred.
generateMultipleSocialPredictions <- function(t, tMax, tMin=NULL, b0,b1){
  lower <- if (is.null(tMin)) t else tMin
  tVals <- c(lower:tMax)
  preds <- numeric(length(tVals))
  for (k in seq_along(tVals)) {
    print("Generating for ")
    print(tVals[k])
    preds[k] <- generateSocialPrediction(t = tVals[k], b0, b1)
  }
  data.frame(t = tVals, pred = preds)
}
## Create a social model with certain parameters. Returned function can be applied to
## vectors & lists.
## Create a social model with the given parameters fixed. The returned
## closure maps a single t to its prediction and can be applied to vectors
## and lists via sapply/lapply.
createSocialModel <- function(b0,b1){
  function(t) {
    generateSocialPrediction(t, b0, b1)
  }
}
# ## UNCOMMENT THIS BLOCK TO GENERATE PREDICTIONS INSIDE THIS FILE
# ## ========================================================================================
# ## Change this number to match the story you'd like to generate predictions for
# ## 1 = Cake
# ## 2 = Movie
# ## 3 = Podcast
# ## Predictions can take as long as 15 minutes to generate depending on your largest value of t you're predicting for.
#
# storyNum <- 1
#
# filename <- switch (storyNum, "../Data/cakeProbs.csv", "../Data/movieProbs.csv","../Data/podcastProbs.csv")
#
# probs <- read.csv(filename)
#
# startTimestamp <- timestamp()
# df <- generateMultipleSocialPredictions(t=10,tMax=110, tMin=1)
# generateSocialPrediction(45, 5, 2)
# endTimestamp <- timestamp()
#
# #
# plt <- ggplot(df) + geom_line(mapping = aes(x=t, y=pred), color="blue", size=1.3) + ylab("Prediction") + ggtitle("Non-Social Podcast Duration Predictions") + theme(axis.title.x = element_text(size=20, face="bold"),
# axis.title.y = element_text(size=20, face="bold"),
# axis.text.x = element_text(size=16),
# axis.text.y = element_text(size=16),
# plot.title = element_text(size=25, face="bold")
# )+coord_cartesian(xlim=c(0,120))
# plt
# ## ========================================================================================
# ## UNCOMMENT THIS BLOCK TO GENERATE PREDICTIONS INSIDE THIS FILE |
910b5dca1aca02cc78a8ae88ef2b4a0d67f8b715 | 884b0dd52cf742cf409025a6cf33bf4ab6dd97fa | /run_Analysis.R | 6f272f37b29f46d0ab1c1147ff322cf3be33cb41 | [] | no_license | JOMR92/gettingandcleaning | d6d228ab4e00f8d08506a1f017e8df3a0ced5f92 | 816d6c7099c6a883c7504e593aaf68ab93148d67 | refs/heads/master | 2020-03-14T09:45:59.585494 | 2018-04-30T04:03:13 | 2018-04-30T04:03:13 | 131,551,782 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,736 | r | run_Analysis.R | #Setting Working Directory
# Getting & Cleaning Data course project: merge the UCI HAR test and training
# sets, compute per-observation summaries, and export a tidy data set of
# averages by activity and subject.
library(dplyr)

# Set the working directory. (The original assigned setwd()'s return value --
# the *previous* directory -- to two unused variables, which served no purpose.)
setwd("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset")

# Activity label lookup table (currently unused downstream; kept for reference)
datalabels <- read.table("activity_labels.txt")

# Open and merge all the test data: subject ids, activity labels, feature matrix
testlabelsscript <- read.table("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset/test/y_test.txt")
testsetscript <- read.table("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset/test/X_test.txt")
testsubjectsscript <- read.table("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset/test/subject_test.txt")
testdfscript <- cbind(testsubjectsscript, testlabelsscript, testsetscript)
colnames(testdfscript) <- c("subjectnumber", "activitylabel", paste("Measurement", c(1:561), sep = ""))
testdfscript <- as_tibble(testdfscript)  # tbl_df() is deprecated in modern dplyr

# Open and merge all the training data
trainlabelsscript <- read.table("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset/train/y_train.txt")
trainsetscript <- read.table("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset/train/X_train.txt")
trainsubjectsscript <- read.table("/Users/jomr/Desktop/Coursera/Cleaningdata/Proyecto final/UCI HAR Dataset/train/subject_train.txt")
traindfscript <- cbind(trainsubjectsscript, trainlabelsscript, trainsetscript)
colnames(traindfscript) <- c("subjectnumber", "activitylabel", paste("Measurement", c(1:561), sep = ""))
traindfscript <- as_tibble(traindfscript)

# Add an origin-database identifier column
testdfscript <- mutate(testdfscript, origindatabase = "test")
traindfscript <- mutate(traindfscript, origindatabase = "training")

# Merge train_df and test_df
complete <- bind_rows(testdfscript, traindfscript)

# Per-observation mean and standard deviation across the 561 measurements.
# Bug fix: the original computed sd(c(Measurement1:Measurement561, na.rm=TRUE))
# under rowwise(); there `Measurement1:Measurement561` built a numeric sequence
# between two scalar cell values (not the measurement columns), and na.rm=TRUE
# was appended to the data as the value 1. Both silently produced wrong stdevs.
measurements <- select(complete, starts_with("Measurement"))
complete <- mutate(complete,
                   MeasurementMean = rowMeans(measurements, na.rm = TRUE),
                   stdev = apply(measurements, 1, sd, na.rm = TRUE))

# Keep only subject, activity, mean and sd
database <- select(complete, origindatabase, subjectnumber, activitylabel, MeasurementMean, stdev)

# Label the activities
database$activitylabel <- factor(database$activitylabel, labels = c("Walking", "Walking Upstairs", "Walking Downstairs", "Sitting", "Standing", "Laying"))

# Produce the final tidy db: average mean and sd per activity/subject pair
database <- group_by(database, activitylabel, subjectnumber)
tidydb <- dplyr::summarise(database, meanaverage = mean(MeasurementMean), meansd = mean(stdev))

# Export the final db
write.table(tidydb, file = "course_submission.csv")
write.table(tidydb, file = "course_submission.txt", sep = "\t")
c280eabd015fd66e77126f94bb31f1f9bffe5a16 | 001d55b6b688cb31a27f89346421425c18270bbe | /Script_1.R | fee260483589a80f43d57c442748b812e0e0237d | [] | no_license | sischmid/GIT_course | 4de5af2f4de5325a6beb5641450a89d5403ba60e | e6f2124d568f855f4bfd202870122541dbeb2a61 | refs/heads/master | 2020-11-25T18:58:16.757295 | 2019-12-18T10:12:42 | 2019-12-18T10:12:42 | 228,802,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 108 | r | Script_1.R | x <- seq(0,10)
# Draw 11 standard-normal responses for the x grid defined above, plot them,
# then fit and summarise a simple linear regression of y on x.
y <- rnorm(11)
plot(y ~ x)
fit <- lm(y ~ x)
summary(fit)
|
97e30a4a0bf0b1c6641b63722ea9f902e5697a0c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ecd/examples/ecd.sd.Rd.R | 19b66ee1db9bfb948093ef83138cd193bd0a64f2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 305 | r | ecd.sd.Rd.R | library(ecd)
### Name: ecd.sd
### Title: Standard deviation, variance, mean, skewness, and kurtosis of
### ecd
### Aliases: ecd.sd ecd.var ecd.mean ecd.skewness ecd.kurt ecd.kurtosis
### Keywords: stats
### ** Examples
# Construct an ecd distribution object (arguments presumably its two shape
# parameters -- see the ecd package docs) and query the summary statistics
# listed in the Rd title above.
d <- ecd(-1,1)
ecd.sd(d)        # standard deviation
ecd.var(d)       # variance
ecd.mean(d)      # mean
ecd.skewness(d)  # skewness
ecd.kurt(d)      # kurtosis
|
74372c5b0d87e22bc2e790daf59a5be4e866b12f | bc19f618954a43fafd369c8dcee81bc0760683e5 | /src/simulate.R | 1eb439c34641a8eeb3ff793ff0802e62a2e1a2bb | [] | no_license | thisismactan/Canada-2021 | 08fbf6b856c1fa494c44663921c615afa071f899 | b12e8e83a76b7ac4d56f298f6bec53f1b31e8538 | refs/heads/master | 2023-08-16T23:21:48.700968 | 2021-09-20T13:16:17 | 2021-09-20T13:16:17 | 396,624,728 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 17,273 | r | simulate.R | source("src/poll_averages.R")
source("src/shape_2021_data.R")
# Regions used throughout; must match the `region` values in the data files.
regions <- c("Atlantic", "Quebec", "Ontario", "Prairie", "Alberta", "British Columbia", "The frigid northlands")

# Quickly fit models
# Historical riding-level results, used to fit the swing models below.
district_results <- read_csv("data/district_results.csv")

# 2019 turnout by riding (used later to weight implied popular-vote shares).
votes_per_district <- district_results %>%
  filter(year == 2019) %>%
  group_by(district_code) %>%
  summarise(votes = sum(votes))

regional_natl_results <- read_csv("data/regional_natl_results.csv")

# Riding-level model: party vote share as a function of lagged share,
# regional swing, and incumbency, with Quebec-specific interactions.
district_model <- lm(pct ~ I(province_abbr == "QC") : (pct_lag + region_change + I(incumbent_running == party)) +
                       (pct_lag + region_change + I(incumbent_running == party)),
                     data = district_results %>%
                       filter(party %in% c("Bloc", "Conservative", "Green", "Liberal", "NDP"),
                              !incumbent_running %in% c("Independent", "People's")))

# Region-level models: non-Bloc parties via lagged share + national swing;
# Bloc via a no-intercept regression on its national share.
region_model <- lm(region_pct ~ region_lag + natl_change, data = regional_natl_results %>% filter(!party %in% c("Bloc", "People's")))
region_model_bloc <- lm(region_pct ~ 0 + natl_pct, data = regional_natl_results %>% filter(party == "Bloc"))

# National popular vote simulations
# (natl_polling_average / natl_poll_cov come from the sourced poll-average
# script; rmvn is presumably a multivariate-normal sampler loaded there --
# confirm which package provides it.)
set.seed(2021)
n_sims <- 1e4

natl_vote_sims <- rmvn(n_sims, mu = natl_polling_average$avg, sigma = natl_poll_cov) %>%
  as.data.frame() %>%
  dplyr::select(Liberal = V1, Conservative = V2, NDP = V3, Green = V4, Bloc = V5, `People's` = V6) %>%
  mutate(id = 1:n_sims) %>%
  melt(id.vars = "id", variable.name = "party", value.name = "natl_pct") %>%
  as_tibble()
# Initial forecast at the region level: project each simulated national vote
# onto regions through the regression models fit above.
region_preds <- regional_natl_results %>%
  filter(year == 2019) %>%
  dplyr::select(region, party, region_lag = region_pct, natl_lag = natl_pct) %>%
  right_join(natl_vote_sims, by = "party") %>%
  mutate(natl_change = natl_pct - natl_lag) %>%
  # Bug fix: the Bloc branch previously reused `region_model` verbatim,
  # although `region_model_bloc` (fit only on Bloc rows, on natl_pct) was
  # clearly the intended model -- it was fit above and otherwise unused here.
  mutate(pred = case_when(party != "Bloc" ~ predict(region_model, newdata = .),
                          party == "Bloc" ~ predict(region_model_bloc, newdata = .)))
## Add in simulated error (different by party)
# Per-party regional model MSEs estimated offline; used as error variances.
regional_model_mse <- read_csv("data/regional_model_mse.csv")

# Skeleton of party x region x simulation errors, filled in below.
# NOTE(review): "Bloc" is not in the expand.grid party vector, so the Bloc
# filter here is a no-op; Bloc rows are appended separately afterwards.
region_error_sims <- expand.grid(party = c("Liberal", "Conservative", "NDP", "Green", "People's"),
                                 region = regions,
                                 id = 1:n_sims) %>%
  arrange(party, region, id) %>%
  mutate(error = 0) %>%
  filter(!(party == "Bloc" & region != "Quebec")) %>%
  as_tibble()

# Draw N(0, mse) errors for every party/region cell.
for(p in unique(region_error_sims$party)) {
  region_mse <- regional_model_mse %>% filter(party == p) %>% pull(mse)
  for(r in unique(region_error_sims$region)) {
    region_error_sims[region_error_sims$party == p & region_error_sims$region == r, "error"] <- rnorm(n_sims, 0, sqrt(region_mse))
  }
}

# Bloc errors (Quebec only), using the Bloc model's residual SD.
region_error_sims <- region_error_sims %>%
  bind_rows(tibble(region = "Quebec", party = "Bloc", id = 1:n_sims, error = rnorm(n_sims, 0, summary(region_model_bloc)$sigma)))

# Regional polling simulations
# NOTE(review): list length 6 is hard-coded; presumably
# length(polling_regions) == 6 -- confirm against the sourced poll script.
regional_polling_sim_list <- vector("list", 6)

for(i in 1:length(polling_regions)) {
  region_polling_stats <- regional_polling_average %>%
    filter(region == polling_regions[i])
  regional_polling_sim_list[[i]] <- rmvn(n_sims, mu = region_polling_stats$avg, sigma = within_region_covariance[[i]]) %>%
    as.data.frame() %>%
    as_tibble() %>%
    mutate(id = 1:n_sims,
           region = polling_regions[i])
  names(regional_polling_sim_list[[i]]) <- c(as.character(region_polling_stats$party), "id", "region")
}

# Long format with per-(region, party) sampling variance of the poll draws.
regional_polling_sims <- bind_rows(regional_polling_sim_list) %>%
  melt(id.vars = c("id", "region"), variable.name = "party", value.name = "poll_sim") %>%
  as_tibble() %>%
  group_by(region, party) %>%
  mutate(poll_var = var(poll_sim)) %>%
  ungroup()

# Compute weighted average for region
# Inverse-variance blend of model prediction and polling; a missing source
# gets weight 0, and its sim value is zeroed so the average is not NA.
region_sims <- region_preds %>%
  left_join(region_error_sims, by = c("party", "region", "id")) %>%
  mutate(pred_sim = pred + error) %>%
  group_by(region, party) %>%
  mutate(pred_var = var(pred_sim)) %>%
  left_join(regional_polling_sims, by = c("id", "region", "party")) %>%
  mutate(poll_weight = 1 / poll_var,
         pred_weight = 1 / pred_var) %>%
  mutate(poll_weight = ifelse(is.na(poll_weight), 0, poll_weight),
         pred_weight = ifelse(is.na(pred_weight), 0, pred_weight),
         poll_sim = ifelse(poll_weight == 0, 0, poll_sim),
         pred_sim = ifelse(pred_weight == 0, 0, pred_sim),
         region_pct = (poll_weight * poll_sim + pred_weight * pred_sim) / (poll_weight + pred_weight)) %>%
  dplyr::select(id, party, region, region_pct) %>%
  ungroup() %>%
  arrange(party, region, id)
# Simulating at riding level
error_params <- read_csv("data/district_model_mse.csv")  # NOTE(review): read but never used below

## Initial predictions
# data_2021 comes from src/shape_2021_data.R; join on the simulated regional
# shares, derive the regional swing, and apply the riding-level model.
district_sims <- data_2021 %>%
  left_join(region_sims, by = c("region", "party")) %>%
  mutate(region_change = region_pct - region_lag) %>%
  mutate(pred_pct = predict(district_model, newdata = .))

## Add on errors
# Per-region covariance matrices of riding-level errors (parties correlated).
district_error_covariances <- read_rds("data/district_error_covariances.rds")

# Wide skeleton (one row per id x riding) that fixes the row order and ids
# used when attaching the multivariate error draws below.
district_errors <- district_sims %>%
  filter(party != "Independent") %>%
  dplyr::select(id, region, district_code, party) %>%
  arrange(district_code, party, id) %>%
  mutate(error = 0) %>%
  spread(party, error) %>%
  dplyr::select(id, region, district_code, Liberal, Conservative, NDP, Green, Bloc, `People's`)

error_sim_list <- vector("list", length(regions))

# Quebec draws 6 correlated party errors (incl. Bloc); other regions 5.
for(i in 1:length(regions)) {
  n_district_sims <- district_errors %>%
    filter(region == regions[i]) %>%
    nrow()
  if(regions[i] != "Quebec") {
    error_sim_list[[i]] <- rmvn(n_district_sims, mu = rep(0, 5), sigma = district_error_covariances[[i]]) %>%
      as.data.frame() %>%
      dplyr::select(Liberal = V1, Conservative = V2, NDP = V3, Green = V4, `People's` = V5) %>%
      mutate(id = district_errors %>% filter(region == regions[i]) %>% pull(id),
             district_code = district_errors %>% filter(region == regions[i]) %>% pull(district_code)) %>%
      as_tibble()
  } else {
    error_sim_list[[i]] <- rmvn(n_district_sims, mu = rep(0, 6), sigma = district_error_covariances[[i]]) %>%
      as.data.frame() %>%
      dplyr::select(Liberal = V1, Conservative = V2, NDP = V3, Green = V4, Bloc = V5, `People's` = V6) %>%
      mutate(id = district_errors %>% filter(region == regions[i]) %>% pull(id),
             district_code = district_errors %>% filter(region == regions[i]) %>% pull(district_code)) %>%
      as_tibble()
  }
}

# Long format; parties absent in a region produced NA errors -- drop them.
district_errors <- bind_rows(error_sim_list) %>%
  melt(id.vars = c("id", "district_code"), variable.name = "party", value.name = "pred_error") %>%
  filter(!is.na(pred_error)) %>%
  as_tibble()
## Simulate from riding-level polling
# Keep only the major parties' riding poll averages and variances.
district_poll_averages_simp <- district_poll_averages %>%
  dplyr::select(district_code, district, party, district_avg, district_var) %>%
  filter(party %in% c("Liberal", "Conservative", "NDP", "Green", "People's", "Bloc"))

# One normal draw per simulation around each riding/party poll average.
district_polling_sims <- do.call("rbind", replicate(n_sims, district_poll_averages_simp, simplify = FALSE)) %>%
  arrange(district_code, party) %>%
  mutate(id = rep(1:n_sims, n() / n_sims),
         poll_sim = rnorm(n(), district_avg, sqrt(district_var))) %>%
  dplyr::select(id, district_code, district, party, poll_sim)

# Undecided share per simulated riding poll (whatever mass is left over).
district_undecided_sims <- district_polling_sims %>%
  group_by(id, district_code, district) %>%
  summarise(undecided = 1 - sum(poll_sim)) %>%
  arrange(district_code, district, id)

# Dirichlet concentrations proportional to decided support (total mass 10),
# used to split each riding's undecided pool among the parties.
undecided_dirichlet_params <- district_poll_averages_simp %>%
  arrange(district_code, party) %>%
  group_by(district_code) %>%
  mutate(alpha = 10 * district_avg / sum(district_avg)) %>%
  dplyr::select(-district_avg, -district_var) %>%
  mutate(alpha = pmax(0, alpha)) %>%
  spread(party, alpha, fill = 0) %>%
  ungroup()

district_codes_with_polling <- unique(district_undecided_sims$district_code)
undecided_allocation_list <- vector("list", length(district_codes_with_polling))

# NOTE(review): 1:length(...) misbehaves if no riding has polling
# (seq_along would be safer); rdirichlet presumably comes from a package
# loaded by the sourced scripts (gtools/MCMCpack) -- confirm.
for(i in 1:length(district_codes_with_polling)) {
  district_dirichlet_params <- undecided_dirichlet_params %>%
    filter(district_code == district_codes_with_polling[i]) %>%
    dplyr::select(-district_code, -district) %>%
    as.matrix() %>%
    as.vector()
  # Simulate undecided fractions
  undecided_allocation_list[[i]] <- rdirichlet(n_sims, district_dirichlet_params) %>%
    as.data.frame() %>%
    mutate(district_code = district_codes_with_polling[i],
           id = 1:n_sims) %>%
    as_tibble()
}

# Long format: fraction of the undecided pool allocated to each party.
# NOTE(review): the V1..V6 -> party mapping assumes spread() above left the
# party columns in Liberal..Bloc order; spread sorts columns by name unless
# `party` carries a custom factor-level order -- confirm the labels align.
district_undecided_frac <- bind_rows(undecided_allocation_list) %>%
  dplyr::select(id, district_code, Liberal = V1, Conservative = V2, NDP = V3, Green = V4, `People's` = V5, Bloc = V6) %>%
  melt(id.vars = c("id", "district_code"), variable.name = "party", value.name = "undecided_frac") %>%
  as_tibble()

# Convert fractions to vote-share points and attach to the poll draws.
district_undecided_allocation <- district_undecided_frac %>%
  left_join(district_undecided_sims, by = c("id", "district_code")) %>%
  mutate(undecided_pct = undecided * undecided_frac) %>%
  dplyr::select(id, district_code, party, undecided_pct)

## Add undecided onto sims
district_polling_sims <- district_polling_sims %>%
  left_join(district_undecided_allocation, by = c("id", "district_code", "party")) %>%
  mutate(poll_sim = poll_sim + undecided_pct) %>%
  group_by(district_code) %>%
  mutate(poll_weight = 1 / var(poll_sim)) %>%
  ungroup()

## Weighted average
# Inverse-variance blend of model prediction and riding polling (polling
# weight 0 where a riding has no polls); final shares floored at 0.
district_sims <- district_sims %>%
  left_join(district_errors, by = c("district_code", "id", "party")) %>%
  mutate(pred_sim = pred_pct + pred_error) %>%
  group_by(district_code, party) %>%
  mutate(pred_weight = 1 / var(pred_sim)) %>%
  ungroup() %>%
  left_join(district_polling_sims, by = c("id", "district_code", "district", "party")) %>%
  mutate(poll_weight = ifelse(is.na(poll_weight), 0, poll_weight),
         poll_sim = ifelse(is.na(poll_sim), 0, poll_sim),
         pct = (pred_weight * pred_sim + poll_weight * poll_sim) / (pred_weight + poll_weight)) %>%
  mutate(pct = pmax(pct, 0)) %>%
  dplyr::select(id, region, province, district_code, district, party, candidate, incumbent_running, pct) %>%
  filter(!is.na(id))

# Persist the first 1,000 simulations for the Shiny app.
district_sims %>%
  filter(id <= 1000) %>%
  dplyr::select(id, district_code, district, party, pct) %>%
  mutate(pct = round(pct, 4)) %>%
  write_csv("shiny-app/data/district_sims_1-1000.csv")

# Winner of each riding in each simulation (highest simulated share).
district_winners <- district_sims %>%
  group_by(id, district_code) %>%
  arrange(desc(pct)) %>%
  dplyr::slice(1) %>%
  ungroup() %>%
  dplyr::select(id, district_code, winner = party)
# Implied national and provincial popular vote
# Weight simulated riding shares by 2019 turnout. The /0.995 denominator
# presumably reserves ~0.5% of the vote for parties not modelled -- confirm.
## National
natl_vote_implied <- district_sims %>%
  left_join(votes_per_district, by = "district_code") %>%
  mutate(pred_sim_votes = pct * votes) %>%
  group_by(id, party) %>%
  summarise(total_votes = sum(votes),
            party_votes = sum(pred_sim_votes)) %>%
  group_by(id) %>%
  # Recalculate denominator
  mutate(total_votes = sum(party_votes) / 0.995,
         pct = party_votes / total_votes)

## Provincial
provincial_vote_implied <- district_sims %>%
  left_join(votes_per_district, by = "district_code") %>%
  mutate(pred_sim_votes = pct * votes) %>%
  group_by(id, province, party) %>%
  summarise(total_votes = sum(votes),
            party_votes = sum(pred_sim_votes)) %>%
  group_by(id, province) %>%
  # Recalculate denominator
  mutate(total_votes = sum(party_votes) / 0.995,
         pct = party_votes / total_votes)
# Summary stats
# 5th/mean/95th percentile of each party's simulated share by riding.
district_summary_stats <- district_sims %>%
  filter(party != "Independent") %>%
  group_by(district_code, party) %>%
  summarise(pct_05 = quantile(pct, 0.05),
            mean = mean(pct),
            pct_95 = quantile(pct, 0.95))

# Win probability per riding = share of simulations the party wins.
district_probs <- district_sims %>%
  group_by(district_code, id) %>%
  filter(pct == max(pct)) %>%
  group_by(province, district, district_code, party) %>%
  summarise(prob = n() / n_sims) %>%
  spread(party, prob) %>%
  arrange(district_code) %>%
  dplyr::select(province, district_code, district, Liberal, Conservative, NDP, Green, Bloc, `People's`)

district_probs %>% print(n = Inf)

# Seats won per party per simulation (zero-filled via spread + melt).
seat_sims <- district_sims %>%
  group_by(id, district_code) %>%
  filter(pct == max(pct)) %>%
  group_by(id, party) %>%
  summarise(seats = n()) %>%
  spread(party, seats, fill = 0) %>%
  melt(id.vars = "id", variable.name = "party", value.name = "seats") %>%
  as_tibble() %>%
  mutate(party = as.character(party))

# Classify each simulation: "<party> majority/minority" or "Tie".
# NOTE(review): the winner case_when only handles 1 or 2 tied parties; a
# 3-way seat tie would yield NA -- presumably negligible, but confirm.
sim_results <- seat_sims %>%
  group_by(id) %>%
  filter(seats == max(seats)) %>%
  mutate(tied_parties = n()) %>%
  ungroup() %>%
  mutate(winner = case_when(tied_parties == 1 ~ party,
                            tied_parties == 2 ~ "Tie"),
         win_type = case_when(seats > 338/2 & winner != "Tie" ~ "Majority",
                              seats <= 338/2 & winner != "Tie" ~ "Minority",
                              winner == "Tie" ~ ""),
         result = paste(winner, tolower(win_type)) %>% trimws()) %>%
  distinct(id, result)

# Probability of each overall outcome, stamped with today's date.
result_probs <- sim_results %>%
  group_by(result) %>%
  summarise(prob = n() / n_sims) %>%
  mutate(date = today()) %>%
  dplyr::select(date, result, prob)
## At provincial level
# Province code -> name/abbreviation/region lookup (codes are the leading
# digits of district_code).
province_key <- tibble(province_code = c(10, 11, 12, 13, 24, 35, 46, 47, 48, 59, 60, 61, 62),
                       province = c("Newfoundland and Labrador", "Prince Edward Island", "Nova Scotia", "New Brunswick", "Quebec",
                                    "Ontario", "Manitoba", "Saskatchewan", "Alberta", "British Columbia", "Yukon", "Northwest Territories",
                                    "Nunavut"),
                       province_abbr = c("NL", "PE", "NS", "NB", "QC", "ON", "MB", "SK", "AB", "BC", "YT", "NT", "NU"),
                       region = c("Atlantic", "Atlantic", "Atlantic", "Atlantic", "Quebec", "Ontario", "Prairie", "Prairie", "Alberta",
                                  "British Columbia", "The frigid northlands", "The frigid northlands", "The frigid northlands"))

# Attach per-province seat counts to the implied provincial vote shares
# (0 seats where a party won nothing in that province/simulation).
province_sims <- provincial_vote_implied %>%
  left_join(district_winners %>%
              mutate(province_code = floor(district_code / 1000)) %>%
              left_join(province_key %>% dplyr::select(province_code, province), by = "province_code") %>%
              group_by(id, province, party = winner) %>%
              summarise(seats = n()),
            by = c("id", "province", "party")) %>%
  mutate(seats = ifelse(is.na(seats), 0, seats))

# Export provincial + national sims for the Shiny app.
province_sims %>%
  bind_rows(natl_vote_implied %>% mutate(province = "National") %>% left_join(seat_sims, by = c("id", "party"))) %>%
  write_csv("shiny-app/data/province_sims.csv")

# 5/50/95 percentiles of vote share and seats by province and party.
province_summary_stats <- province_sims %>%
  group_by(province, party) %>%
  summarise(vote_pct_05 = quantile(pct, 0.05),
            vote_pct_50 = median(pct),
            vote_pct_95 = quantile(pct, 0.95),
            seats_pct_05 = round(quantile(seats, 0.05)),
            seats_pct_50 = round(median(seats)),
            seats_pct_95 = round(quantile(seats, 0.95)))

province_summary_stats %>%
  print(n = Inf)

## Summary stats format for the timeline
# One long table of geography x party x outcome percentiles for today.
# (party_order comes from a sourced script.)
summary_stats_by_geo <- bind_rows(
  # National seats
  seat_sims %>%
    filter(party %in% party_order) %>%
    group_by(party) %>%
    summarise(pct_05 = quantile(seats, 0.05),
              pct_50 = quantile(seats, 0.5),
              pct_95 = quantile(seats, 0.95)) %>%
    mutate(date = today(),
           geography = "National",
           outcome = "Seats") %>%
    dplyr::select(geography, date, party, outcome, pct_05, pct_50, pct_95),
  # National vote
  natl_vote_implied %>%
    filter(party %in% party_order) %>%
    group_by(party) %>%
    summarise(pct_05 = quantile(pct, 0.05),
              pct_50 = quantile(pct, 0.5),
              pct_95 = quantile(pct, 0.95)) %>%
    mutate(date = today(),
           geography = "National",
           outcome = "Vote share") %>%
    dplyr::select(geography, date, party, outcome, pct_05, pct_50, pct_95),
  # Province summary stats
  province_sims %>%
    dplyr::select(-total_votes, -party_votes) %>%
    rename(`Vote share` = pct, Seats = seats) %>%
    melt(measure.vars = c("Vote share", "Seats"), variable.name = "outcome", value.name = "value") %>%
    group_by(party, geography = province, outcome) %>%
    summarise(pct_05 = quantile(value, 0.05),
              pct_50 = quantile(value, 0.5),
              pct_95 = quantile(value, 0.95)) %>%
    mutate(date = today())
) %>%
  arrange(geography, as.character(outcome), date, party)

## Create (or add to existing) summary stats timeline
if(!("summary_stats_timeline.csv" %in% list.files("shiny-app/data"))) {
  write_csv(summary_stats_by_geo, "shiny-app/data/summary_stats_timeline.csv")
}

# Append today's rows, keeping the first row per geography/date/party/outcome.
summary_stats_timeline <- read_csv("shiny-app/data/summary_stats_timeline.csv") %>%
  bind_rows(summary_stats_by_geo) %>%
  distinct(geography, date, party, outcome, .keep_all = TRUE)

write_csv(summary_stats_timeline, "shiny-app/data/summary_stats_timeline.csv")

if(!("overall_result_timeline.csv" %in% list.files("shiny-app/data"))) {
  write_csv(result_probs, "shiny-app/data/overall_result_timeline.csv")
}

# Same append-and-dedupe for the overall-result probabilities; the
# spread/melt round trip zero-fills outcomes missing on some dates.
overall_result_timeline <- read_csv("shiny-app/data/overall_result_timeline.csv") %>%
  bind_rows(result_probs) %>%
  distinct(date, result, .keep_all = TRUE) %>%
  spread(result, prob, fill = 0) %>%
  melt(id.vars = "date", variable.name = "result", value.name = "prob") %>%
  as_tibble()

write_csv(overall_result_timeline, "shiny-app/data/overall_result_timeline.csv")

# Cleanup
# Free the two largest intermediate objects.
rm(district_errors)
rm(region_error_sims)
gc()
|
b72d742aabf39c275fbd46b88b0dc6ce07860e92 | 2da570da5859c8a830e76d794fa17d042cd41ebc | /10 Correlation and Reg in R/04 Interp_Reg_Models/03 Fitt_Val_Res.R | 57f704e26814a1d058b0f86121523c169deab759 | [] | no_license | ArmandoReyesRepo/RCode | 85d5c8f36107936bfcbbdbf16dc9bb2ed1a0feee | 41c96dd0d4bc7762fad3cbeb46c3df4ee1444575 | refs/heads/main | 2023-05-12T16:29:25.970647 | 2021-06-04T09:01:34 | 2021-06-04T09:01:34 | 373,320,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | 03 Fitt_Val_Res.R | library(openintro) ## data bdims
library(broom) ## in order to get data from fitted model

# Mean of weights equal to mean of fitted values? (A property of OLS with an
# intercept.) Fix: compare with all.equal() instead of `==`, since exact
# floating-point equality can be FALSE even when the two means are
# mathematically identical.
# NOTE: `mod` must already exist, e.g. mod <- lm(wgt ~ hgt, data = bdims).
isTRUE(all.equal(mean(bdims$wgt), mean(fitted.values(mod))))

# Mean of the residuals (should be ~0)
mean(residuals(mod))
|
afb17e22ffbedb1eafacd577a12e2bb46b0fa632 | faf1f580595ad6912c1184858792870d88b965ff | /scripts_old/scripts/espectro_v_movil02.R | 5036a26ae88f0b02885f1d58728387e96e6db23d | [] | no_license | EncisoAlvaJC/TESIS | 732cb07f5488a388ad4b6f2417717a6262817c0d | c2bad0f255e7d5795d2ac63c80e65e3d752ea5f8 | refs/heads/master | 2021-01-09T05:34:36.738141 | 2018-08-08T20:46:52 | 2018-08-08T20:46:52 | 80,755,604 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,695 | r | espectro_v_movil02.R | library(signal)
# ---- User configuration -----------------------------------------------------
# Input/output locations and recording parameters for the spectrogram batch.
# Idiom fixes: `<-` for assignment and TRUE/FALSE instead of the reassignable
# shorthand T/F.
data_d <- 'C:/Users/EQUIPO 1/Desktop/julio/DATOS'
#center_d = 'C:/Users/EQUIPO 1/Desktop/julio/scripts'

# Recording (subject/session) identifier and its data sub-directory
nombre <- 'CLMN10SUE'
#nombre = 'MJNNVIGILOS'
nom_dir <- '/CLMN10SUE'
#nom_dir = '/MJNNVIGILOScCanal'

#etiqueta = 'CLMN'

dir_datos <- paste0(data_d,nom_dir)   # where the per-channel files live
dir_res <- 'C:/Users/EQUIPO 1/Desktop/julio/espectro_cuadrado2'  # output dir

extension <- '.txt'
reemplazar <- TRUE    # expand the channel-set shorthand below
fr_muestreo <- 512    # sampling rate (Hz)
#dur_epoca = 30
canales <- 'PSG'      # '10-20', 'PSG', or an explicit channel vector
#canales = c('T4','T5','T6','LOG','ROG','EMG')

ver_avance <- FALSE
no_repetir <- FALSE
haz_carpeta <- TRUE
usar_loess <- FALSE
filtrar <- FALSE
#################################################
# parametros opcionales
if(reemplazar){
if(canales=='10-20'){
canales = c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
'P3','P4','PZ','T3','T4','T5','T6')
}
if(canales=='PSG'){
canales = c('C3','C4','CZ','F3','F4','F7','F8','FP1','FP2','FZ','O1','O2',
'P3','P4','PZ','T3','T4','T5','T6','LOG','ROG','EMG')
}
}
if(length(canales)<1){
stop('ERROR: Lista de canales tiene longitud cero')
}
#if(missing(etiqueta)){
# etiqueta = nombre
#}
#################################################
# parametros dependientes de los datos
#ventana = fr_muestreo*dur_epoca
n_canales = length(canales)
#usar_stl = T
#if(dur_epoca<=2){
# usar_stl = F
#}
#if(usar_loess){
# usar_stl = F
#}
#################################################
# inicio del ciclo que recorre los canales
for(ch in 1:n_canales){
# construye el nombre del archivo
ch_actual = canales[ch]
nom_archivo = paste0(nombre,'_',ch_actual,extension)
if(no_repetir){
setwd(dir_res)
if(file.exists(paste0('EST_',nombre,'_',ch_actual,'_T.csv' ))){
warning('El canal ',ch_actual,
' se ha omitido, pues se encontraron resultados previos')
next()
}
}
# cargar los datos
setwd(dir_datos)
if(!file.exists(nom_archivo)){
warning('ERROR: En canal ',ch_actual,
', no se encontro el archivo ',nom_archivo)
next()
}
DATOS = read.csv(nom_archivo)
DATOS = as.numeric(unlist(DATOS))
sp = specgram(DATOS,Fs=512)
setwd(dir_res)
write.table(Mod(sp$S),file=paste0('SPEC_',nombre,'_',ch_actual,'.txt'),
col.names=F,row.names=F)
write.table(sp$f,file=paste0('FREC_',nombre,'_',ch_actual,'.txt'),
col.names=F,row.names=F)
write.table(sp$t,file=paste0('TIME_',nombre,'_',ch_actual,'.txt'),
col.names=F,row.names=F)
}
# fin del ciclo que recorre canales
#################################################
# fin del script
############################################################################### |
f362ad3689756c441f9e6243226a0a56c43d3d4a | 27ec1587ce6f4b7092b61d5ef0971cc37e89b816 | /AdiposeTissue_adj.R | 55aeb93ff3855b35f1079749f4c46cb306fe9c5c | [] | no_license | DannyArends/BachelorThesisCode | 69300ab4867bc5ed313fd2f5892b33911528e48a | 3d60457b238be16cdbf3e4c3f37c501419b3b278 | refs/heads/master | 2020-05-16T10:22:45.617252 | 2019-04-23T09:38:45 | 2019-04-23T09:38:45 | 182,981,970 | 1 | 0 | null | 2019-04-23T09:32:47 | 2019-04-23T09:32:47 | null | UTF-8 | R | false | false | 7,638 | r | AdiposeTissue_adj.R | # Analysis of weight of WATgon, WATsc and BAT
# (c) Danny Arends and Aimee Freiberg (HU Berlin), 2018 - 2024
# load data
setwd("/Users/aimeefreiberg/Documents/Universtiy Bachelor/AG Brockmann/Bachelorarbeit/R_Data")
mdata <- read.table("Organs.txt", sep="\t", row.names=1, header=TRUE, na.strings = c("","NA"))
Genotype <- read.table("Genotype1.txt", sep="\t", row.names=1, header=TRUE, na.strings = c("","NA"))
Factors <- read.table("factors_AIL.txt", sep="\t", row.names=1, header=TRUE, na.strings = c("","NA"))
# bind data into one table
mdata <- cbind(mdata, Genotype = NA)
mdata[rownames(Genotype), "Genotype"] <- as.character(unlist(Genotype[,1]))
mdata <- cbind(mdata, Mother = NA, WG = NA, Sex = NA)
mdata[rownames(Factors), "Mother"] <- as.character(unlist(Factors[,1]))
mdata[rownames(Factors), "WG"] <- as.character(unlist(Factors[,2]))
mdata[rownames(Factors), "Sex"] <- as.character(unlist(Factors[,3]))
# calculate relative fat storage weight (in %)
mdata <- cbind(mdata, "Rel.WATgon" = unlist(mdata[, "WATgon"] / mdata["Schlachtgewicht"])*100)
mdata <- cbind(mdata, "Rel.WATsc" = unlist(mdata[, "WATsc"] / mdata["Schlachtgewicht"])*100)
mdata <- cbind(mdata, "Rel.BAT" = unlist(mdata[, "BAT"] / mdata["Schlachtgewicht"])*100)
# test for impact of the factors sex, mother and litter size on data
# test for Rel.WATgon
# set up null hypothesis
GH0 <- lm(Rel.WATgon ~ Sex + as.numeric(WG) + Mother, data=mdata)
AIC(GH0)
# set up alternative hypothesis
# Genotypes coded as dominant/recessive due to knowledge from literature
genotypes_num = as.numeric(factor(mdata[,"Genotype"], levels=c("CC", "CT", "TT")))
names(genotypes_num) = rownames(mdata)
genotypes_dom = as.numeric(factor(mdata[,"Genotype"], levels=c("CC", "CT", "TT")))
genotypes_dom[genotypes_dom == 3] <- 2
GHAdom <- lm(Rel.WATgon ~ Sex + as.numeric(WG) + Mother + genotypes_dom, data=mdata)
AIC(GHAdom)
# null hypothesis rejected, when AIC(H0) > AIC (HAdom)+20 <-
AIC(GH0, GHAdom)
# NULL HYPTHESIS REJECTED
# set up second alternative hypothesis (remove mothers)
GHAdomNoM <- lm(Rel.WATgon ~ Sex + as.numeric(WG) + genotypes_dom, data=mdata)
# HAdom rejected, when AIC(HAdom) > AIC (HAdomNoM)+20 <-
AIC(GHAdomNoM, GHAdom)
# HAdom rejected in favor of HAdomNoM
# check normed model HAdomNoM for normal distribution
qqnorm(GHAdomNoM$residuals)
qqline(GHAdomNoM$residuals, col="red")
shapiro.test(GHAdomNoM$residuals)
# DATA IS NORMALLY DISTRIBUTED
# see which factors are significantly different
anova(GHAdomNoM)
# ALL FACTORS SIGNIFICANTLY INFLUENCE REL.WATGON
# WATgon significantly different between genotypes
# set up model for plotting WATgon
Gplot <- lm(Rel.WATgon ~ Sex + as.numeric(WG), data=mdata)
# test for Rel.WATsc
SH0 <- lm(Rel.WATsc ~ Sex + as.numeric(WG) + Mother, data=mdata)
AIC(SH0)
# set up alternative hypothesis
# Genotypes coded as dominant/recessive due to knowledge from literature
genotypes_dom = as.numeric(factor(mdata[,"Genotype"], levels=c("CC", "CT", "TT")))
genotypes_dom[genotypes_dom == 3] <- 2
SHAdom <- lm(Rel.WATsc ~ Sex + as.numeric(WG) + Mother + genotypes_dom, data=mdata)
AIC(SHAdom)
# null hypothesis rejected, when AIC(H0) > AIC (HAdom)+20
AIC(SH0, SHAdom)
# NULL HYPTHESIS REJECTED
# set up second alternative hypothesis (remove mothers)
SHAdomNoM <- lm(Rel.WATsc ~ Sex + as.numeric(WG) + genotypes_dom, data=mdata)
# set uo thrids alternative hypothesis (remove littersize, because so significant influence)
SHAdomSex <- lm(Rel.WATsc ~ Sex + genotypes_dom, data=mdata)
# check for best model again
AIC(SHAdomNoM, SHAdom, SH0, SHAdomSex)
# SHAdomNoMWG, because significant difference to H0 and least degrees of freedoms used
# check normed model HAdomNoM for normal distribution
qqnorm(SHAdomSex$residuals)
qqline(SHAdomSex$residuals, col="red")
shapiro.test(SHAdomSex$residuals)
# DATA IS NORMALLY DISTRIBUTED (with a few outliers)
# see which factors are significantly different
anova(SHAdomSex)
# ALL FACTORS (BESIDES LITTER SIZE) SIGNIFICANTLY INFLUENCE REL.WATSC
# WATsc significantly different between genotypes
# set up model for plotting WATgon
Splot <- lm(Rel.WATsc ~ Sex, data=mdata)
# test for BAT
BH0 <- lm(Rel.BAT ~ Sex + as.numeric(WG) + Mother, data=mdata)
AIC(BH0)
# set up alternative hypothesis
# Genotypes coded as dominant/recessive due to knowledge from literature
genotypes_dom = as.numeric(factor(mdata[,"Genotype"], levels=c("CC", "CT", "TT")))
genotypes_dom[genotypes_dom == 3] <- 2
BHAdom <- lm(Rel.BAT ~ Sex + as.numeric(WG) + Mother + genotypes_dom, data=mdata)
AIC(BHAdom)
# null hypothesis rejected, when AIC(H0) > AIC (HAdom)+20
AIC(BH0, BHAdom)
# NULL HYPTHESIS ACCEPTED
# set up second alternative hypothesis (remove mothers)
BHAdomNoM <- lm(Rel.BAT ~ Sex + as.numeric(WG) + genotypes_dom, data=mdata)
# set up third alternative hypothesis (without sex and WG)
BHAdomM <- lm(Rel.BAT ~ Mother + genotypes_dom, data=mdata)
#set up fourth alternative hypothesis
BHAdom0 <- lm(Rel.BAT ~ genotypes_dom, data=mdata)
# null hypothesis rejected, when AIC(H0) > AIC (HAdom)+20
AIC(BH0, BHAdom, BHAdomNoM, BHAdomM, BHAdom0)
# no significant differences, choose BHAdom0 because of least degrees of freedom used
# check normed model HAdomNoM for normal distribution
qqnorm(BHAdom0$residuals)
qqline(BHAdom0$residuals, col="red")
shapiro.test(BHAdomNoM$residuals)
# DATA IS NORMALLY DISTRIBUTED
# see which factors are significantly different
anova(BHAdom0)
# BAT significantly different between genotypes
# plot data with normed models
par(mfrow=c(1,3))
#WATgon
plot(x = c(0, 4), y=c(-1, 12), xaxt="n", main= "WATgon", ylab=" weight [%]", xlab="Genotype", t='n', las=2, cex.lab=2, cex.axis=1.5, cex.main=2)
boxplot(Gplot$residuals + mean(mdata[, "Rel.WATgon"], na.rm=TRUE) ~ mdata[names(Gplot$residuals), "Genotype"], add=TRUE, col=c(rgb(255, 165, 0, 125, maxColorValue=255), rgb(0, 165, 255, 125, maxColorValue=255), rgb(0, 255, 0, 125, maxColorValue=255)), yaxt='n')
lines(x=c(1,2.5), y = c(11, 11))
lines(x=c(1,1), y = c(10, 11))
lines(x=c(2.5,2.5), y = c(11, 10.5))
lines(x=c(2,3), y = c(10.5, 10.5))
lines(x=c(3,3), y = c(10, 10.5))
lines(x=c(2,2), y= c(10, 10.5))
text(1.7, 11.3, paste0("***"), cex = 3)
#WATsc
plot(x = c(0, 4), y=c(-1, 12), xaxt="n", main= "WATsc", ylab=" weight [%]", xlab="Genotype", t='n', las=2, cex.lab=2, cex.axis=1.5, cex.main=2)
boxplot(Splot$residuals + mean(mdata[, "Rel.WATsc"], na.rm=TRUE) ~ mdata[names(Splot$residuals), "Genotype"], add=TRUE, col=c(rgb(255, 165, 0, 125, maxColorValue=255), rgb(0, 165, 255, 125, maxColorValue=255), rgb(0, 255, 0, 125, maxColorValue=255)), yaxt='n')
lines(x=c(1,2.5), y = c(6, 6))
lines(x=c(1,1), y = c(5, 6))
lines(x=c(2.5,2.5), y = c(6, 5.5))
lines(x=c(2,3), y = c(5.5, 5.5))
lines(x=c(3,3), y = c(5, 5.5))
lines(x=c(2,2), y= c(5, 5.5))
text(1.7, 6.3, paste0("***"), cex = 3)
#BAT
plot(x = c(0, 4), y=c(0, 1), xaxt="n", main= "BAT", ylab=" weight [%]", xlab="Genotype", t='n', las=2, cex.lab=2, cex.axis=1.5, cex.main=2)
boxplot(mdata[, "Rel.BAT"] ~ mdata[, "Genotype"], add=TRUE, col=c(rgb(255, 165, 0, 125, maxColorValue=255), rgb(0, 165, 255, 125, maxColorValue=255), rgb(0, 255, 0, 125, maxColorValue=255)), yaxt='n')
lines(x=c(1,2.5), y = c(0.8, 0.8))
lines(x=c(1,1), y = c(0.7, 0.8))
lines(x=c(2.5,2.5), y = c(0.8, 0.75))
lines(x=c(2,3), y = c(0.75, 0.75))
lines(x=c(3,3), y = c(0.7, 0.75))
lines(x=c(2,2), y= c(0.7, 0.75))
text(1.7, 0.83, paste0("***"), cex = 3)
# END
|
572041890eba61f8a1d4307100a78abcade87036 | 77157987168fc6a0827df2ecdd55104813be77b1 | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829791-test.R | 0d6501bc1fc680a129c80ec5335c2c945548b9da | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 427 | r | 1615829791-test.R | testlist <- list(Beta = 0, CVLinf = 9.96472095782093e-101, FM = -1.40001632617123e+149, L50 = 0, L95 = 0, LenBins = 1.29860727674822e-231, LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -7.02707720788345e+182, SL95 = 9.99313099513286e-222, nage = -437977088L, nlen = 8192L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
99adaee9b716b68c5f3aab9784a052511644df90 | 391d5c76ec69b89c726b35b263a98529fac2e59a | /Module_C/044_arbeitsblatt_04.R | c6efa3fa26c5e967b85576fe5defd462a197c76e | [] | no_license | qualityland/ZHAW_CAS_Data_Analysis | 760e5e1325ccab65bb9fcc13d9609648d9ff0ee8 | 4268d6e16d1689e5c32f2229bf724947ad3f607b | refs/heads/master | 2021-01-06T19:58:47.144476 | 2020-07-10T16:11:22 | 2020-07-10T16:11:22 | 241,469,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 397 | r | 044_arbeitsblatt_04.R |
# Aufgabe 1 - Stock Market
library(ISLR)
data("Smarket")
# data.path <- '/Users/schmis12/wrk/studio/ZHAW_CAS_Data_Analysis/Module_C/data/'
# load(paste0(data.path, 'smarket.rdata'))
# a)
dim(Smarket)
summary(Smarket)
str(Smarket)
table(Smarket$Year)
ts.plot(Smarket$Today)
train_set <- Smarket[Smarket$Year < 2005,]
test_set <- Smarket[Smarket$Year == 2005, ]
head(train_set)
head(test_set)
|
fe4a3104ed74e9d0bbd9397c214daddab2681bd9 | bf33e089f6fc6326500193f47c0757c1753d0296 | /R/bndovbme.R | 04f4fe178fd757fd28d252c3d7bb74b3ef5e9742 | [] | no_license | yujunghwang/bndovb | b98a2d1e7ac198aa6c04fa950a61cc085b7be63a | dc4479bacc5e598282233f51c7c04670ccaba530 | refs/heads/main | 2023-07-15T05:35:01.284331 | 2021-08-26T17:34:20 | 2021-08-26T17:34:20 | 347,130,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,797 | r | bndovbme.R | #' @title bndovbme
#' @description This function runs a two sample least squares when main data contains a dependent variable and
#' every right hand side regressor but one omitted variable.
#' The function requires an auxiliary data which includes every right hand side regressor but one omitted variable,
#' and enough proxy variables for the omitted variable.
#' When the omitted variable is continuous, the auxiliary data must contain at least two continuous proxy variables.
#' When the omitted variable is discrete, the auxiliary data must contain at least three continuous proxy variables.
#' @author Yujung Hwang, \email{yujungghwang@gmail.com}
#' @references \describe{
#' \item{Hwang, Yujung (2021)}{Bounding Omitted Variable Bias Using Auxiliary Data. Available at SSRN. \doi{10.2139/ssrn.3866876}}}
#' @importFrom utils install.packages
#' @import stats
#' @importFrom pracma pinv eye
#' @importFrom MASS mvrnorm
#' @import factormodel
#' @importFrom nnet multinom
#'
#' @param maindat Main data set. It must be a data frame.
#' @param auxdat Auxiliary data set. It must be a data frame.
#' @param depvar A name of a dependent variable in main dataset
#' @param pvar A vector of the names of the proxy variables for the omitted variable.
#' When proxy variables are continuous, the first proxy variable is used as an anchoring variable.
#' When proxy variables are discrete, the first proxy variable is used for initialization (For details, see a documentation for "dproxyme" function).
#' @param ptype Either 1 (continuous) or 2 (discrete). Whether proxy variables are continuous or discrete. Default is 1 (continuous).
#' @param comvar A vector of the names of the common regressors existing in both main data and auxiliary data
#' @param sbar A cardinality of the support of the discrete proxy variables. Default is 2. If proxy variables are continuous, this variable is irrelevant.
#' @param mainweights An optional weight vector for the main dataset. The length must be equal to the number of rows of 'maindat'.
#' @param auxweights An optional weight vector for the auxiliary dataset. The length must be equal to the number of rows of 'auxdat'.
#' @param normalize Whether to normalize the omitted variable to have mean 0 and standard deviation 1. Set TRUE or FALSE.
#' Default is TRUE. If FALSE, then the scale of the omitted variable is anchored with the first proxy variable in pvar list.
#' @param signres An option to impose a sign restriction on a coefficient of an omitted variable. Set either NULL or pos or neg.
#' Default is NULL. If NULL, there is no sign restriction.
#' If 'pos', the estimator imposes an extra restriction that the coefficient of an omitted variable must be positive.
#' If 'neg', the estimator imposes an extra restriction that the coefficient of an omitted variable must be negative.
#' @param ci An option to compute an equal-tailed confidence interval. Default is FALSE. It may take some time to compute CI from bootstrap.
#' @param nboot Number of bootstraps to compute the confidence interval. Default is 100.
#' @param scale A tuning parameter for rescaled numerical bootstrap. The value must be between -1/2 and 0. (main data sample size)^scale is the tuning parameter epsilon_n in Hwang (2021). Default is -1/2 (that is, standard bootstrap).
#' @param tau Significance level. (1-tau)% confidence interval is computed. Default is 0.05.
#' @param seed Seed for random number generation. Default is 210823.
#' @param display It must be either TRUE or FALSE. Whether to display progress and messages. Default is TRUE.
#'
#' @return Returns a list of 4 components : \describe{
#' \item{hat_beta_l}{lower bound estimates of regression coefficients}
#'
#' \item{hat_beta_u}{upper bound estimates of regression coefficients}
#'
#' \item{mu_l}{lower bound estimate of E\[ovar*depvar\]}
#'
#' \item{mu_u}{upper bound estimate of E\[ovar*depvar\]}
#'
#' \item{hat_beta_l_cil}{(1-tau)% confidence interval lower bound for hat_beta_l}
#'
#' \item{hat_beta_l_ciu}{(1-tau)% confidence interval upper bound for hat_beta_l}
#'
#' \item{hat_beta_u_cil}{(1-tau)% confidence interval lower bound for hat_beta_u}
#'
#' \item{hat_beta_u_ciu}{(1-tau)% confidence interval upper bound for hat_beta_u}
#'
#' \item{mu_l_cil}{(1-tau)% confidence interval lower bound for mu_l}
#'
#' \item{mu_l_ciu}{(1-tau)% confidence interval upper bound for mu_l}
#'
#' \item{mu_u_cil}{(1-tau)% confidence interval lower bound for mu_u}
#'
#' \item{mu_u_ciu}{(1-tau)% confidence interval upper bound for mu_u}}
#'
#' @examples
#' ## load example data
#' data(maindat_mecont)
#' data(auxdat_mecont)
#'
#' ## set ptype=1 for continuous proxy variables
#' pvar<-c("z1","z2","z3")
#' cvar<-c("x","w1")
#' bndovbme(maindat=maindat_mecont,auxdat=auxdat_mecont,depvar="y",pvar=pvar,ptype=1,comvar=cvar)
#'
#' ## set ptype=2 for discrete proxy variables
#' data(maindat_medisc)
#' data(auxdat_medisc)
#' bndovbme(maindat=maindat_medisc,auxdat=auxdat_medisc,depvar="y",pvar=pvar,ptype=2,comvar=cvar)
#'
#' @export
bndovbme <- function(maindat,auxdat,depvar,pvar,ptype=1,comvar,sbar=2,mainweights=NULL,auxweights=NULL,normalize=TRUE,signres=NULL,ci=FALSE,nboot=100,scale=-1/2,tau=0.05,seed=210823,display=TRUE){
# load libraries
requireNamespace("stats")
requireNamespace("utils")
requireNamespace("pracma")
requireNamespace("factormodel")
requireNamespace("nnet")
#############
# check if inputs are there in a correct form
#############
if (!is.data.frame(maindat)){
stop("please provide main data in a data frame format.")
}
if (!is.data.frame(auxdat)){
stop("please provide auxiliary data in a data frame format.")
}
# check if column names of auxiliary data exists
if (is.null(colnames(auxdat))){
stop("column names of auxiliary data do not exist.")
}
# check if column names of main data exists
if (is.null(colnames(maindat))){
stop("column names of main data do not exist.")
}
# check if auxiliary dataset includes every independent regressor
if ((sum(comvar%in%colnames(auxdat))<length(comvar)) | (sum(pvar%in%colnames(auxdat))<length(pvar)) ){
stop("auxiliary dataset does not contain every right-hand side regressor.")
}
# check if main dataset includes every independent regressor
if (sum(comvar%in%colnames(maindat))<length(comvar)){
stop("main dataset does not contain every common right-hand side regressor.")
}
# check if main dataset includes dependent variable
if (!(depvar%in%colnames(maindat))){
stop("main dataset does not include the dependent variable.")
}
# check if the proxy variable type is correctly specified
if (!(ptype%in%c(1,2))){
stop("Incorrect type was specified for proxy variables. ptype should be either 1 or 2.")
}
# check if there are enough proxy variables
if ((ptype==1) & (length(pvar)<2)){
stop("There are insufficient number of proxy variables. There must be at least 2 proxy variables when the omitted variable is continuous.")
}
if ((ptype==2) & (length(pvar)<3)){
stop("There are insufficient number of proxy variables. There must be at least 3 proxy variables when the omitted variable is discrete.")
}
if (!is.null(mainweights)){
# check if the weight vector has right length
if (length(mainweights)!=dim(maindat)[1]){
stop("The length of 'mainweights' is not equal to the number of rows of 'maindat'.")
}
# check if any weight vector includes NA or NaN or Inf
if (sum(is.na(mainweights))>0|sum(is.nan(mainweights))>0|sum(is.infinite(mainweights))>0){
stop("mainweights vector can not include any NAs or NaNs or Infs.")
}
}
if (!is.null(auxweights)){
# check if the weight variable is included in the auxdat
if (length(auxweights)!=dim(auxdat)[1]){
stop("The length of 'auxweights' is not equal to the number of rows of 'auxdat'.")
}
# check if any weight vector includes NA or NaN or Inf
if (sum(is.na(auxweights))>0|sum(is.nan(auxweights))>0|sum(is.infinite(auxweights))>0){
stop("auxweights vector can not include any NAs or NaNs or Infs.")
}
}
if (!is.null(signres)){
if (signres!="pos" & signres!="neg"){
stop("signres must be either NULL or pos or neg.")
}
}
if (nboot<2){
stop("The number of bootstrap is too small. Enter a number greater than 1.")
}
if ((scale < -1/2) | (scale > 0)){
stop("The scale parameter must be between -1/2 and 0.")
}
if ((tau<0) | (tau>1)){
stop("tau must be between 0 and 1.")
}
if (!is.logical(ci)){
stop("ci must be either TRUE or FALSE.")
}
#############
# prepare data in a right form
#############
# number of observations
Nm <- dim(maindat)[1]
Na <- dim(auxdat)[1]
# add 1 vector
comvar <- c(comvar,"con")
maindat$con <- rep(1,Nm)
auxdat$con <- rep(1,Na)
# leave only necessary variables and make the order of variables consistent
maindat <- maindat[,c(depvar,comvar)]
auxdat <- auxdat[,c(pvar,comvar)]
# add a weight vector to use 'lm' later
maindat$mainweights <- mainweights
auxdat$auxweights <- auxweights
# number of regressors in a regression model (assuming there is only one omitted variable)
nr <- length(comvar)+1
# a subroutine computing XX, B_l, B_u, mu_l, mu_u
bndovbme_moments <- function(maindat,auxdat,mainweights,auxweights){
#############
# estimate CDF and Quantile function
#############
# estimate N(depvar | comvar)
f1 <- paste0(depvar,"~ 0 +",comvar[1])
if (length(comvar)>1){
for (k in 2:length(comvar)){
f1 <- paste0(f1,"+",comvar[k])
}
}
if (is.null(mainweights)){
oout1 <- lm(formula=f1,data=maindat) ## regression without intercept because of "con" in "comvar"
} else{
oout1 <- lm(formula=f1,data=maindat,weights=mainweights) ## regression without intercept because of "con" in "comvar"
}
Fypar <- matrix(oout1$coefficients,ncol=1)
Fypar[is.na(Fypar)] <- 0
yhat <- as.matrix(maindat[,comvar])%*%Fypar
ysd <- sd(oout1$residuals,na.rm=TRUE)
# estimate f(pvar | ovar)
if (ptype==1){
# continuous proxy variables
if (is.null(auxweights)){
pout <- cproxyme(dat=auxdat[,pvar],anchor=1)
} else{
pout <- cproxyme(dat=auxdat[,pvar],anchor=1,weights=auxweights)
}
if (normalize==TRUE){
# noramlize proxy variables so that latent variable has mean 0 and std 1
for (g in 1:length(pvar)){
auxdat[,pvar[g]] <- (auxdat[,pvar[g]] - pout$mtheta)/(sqrt(pout$vartheta))
}
# reestimate measurement equations with normalized proxy variables
if (is.null(auxweights)){
pout <- cproxyme(dat=auxdat[,pvar],anchor=1)
} else{
pout <- cproxyme(dat=auxdat[,pvar],anchor=1,weights=auxweights)
}
}
alpha0 <- pout$alpha0
alpha1 <- pout$alpha1
varnu <- pout$varnu
mtheta <- pout$mtheta
vartheta <- pout$vartheta
} else if (ptype==2){
if (is.null(auxweights)){
pout <- dproxyme(dat=auxdat[,pvar],sbar,initvar=1)
} else{
pout <- dproxyme(dat=auxdat[,pvar],sbar,initvar=1,weights=auxweights)
}
M_param <-pout$M_param
M_param_col <-pout$M_param_col
M_param_row <-pout$M_param_row
mparam <-pout$mparam
typeprob <-pout$typeprob
} else {
stop("ptype should be either 1 or 2.")
}
N <- dim(auxdat)[1]
nc <- length(comvar)
# estimate N(ovar | comvar)
if (ptype==1){
# construct normalized proxy variables
npdat <- auxdat[,pvar]
np <- length(pvar)
nsdnu <- rep(NA,np)
for (i in 1:np){
npdat[,i] <- (npdat[,i]-alpha0[i])/alpha1[i]
nsdnu[i] <- sqrt(varnu[i]/(alpha1[i]^2))
}
# stack up the normalized proxy data
sdat <- cbind(npdat[,1],auxdat[,comvar])
colnames(sdat) <- c("y",comvar)
for (a in 2:np){
sdat0 <- cbind(npdat[,a],auxdat[,comvar])
colnames(sdat0) <- c("y",comvar)
sdat <- rbind(sdat,sdat0)
}
sdat <- as.data.frame(sdat)
f2 <- paste0("y ~ 0 +",comvar[1])
if (length(comvar)>1){
for (k in 2:length(comvar)){
f2 <- paste0(f2,"+",comvar[k])
}
}
if (is.null(auxweights)){
oout2 <- lm(formula=f2,data=sdat) ## regression without intercept because of "con" in "comvar"
} else{
sdat$weights <- rep(auxweights,np)
oout2 <- lm(formula=f2,data=sdat,weights=weights) ## regression without intercept because of "con" in "comvar"
}
# prediction in main data, not auxiliary data
param <- oout2$coefficients
param[is.na(param)] <- 0
Fopar <- matrix(param[1:nc],ncol=1)
ohat <- as.matrix(maindat[,comvar])%*%Fopar
varNoNA <- function(x) var(x,na.rm=TRUE)
res <- sdat[,"y"] - as.matrix(sdat[,comvar])%*%Fopar
osd <- mean(sqrt(pmax(apply(matrix(res,ncol=np),2,varNoNA)-(nsdnu)^2,0.01)))
#############
# compute bounds of E[(depvar)*(omitted variable)]
#############
ovar_m_l <- rep(NA,Nm)
ovar_m_u <- rep(NA,Nm)
for (k in 1:Nm){
if (!is.na(maindat[k,depvar]) & !is.nan(maindat[k,depvar]) & !is.na(yhat[k]) & !is.nan(yhat[k]) & !is.na(ysd) & !is.nan(ysd) & !is.na(ohat[k]) & !is.nan(ohat[k]) & !is.na(osd) & !is.nan(osd) ){
ovar_m_u[k] <- qnorm(p= pnorm(q=maindat[k,depvar],mean=yhat[k],sd=ysd) ,mean=ohat[k],sd=osd)
ovar_m_l[k] <- qnorm(p=(1-pnorm(q=maindat[k,depvar],mean=yhat[k],sd=ysd)),mean=ohat[k],sd=osd)
}
}
} else if (ptype==2){
if (is.null(auxweights)){
oout2 <- multinom(formula=typeprob~as.matrix(auxdat[,comvar[1:(nc-1)]]),maxit=10000,trace=FALSE) ## regression without intercept because of "con" in "comvar"
} else{
oout2 <- multinom(formula=typeprob~as.matrix(auxdat[,comvar[1:(nc-1)]]),weights=auxweights,maxit=10000,trace=FALSE) ## regression without intercept because of "con" in "comvar"
}
param <- t(coef(oout2))
param[is.na(param)]<-0
npr <- dim(param)[1]
npc <- dim(param)[2]
# move intercept to the last row
Fopar <- rbind(matrix(param[2:npr,],ncol=npc),matrix(param[1,],ncol=npc))
# prediction in main data, not auxiliary data
Fopar <- cbind(rep(0,nc),Fopar)
oprob <- exp(as.matrix(maindat[,comvar])%*%Fopar)
oprob <- oprob/matrix(rep(apply(oprob,1,sum),sbar),ncol=sbar)
coprob <- t(apply(oprob,1,cumsum))
#############
# compute bounds of E[(depvar)*(omitted variable)]
#############
ovar_m_l <- rep(NA,Nm)
ovar_m_u <- rep(NA,Nm)
# discrete
typemat <- t(matrix(rep(c(1:sbar),dim(typeprob)[1]),ncol=dim(typeprob)[1]))
if (is.null(auxweights)){
mtheta <- mean(apply(typeprob*typemat,1,sum),na.rm=TRUE)
vartheta <- mean(apply(typeprob*(typemat^2),1,sum),na.rm=TRUE) - mtheta^2
} else{
mtheta <- weighted.mean(x=apply(typeprob*typemat,1,sum),w=auxweights,na.rm=TRUE)
vartheta <- weighted.mean(x=apply(typeprob*(typemat-mtheta)^2,1,sum),w=auxweights,na.rm=TRUE)
}
if (normalize==TRUE){
# fix normalization
ogrid <- (c(1:sbar)-mtheta)/sqrt(vartheta)
typemat <- t(matrix(rep(ogrid,dim(typeprob)[1]),ncol=dim(typeprob)[1]))
# normalized mean and var, close to 0 and 1
if (is.null(auxweights)){
mtheta <- mean(apply(typeprob*typemat,1,sum),na.rm=TRUE)
vartheta <- mean(apply(typeprob*(typemat^2),1,sum),na.rm=TRUE) - mtheta^2
} else{
mtheta <- weighted.mean(x=apply(typeprob*typemat,1,sum),w=auxweights,na.rm=TRUE)
vartheta <- weighted.mean(x=apply(typeprob*(typemat-mtheta)^2,1,sum),w=auxweights,na.rm=TRUE)
}
} else{
ogrid <- c(1:sbar)
}
for (k in 1:Nm){
if (!is.na(maindat[k,depvar]) & !is.nan(maindat[k,depvar]) & !is.na(yhat[k]) & !is.nan(yhat[k]) & !is.na(ysd) & !is.nan(ysd) & sum(is.na(coprob[k,])|is.nan(coprob[k,]))==0 ){
ovar_m_u[k] <- ogrid[which( pnorm(q=maindat[k,depvar],mean=yhat[k],sd=ysd) <coprob[k,])[1]]
ovar_m_l[k] <- ogrid[which((1-pnorm(q=maindat[k,depvar],mean=yhat[k],sd=ysd))<coprob[k,])[1]]
}
}
} else {
stop("ptype should be either 1 or 2.")
}
#############
# compute lower bound and upper bound
#############
# replace missing values to 0 and create a dummy for missingness
Imaindat <- !is.na(maindat)
Iauxdat <- !is.na(auxdat)
colnames(Imaindat) <- colnames(maindat)
colnames(Iauxdat) <- colnames(auxdat)
maindat[!Imaindat] <-0
auxdat[!Iauxdat] <-0
Iovar_m_l <- !is.na(ovar_m_l)
Iovar_m_u <- !is.na(ovar_m_u)
ovar_m_l[!Iovar_m_l] <-0
ovar_m_u[!Iovar_m_u] <-0
if (is.null(mainweights)){
mu_l <- sum(maindat[,depvar]*ovar_m_l) / sum(Imaindat[,depvar]*Iovar_m_l)
mu_u <- sum(maindat[,depvar]*ovar_m_u) / sum(Imaindat[,depvar]*Iovar_m_u)
} else{
mu_l <- sum(maindat[,depvar]*ovar_m_l*mainweights) / sum(Imaindat[,depvar]*Iovar_m_l*mainweights)
mu_u <- sum(maindat[,depvar]*ovar_m_u*mainweights) / sum(Imaindat[,depvar]*Iovar_m_u*mainweights)
}
# submatrices
if (ptype==1){
Inpdat <- !is.na(npdat)
npdat[!Inpdat] <- 0
# continuous
A1 <- vartheta + mtheta^2
# use normalized proxies to compute covariance, A2
A2 <- matrix(NA,nrow=1,ncol=nc)
for (k in 1:nc){
if (is.null(auxweights)){
A2[1,k] <- sum(rep(auxdat[,comvar[k]],np)*matrix(as.matrix(npdat),ncol=1)) / sum(rep(Iauxdat[,comvar[k]],np)*matrix(as.matrix(Inpdat),ncol=1))
} else{
A2[1,k] <- sum(rep(auxweights*auxdat[,comvar[k]],np)*matrix(as.matrix(npdat),ncol=1)) / sum(rep(auxweights*Iauxdat[,comvar[k]],np)*matrix(as.matrix(Inpdat),ncol=1))
}
}
} else if (ptype==2){
A1 <- vartheta + mtheta^2
A2 <- matrix(0,nrow=1,ncol=nc)
for (k in 1:nc){
temp <- 0
for (l in 1:sbar){
temp <- temp + ogrid[l]*auxdat[,comvar[k]]*typeprob[,l]
}
if (is.null(auxweights)){
A2[1,k] <- sum(temp) / sum(Iauxdat[,comvar[k]])
} else{
A2[1,k] <- sum(temp*auxweights) / sum(Iauxdat[,comvar[k]]*auxweights)
}
rm(temp)
}
} else{
stop("ptype must be either 1 or 2")
}
if (is.null(auxweights) & is.null(mainweights)){
C <- as.matrix(rbind( maindat[,comvar], auxdat[,comvar]))
IC <- as.matrix(rbind(Imaindat[,comvar],Iauxdat[,comvar]))
A3 <- (t(C)%*%C)/(t(IC)%*%IC)
} else if(!is.null(auxweights) & is.null(mainweights)){
aw <- matrix(rep(auxweights, length(comvar)),ncol=length(comvar)) *(1/sum(auxweights)) * Na
C <- as.matrix(rbind( maindat[,comvar],aw* auxdat[,comvar]))
IC <- as.matrix(rbind(Imaindat[,comvar],aw*Iauxdat[,comvar]))
C2 <- as.matrix(rbind( maindat[,comvar], auxdat[,comvar]))
IC2 <- as.matrix(rbind(Imaindat[,comvar],Iauxdat[,comvar]))
A3 <- (t(C)%*%C2)/(t(IC)%*%IC2)
} else if(is.null(auxweights) & !is.null(mainweights)){
mw <- matrix(rep(mainweights,length(comvar)),ncol=length(comvar)) *(1/sum(mainweights)) * Nm
C <- as.matrix(rbind(mw* maindat[,comvar], auxdat[,comvar]))
IC <- as.matrix(rbind(mw*Imaindat[,comvar], Iauxdat[,comvar]))
C2 <- as.matrix(rbind( maindat[,comvar], auxdat[,comvar]))
IC2 <- as.matrix(rbind(Imaindat[,comvar], Iauxdat[,comvar]))
A3 <- (t(C)%*%C2)/(t(IC)%*%IC2)
} else{
mw <- matrix(rep(mainweights,length(comvar)),ncol=length(comvar)) *(1/sum(mainweights)) * Nm
aw <- matrix(rep(auxweights, length(comvar)),ncol=length(comvar)) *(1/sum(auxweights)) * Na
C <- as.matrix(rbind(mw* maindat[,comvar], aw* auxdat[,comvar]))
IC <- as.matrix(rbind(mw*Imaindat[,comvar], aw*Iauxdat[,comvar]))
C2 <- as.matrix(rbind( maindat[,comvar], auxdat[,comvar]))
IC2 <- as.matrix(rbind(Imaindat[,comvar], Iauxdat[,comvar]))
A3 <- (t(C)%*%C2)/(t(IC)%*%IC2)
}
XX <- as.matrix(rbind(cbind(A1,A2),cbind(t(A2),A3)))
# OLS formula
if (is.null(mainweights)){
B <- (t(as.matrix(maindat[,depvar]))%*%as.matrix(maindat[,comvar]))/(t(as.matrix(Imaindat[,depvar]))%*%as.matrix(Imaindat[,comvar]))
} else {
B <- (t(as.matrix(mainweights*maindat[,depvar]))%*%as.matrix(maindat[,comvar]))/(t(as.matrix(mainweights*Imaindat[,depvar]))%*%as.matrix(Imaindat[,comvar]))
}
B_l <- matrix(c(mu_l,B),ncol=1)
B_u <- matrix(c(mu_u,B),ncol=1)
return(list(XX,B_l,B_u,mu_l,mu_u))
}
# compute XX, B_l, B_u
mout <- bndovbme_moments(maindat,auxdat,mainweights,auxweights)
XX <- mout[[1]]
B_l <- mout[[2]]
B_u <- mout[[3]]
mu_l <- mout[[4]]
mu_u <- mout[[5]]
# subroutine to compute hat_beta_l and hat_beta_u and mu_l and mu_u (sign restriction adjustment) given XX, B_l, B_u, mu_l, mu_u
# return hat_beta_l, hat_beta_u
# Subroutine: convert the moment matrices into bounds on the OLS
# coefficients, optionally tightening (mu_l, mu_u) under a sign restriction.
#
# Args (as computed by bndovbme_moments above):
#   XX         - stacked second-moment matrix of (omitted var, common vars)
#   B_l, B_u   - cross-moment vectors whose first entry is mu_l / mu_u
#   mu_l, mu_u - lower / upper bound on the omitted-variable moment
#
# Uses from the enclosing scope: comvar, nr, signres, and pinv()
# (Moore-Penrose pseudo-inverse; presumably supplied by an attached
# matrix-algebra package -- confirm).
#
# Returns list(hat_beta_l, hat_beta_u, mu_l, mu_u), with the coefficient
# bounds reordered to (constant, omitted variable, remaining covariates).
bndovbme_coef <- function(XX,B_l,B_u,mu_l,mu_u){
  # Solve the OLS normal equations at both moment bounds; the element-wise
  # min/max of the two solutions give per-coefficient lower/upper bounds.
  hat_beta_l <- matrix(pmin(pinv(XX)%*%B_l,pinv(XX)%*%B_u),nrow=1)
  hat_beta_u <- matrix(pmax(pinv(XX)%*%B_l,pinv(XX)%*%B_u),nrow=1)
  colnames(hat_beta_l) <- c("ovar",comvar)
  colnames(hat_beta_u) <- c("ovar",comvar)
  if (!is.null(signres)){
    # length(ovar)=1 : only the single omitted-variable coefficient is
    # sign-restricted.  B drops the first (mu) entry of the moment vector;
    # it is identical between B_l and B_u, which differ only in entry 1.
    B <- B_l[2:nr]
    if (signres=="pos" & (hat_beta_l[1]<0)){
      # solve the inverse problem: mu_zero is the value of the omitted
      # variable's moment at which the first OLS coefficient is exactly 0.
      M <- pinv(XX)
      mu_zero <- -(M[1,2:nr]%*%matrix(B,ncol=1))/M[1,1]
      # The sign of M[1,1] determines whether mu_zero caps mu from above
      # or from below so the restricted coefficient stays non-negative.
      if (M[1,1]<0){
        mu_u <- mu_zero
        mu_l <- min(mu_zero,mu_l)
      } else{
        mu_l <- mu_zero
        mu_u <- max(mu_zero,mu_u)
      }
      # sign restricted model: recompute bounds with the tightened mu range.
      rB_l <- matrix(c(mu_l,B),ncol=1)
      rB_u <- matrix(c(mu_u,B),ncol=1)
      hat_beta_l <- matrix(pmin(pinv(XX)%*%rB_l,pinv(XX)%*%rB_u),nrow=1)
      hat_beta_u <- matrix(pmax(pinv(XX)%*%rB_l,pinv(XX)%*%rB_u),nrow=1)
      colnames(hat_beta_l) <- c("ovar",comvar)
      colnames(hat_beta_u) <- c("ovar",comvar)
    }
    if (signres=="neg" & (hat_beta_u[1]>0)){
      # Mirror image of the "pos" case: force the coefficient non-positive.
      M <- pinv(XX)
      mu_zero <- -(M[1,2:nr]%*%matrix(B,ncol=1))/M[1,1]
      if (M[1,1]<0){
        mu_l <- mu_zero
        mu_u <- max(mu_zero,mu_u)
      } else{
        mu_u <- mu_zero
        mu_l <- min(mu_zero,mu_l)
      }
      # sign restricted model
      rB_l <- matrix(c(mu_l,B),ncol=1)
      rB_u <- matrix(c(mu_u,B),ncol=1)
      hat_beta_l <- matrix(pmin(pinv(XX)%*%rB_l,pinv(XX)%*%rB_u),nrow=1)
      hat_beta_u <- matrix(pmax(pinv(XX)%*%rB_l,pinv(XX)%*%rB_u),nrow=1)
      colnames(hat_beta_l) <- c("ovar",comvar)
      colnames(hat_beta_u) <- c("ovar",comvar)
    }
  }
  # change the order of OLS coefficients to (constant, ovar, other covariates)
  comvar2 <- comvar[comvar!="con"]
  hat_beta_l <- c(hat_beta_l[,"con"],hat_beta_l[,"ovar"],hat_beta_l[,comvar2])
  hat_beta_u <- c(hat_beta_u[,"con"],hat_beta_u[,"ovar"],hat_beta_u[,comvar2])
  return(list(hat_beta_l,hat_beta_u,mu_l,mu_u))
}
moout2 <- bndovbme_coef(XX,B_l,B_u,mu_l,mu_u)
hat_beta_l <- moout2[[1]]
hat_beta_u <- moout2[[2]]
mu_l <- moout2[[3]]
mu_u <- moout2[[4]]
###################################
# Confidence Interval computation
###################################
hat_beta_l_cil <- NULL
hat_beta_l_ciu <- NULL
hat_beta_u_cil <- NULL
hat_beta_u_ciu <- NULL
mu_l_cil <- NULL
mu_l_ciu <- NULL
mu_u_cil <- NULL
mu_u_ciu <- NULL
if (ci==TRUE){
# set seed
set.seed(seed)
# draw bootstrap samples with replacement
bmain_ind <- randi(Nm,n=Nm,m=nboot)
baux_ind <- randi(Na,n=Na,m=nboot)
# matrices to save derivatives
dhat_beta_l <- array(NA,dim=c(nr,nboot))
dhat_beta_u <- array(NA,dim=c(nr,nboot))
dmu_l <- rep(NA,nboot)
dmu_u <- rep(NA,nboot)
# progress message
prog <- round(quantile(c(1:nboot),probs=seq(0.1,1,0.1)),digits=0)
prog_ind <- 1
for (b1 in 1:nboot){
if (display==TRUE){
if (b1%in%prog){
print(paste0(names(prog)[prog_ind]," completed"))
prog_ind <- prog_ind + 1
}
}
# bootstrap sample
bmaindat <- maindat[bmain_ind[,b1],]
bauxdat <- auxdat[ baux_ind[,b1],]
# compute bootstrap moments (return : XX, B_l, B_u)
bmout <- bndovbme_moments(maindat=bmaindat,auxdat=bauxdat,mainweights=as.vector(bmaindat$mainweights),auxweights=as.vector(bauxdat$auxweights))
bXX <- bmout[[1]]
bB_l <- bmout[[2]]
bB_u <- bmout[[3]]
bmu_l <- bmout[[4]]
bmu_u <- bmout[[5]]
# rescale by sample size
# tuning parameter
en <- Nm^scale
rn <- sqrt(Nm)
adjbXX <- XX + en*rn*(bXX-XX)
adjbB_l <- B_l + en*rn*(bB_l-B_l)
adjbB_u <- B_u + en*rn*(bB_u-B_u)
adjbmu_l <- mu_l + en*rn*(bmu_l-mu_l)
adjbmu_u <- mu_u + en*rn*(bmu_u-mu_u)
# take the derivative
bmoout2 <- bndovbme_coef(adjbXX,adjbB_l,adjbB_u,adjbmu_l,adjbmu_u)
bhat_beta_l <- bmoout2[[1]]
bhat_beta_u <- bmoout2[[2]]
bmu_l <- bmoout2[[3]]
bmu_u <- bmoout2[[4]]
dhat_beta_l[,b1] <- (bhat_beta_l - hat_beta_l)/en
dhat_beta_u[,b1] <- (bhat_beta_u - hat_beta_u)/en
dmu_l[b1] <- (bmu_l - mu_l)/en
dmu_u[b1] <- (bmu_u - mu_u)/en
}
rquantile <- function(x){
return(quantile(x,probs=(1-tau/2)))
}
lquantile <- function(x){
return(quantile(x,probs=(tau/2)))
}
# find the tau and (1-tau) percentile
dhat_beta_l_r <- apply(dhat_beta_l,1,rquantile)
dhat_beta_l_l <- apply(dhat_beta_l,1,lquantile)
dhat_beta_u_r <- apply(dhat_beta_u,1,rquantile)
dhat_beta_u_l <- apply(dhat_beta_u,1,lquantile)
dmu_l_r <- rquantile(dmu_l)
dmu_l_l <- lquantile(dmu_l)
dmu_u_r <- rquantile(dmu_u)
dmu_u_l <- lquantile(dmu_u)
# compute the bound
hat_beta_l_cil <- hat_beta_l - dhat_beta_l_r / rn
hat_beta_l_ciu <- hat_beta_l - dhat_beta_l_l / rn
hat_beta_u_cil <- hat_beta_u - dhat_beta_u_r / rn
hat_beta_u_ciu <- hat_beta_u - dhat_beta_u_l / rn
mu_l_cil <- mu_l - dmu_l_r /rn
mu_l_ciu <- mu_l - dmu_l_l /rn
mu_u_cil <- mu_u - dmu_u_r /rn
mu_u_ciu <- mu_u - dmu_u_l /rn
}
if ((ci==FALSE) & (display==TRUE)){
print("If you want to compute an equal-tailed confidence interval using a numerical delta method, set ci=TRUE instead. Default is 95% CI. If you want a different coverage, set a different tau.")
}
return(list(hat_beta_l=hat_beta_l,hat_beta_u=hat_beta_u,mu_l=mu_l,mu_u=mu_u,
hat_beta_l_cil=hat_beta_l_cil,hat_beta_l_ciu=hat_beta_l_ciu,hat_beta_u_cil=hat_beta_u_cil,hat_beta_u_ciu=hat_beta_u_ciu,
mu_l_cil=mu_l_cil,mu_l_ciu=mu_l_ciu,mu_u_cil=mu_u_cil,mu_u_ciu=mu_u_ciu))
}
|
aae0958ec56b89d3c4070f3ecc46730701c7fd7d | 66f9ae7985c6849f898e5139ad2bea5f1431744a | /InterviewPractice/sudoku2.R | 64326ada8ea0da40fac6a70ec99ca59ff3951ef1 | [] | no_license | chaegeunsong/RCodePractice | 697d97c586d1fe46cc3b1283393976047098caf8 | b1a2c6bb74956522fda851a72cbb7b980ef57cb4 | refs/heads/master | 2023-07-05T23:16:25.732456 | 2020-11-26T08:31:00 | 2020-11-26T08:31:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,609 | r | sudoku2.R | # Sudoku is a number-placement puzzle. The objective is to fill a 9 × 9 grid with numbers in such a way that each column, each row, and each of the nine 3 × 3 sub-grids that compose the grid all contain all of the numbers from 1 to 9 one time.
#
# Implement an algorithm that will check whether the given grid of numbers represents a valid Sudoku puzzle according to the layout rules described above. Note that the puzzle represented by grid does not have to be solvable.
#
# Example
#
# For
#
# grid = [['.', '.', '.', '1', '4', '.', '.', '2', '.'],
# ['.', '.', '6', '.', '.', '.', '.', '.', '.'],
# ['.', '.', '.', '.', '.', '.', '.', '.', '.'],
# ['.', '.', '1', '.', '.', '.', '.', '.', '.'],
# ['.', '6', '7', '.', '.', '.', '.', '.', '9'],
# ['.', '.', '.', '.', '.', '.', '8', '1', '.'],
# ['.', '3', '.', '.', '.', '.', '.', '.', '6'],
# ['.', '.', '.', '.', '.', '7', '.', '.', '.'],
# ['.', '.', '.', '5', '.', '.', '.', '7', '.']]
#
# the output should be
# sudoku2(grid) = true;
#
# For
#
# grid = [['.', '.', '.', '.', '2', '.', '.', '9', '.'],
# ['.', '.', '.', '.', '6', '.', '.', '.', '.'],
# ['7', '1', '.', '.', '7', '5', '.', '.', '.'],
# ['.', '7', '.', '.', '.', '.', '.', '.', '.'],
# ['.', '.', '.', '.', '8', '3', '.', '.', '.'],
# ['.', '.', '8', '.', '.', '7', '.', '6', '.'],
# ['.', '.', '.', '.', '.', '2', '.', '.', '.'],
# ['.', '1', '.', '2', '.', '.', '.', '.', '.'],
# ['.', '2', '.', '.', '3', '.', '.', '.', '.']]
#
# the output should be
# sudoku2(grid) = false.
#
# The given grid is not correct because there are two 1s in the second column. Each column, each row, and each 3 × 3 subgrid can only contain the numbers 1 through 9 one time.
#
# Input/Output
#
# [execution time limit] 5 seconds (r)
#
# [input] array.array.char grid
#
# A 9 × 9 array of characters, in which each character is either a digit from '1' to '9' or a period '.'.
#
# [output] boolean
#
# Return true if grid represents a valid Sudoku puzzle, otherwise return false.
#no repeating numbers in a row or a column or in a 3x3 grid
#if they are then false
# Example grid from the problem statement: INVALID -- '1' appears twice in
# column 2, and row 3 contains two '7's -- so sudoku2(grid) should be FALSE.
# NOTE(review): this value is overwritten by the assignment further below
# before it is ever passed to sudoku2.
grid = list(list('.', '.', '.', '.', '2', '.', '.', '9', '.'),
            list('.', '.', '.', '.', '6', '.', '.', '.', '.'),
            list('7', '1', '.', '.', '7', '5', '.', '.', '.'),
            list('.', '7', '.', '.', '.', '.', '.', '.', '.'),
            list('.', '.', '.', '.', '8', '3', '.', '.', '.'),
            list('.', '.', '8', '.', '.', '7', '.', '6', '.'),
            list('.', '.', '.', '.', '.', '2', '.', '.', '.'),
            list('.', '1', '.', '2', '.', '.', '.', '.', '.'),
            list('.', '2', '.', '.', '3', '.', '.', '.', '.'))
# grid = list(list('.','.','.'),
# list('.','.','.'),
# list('.','.','.'))
# logic1: vanilla: solved 17/20 tests.
#logic2: additional to vanilla, solve the failing cases
# failinginput in logic1 : issue: silliest of all. lapply return is not global.
#had to check for any false in that iteration and then return.
# Regression input that exposed the bug in the earlier "logic1" attempt:
# the grid is INVALID because '5' appears twice in column 4 (rows 4 and 7),
# so a correct sudoku2(grid) must return FALSE.
grid = list(list(".",".","4",".",".",".","6","3","."),
            list(".",".",".",".",".",".",".",".","."),
            list("5",".",".",".",".",".",".","9","."),
            list(".",".",".","5","6",".",".",".","."),
            list("4",".","3",".",".",".",".",".","1"),
            list(".",".",".","7",".",".",".",".","."),
            list(".",".",".","5",".",".",".",".","."),
            list(".",".",".",".",".",".",".",".","."),
            list(".",".",".",".",".",".",".",".","."))
##issues: number of passing tests varyign for the same solution even when all hidden tests are revealed :|
#logic5: hopefully the last one
#do.call(rbind,grid) was slowing it all down
#all that was needed was matrix() function.
#since the numeric thing was not done, instead of NA checks, checks for dots are being done.
sudoku2 <- function(grid) {
  # Validate a 9x9 Sudoku layout given as a list of 9 row-lists of characters
  # ('1'-'9', or '.' for an empty cell).  The grid is valid when no digit
  # repeats within any row, any column, or any of the nine 3x3 sub-grids;
  # the puzzle does not have to be solvable.
  #
  # Returns TRUE if the layout is valid, FALSE otherwise.
  #
  # Rework: the original threaded a `validsudoku` flag through nested
  # break statements and repeated the same filter/compare expression three
  # times; early returns plus a small helper make the checks uniform.
  g <- matrix(unlist(grid), nrow = 9, ncol = 9, byrow = TRUE)

  # TRUE when the filled (non-'.') cells of v are all distinct.
  all_distinct <- function(v) {
    v <- v[v != "."]
    anyDuplicated(v) == 0L
  }

  for (i in seq_len(9)) {
    # Row i and column i; bail out on the first violation.
    if (!all_distinct(g[i, ]) || !all_distinct(g[, i])) {
      return(FALSE)
    }
  }
  # Top-left corners of the nine 3x3 sub-grids.
  for (r in c(1L, 4L, 7L)) {
    for (cl in c(1L, 4L, 7L)) {
      if (!all_distinct(g[r:(r + 2L), cl:(cl + 2L)])) {
        return(FALSE)
      }
    }
  }
  TRUE
}
#logic4: with some wisdom after breaking head for days : 29 tests passing. time up on 30th.
sudoku2 <- function(grid) {
  # Sudoku layout check: a grid (list of 9 row-lists, '1'-'9' or '.') is
  # valid when no digit repeats in any row, column, or 3x3 sub-grid.
  # Coercing to numeric turns the '.' placeholders into NA (with a coercion
  # warning), so "filled cells" are simply the non-NA entries.
  board <- do.call(rbind, lapply(grid, as.numeric))

  # TRUE when the non-NA entries of vals contain a repeated digit.
  has_repeat <- function(vals) {
    vals <- vals[!is.na(vals)]
    length(vals) != length(unique(vals))
  }

  for (block_row in 1:3) {
    for (block_col in 1:3) {
      line <- 3 * (block_row - 1) + block_col  # runs over 1..9
      if (has_repeat(board[line, ])) {
        return(FALSE)
      }
      if (has_repeat(board[, line])) {
        return(FALSE)
      }
      rows <- (3 * (block_row - 1) + 1):(3 * (block_row - 1) + 3)
      cols <- (3 * (block_col - 1) + 1):(3 * (block_col - 1) + 3)
      if (has_repeat(board[rows, cols])) {
        return(FALSE)
      }
    }
  }
  TRUE
}
#logic3: slightly faster
sudoku2 <- function(grid) {
  # Validate a 9x9 Sudoku layout (list of 9 row-lists; '1'-'9' or '.').
  # Valid means no digit repeats in any row, column, or 3x3 sub-grid.
  # Returns TRUE / FALSE.
  #
  # BUG FIX: the column and 3x3-block vectors were previously passed to
  # duplicated() WITHOUT first dropping NAs ('.' cells become NA under
  # as.numeric, and duplicated() marks repeated NAs as duplicates), so any
  # column or block containing two or more empty cells was wrongly reported
  # invalid.  All three checks now filter NAs, as the row check already did.
  board <- do.call(rbind, lapply(grid, as.numeric))  # '.' -> NA (warning)

  # TRUE when the filled (non-NA) entries of x contain a duplicate.
  dup_filled <- function(x) {
    x <- x[!is.na(x)]
    anyDuplicated(x) > 0L
  }

  for (rowindex in 1:3) {
    for (colindex in 1:3) {
      line <- 3 * (rowindex - 1) + colindex  # runs over 1..9
      if (dup_filled(board[line, ])) return(FALSE)  # row
      if (dup_filled(board[, line])) return(FALSE)  # column (now NA-filtered)
      box <- board[(3 * (rowindex - 1) + 1):(3 * (rowindex - 1) + 3),
                   (3 * (colindex - 1) + 1):(3 * (colindex - 1) + 3)]
      if (dup_filled(box)) return(FALSE)            # 3x3 block
    }
  }
  return(TRUE)
}
# sudoku2 <- function(grid) {
# grid <- lapply(grid,as.numeric)
# grid <- do.call(rbind,grid)
# validsudoku <- TRUE
#
# for (rowindex in 1:3) {
# for (colindex in 1:3) {
# rowdata = grid[3*(rowindex-1) + colindex,]
# if (length(rowdata[!is.na(rowdata)]) != length(unique(rowdata[!is.na(rowdata)]))) {
# validsudoku <- FALSE
# break
# }
# coldata = grid[,3*(rowindex-1) + colindex]
# if (length(coldata[!is.na(coldata)]) != length(unique(coldata[!is.na(coldata)]))) {
# validsudoku <- FALSE
# break
# }
# temp = grid[(3*(rowindex-1) + 1):(3*(rowindex-1) + 3),(3*(colindex-1) + 1):(3*(colindex-1) + 3)]
# if (length(temp[!is.na(temp)]) != length(unique(temp[!is.na(temp)]))) {
# validsudoku <- FALSE
# break
# }
# }
# if (validsudoku == FALSE) {
# break
# }
# }
# if (validsudoku == FALSE) {
# return(FALSE)
# } else {
# return(TRUE)
# }
# }
# sudoku2 <- function(grid) {
# grid <- lapply(grid,as.numeric)
# grid <- do.call(rbind,grid)
#
# #empty is true
# # if (all(is.na(grid))) {
# # return(TRUE)
# # }
# # grid <- as.data.table(grid)
#
# #apply on each row
# response <- apply(grid,1,function(x) {
# # x = x[!is.na(x)]
# if (length(x[!is.na(x)]) != length(unique(x[!is.na(x)]))) {
# return(FALSE)
# }
# })
#
# if(any(unlist(response) == FALSE)) {
# return(FALSE)
# }
#
# #apply on each column
# response <- apply(grid,2,function(x) {
# # x = x[!is.na(x)]
# if (length(x[!is.na(x)]) != length(unique(x[!is.na(x)]))) {
# return(FALSE)
# }
# })
#
# if(any(unlist(response) == FALSE)) {
# return(FALSE)
# }
#
# #apply on each 3x3 matrix
# #there are 9 such grids
# for (rowindex in 1:3) {
# for (colindex in 1:3) {
# temp = grid[(3*(rowindex-1) + 1):(3*(rowindex-1) + 3),(3*(colindex-1) + 1):(3*(colindex-1) + 3)]
# if (length(temp[!is.na(temp)]) != length(unique(temp[!is.na(temp)]))) {
# return(FALSE)
# }
# }
# }
# return(TRUE)
# }
#
|
c0181a5d83081725cb79db5dbfd117c1579e36f3 | a709bd69a0e768f37703f970cd95e7511297e776 | /data/create_alc.R | 194643bc2a0f72cc81e6288cf727288466333231 | [] | no_license | YuliyaSkakun/IODS-project | b4139a42c9a16cc15bcc8710e030a6f0ef087585 | 0113b9cbdcd08c4f2f98c8d74c7f122b97a72fd5 | refs/heads/master | 2021-01-11T16:03:11.527307 | 2017-02-24T18:44:32 | 2017-02-24T18:44:32 | 79,991,849 | 0 | 0 | null | 2017-01-25T07:15:59 | 2017-01-25T07:15:59 | null | UTF-8 | R | false | false | 2,024 | r | create_alc.R | # Name: Yuliya Skakun
# Date: 07.02.2017
#The file is containing the information on the alcochol consumprion of students in Portugal (resource: https://archive.ics.uci.edu/ml/datasets/STUDENT+ALCOHOL+CONSUMPTION)
#Read The CSV file
setwd("/Users/skakunyuliya/IODS-project/data")
math <-read.csv("student-mat.csv",sep=";",header=TRUE)
por <- read.csv("student-por.csv",sep=";",header=TRUE)
# Merge two datasets
library(dplyr)
join_by <- c("school", "sex", "age", "address", "famsize", "Pstatus", "Medu", "Fedu", "Mjob", "Fjob", "reason", "nursery","internet")
math_por <- inner_join(math, por, suffix=c(".math", ".por"), by =join_by)
#See the structure and the dimension
str(math_por)
dim(math_por)
alc <- select(math_por, one_of(join_by))
# the columns in the datasets which were not used for joining the data
notjoined_columns <- colnames(math)[!colnames(math) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# for every column name not used for joining...
for(column_name in notjoined_columns) {
# select two columns from 'math_por' with the same original name
two_columns <- select(math_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector to the alc data frame
alc[column_name] <- first_column
}
}
#average of the answers related to weekday and weekend alcohol consumption to create a new column 'alc_use'
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
#Then use 'alc_use' to create a new logical column 'high_use'
alc <- mutate(alc, high_use = alc_use > 2)
#Glimpse at the newly created data
glimpse(alc)
write.csv(alc, file="write.csv", row.names = FALSE)
|
92b2d69650db679181904ab47bc1591e8ac6863b | 52c0fed455b5829b2016ec418204fd1df3429bf9 | /scriptsWRS/Epigenetics_WRS.R | 9aaff8c48e3268da4d4a155715a887c438869200 | [] | no_license | wrshoemaker/JanthinoViol | 286ad91661d1bfed9a4254090490f7a1be62e41a | 1f392becd0deac5d34494df6f80b780beb4abd0a | refs/heads/master | 2020-04-01T22:17:34.806792 | 2017-02-20T17:31:40 | 2017-02-20T17:31:40 | 39,451,977 | 0 | 1 | null | 2015-07-27T03:50:02 | 2015-07-21T14:57:27 | R | UTF-8 | R | false | false | 1,139 | r | Epigenetics_WRS.R | rm(list=ls())
getwd()
setwd('~/github/JanthinoViol/data/')
getwd()
library(ggplot2)
library(lattice)
library(lsmeans)
library(multcompView)
library(plyr)
library(reshape)
epi <- read.csv("EpigeneticsViolacein_07272015_WJB.csv", header = T)
epi.melt <- melt(epi)
qqnorm(epi.melt$value)
wilcox.test(epi$Aza, epi$Control, paired=T)
# So there isn't a significant difference and we can tell by just looking
# at the data there's something not right with it.
ggplot(epi.melt, aes(x=variable, y=value, fill=variable)) +
geom_boxplot() +
geom_jitter()
# We have a really wide variance, so let's see what happens if
# we log-transform the data
epi$AzaLog <- log(epi$Aza, 10)
epi$ControlLog <- log(epi$Control, 10)
wilcox.test(epi$AzaLog, epi$ControlLog, paired=T)
# Still not significant, but the graph looks better
meltepiLog <- melt(subset(epi, select = c(AzaLog,ControlLog)))
ggplot(meltepiLog, aes(x=variable, y=value, fill=variable)) +
geom_boxplot() +
geom_blank() + xlab("Treatment") + ylab("Violacein Units") + scale_fill_manual(values=c("darkorchid4", "white"), name="Treatment", labels=c("Azacytidine", "Control")) |
2d4f2da2853962b500ca5283073fc4ea95520816 | f779b7bd7020d47e669765c6309d0bf7d113d91e | /ExploratoryDataAnalysis1/plot3[1].R | e3e910fbce9437c5b81e46194a0e66011f1d8d24 | [] | no_license | dwong0021/DataScienceProjects | 6160f285aa94803728d364fbfb62cfa794664614 | 4cd5cac96b1a0ea2b00370548837ac200b196ace | refs/heads/master | 2021-09-05T06:22:03.831043 | 2018-01-24T19:10:09 | 2018-01-24T19:10:09 | 118,806,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 697 | r | plot3[1].R | #plot 3
plot3<-function(x="large"){
with(sub_power, plot(timestamp, Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
lines(sub_power$timestamp, sub_power$Sub_metering_2, type="l", col="red")
lines(sub_power$timestamp, sub_power$Sub_metering_3, type="l", col="blue")
if(x=="small"){
legend("topleft", col=c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1), bty="n", pt.cex=1, cex=1)
}
else {
legend("topleft", col=c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1), lwd=c(1,1), cex=1)
}
}
png("plot3.png", width=504, height=504)
plot3()
dev.off() |
f90b89d1cfbd4e075f812d20e4b19840ef582a45 | 77eee201446603d4e25a363063bb0d2f0008ed75 | /man/parLapply_wrapper.Rd | a728ce69432a3cb6182f1325abfcdb12b476f7b5 | [] | no_license | ada-w-yan/reassortment | a1c684cbc06a39bf5d712aa16df16945290aac13 | 7173ff6b84f150f8daaeefd179e6dc7146a9fe6a | refs/heads/master | 2023-03-30T03:14:17.963386 | 2021-04-08T05:43:33 | 2021-04-08T05:43:33 | 156,770,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 493 | rd | parLapply_wrapper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_functions.R
\name{parLapply_wrapper}
\alias{parLapply_wrapper}
\title{wrapper for parLapply for cluster}
\usage{
parLapply_wrapper(run_parallel, x, fun, ...)
}
\arguments{
\item{x}{first argument of lapply}
\item{fun}{second argument of lapply}
\item{run_parallel:}{logical: if TRUE, use parLapply, else use lapply}
}
\value{
output arguments of lapply
}
\description{
wrapper for parLapply for cluster
}
|
5ffb47aff20229d2a26538a9b1864730289ec478 | 1a4f6b9c90f3bd3ed60a65e4a93c9cdb3a0f2126 | /Result/Histogramplot_overall.R | d79eccab50a0d8e227ef803ebeb09ce7a1104820 | [
"MIT"
] | permissive | diaoenmao/MIREX-Audio-Melody-Extraction-Data-Analysis | 48ebf6b41c00b2a6e64bac8d1d9e4530e0e05bce | 9a1c8c5d5c12b8fe6b000590586103c62b0f838d | refs/heads/master | 2023-05-26T23:43:42.372443 | 2018-03-30T23:05:20 | 2018-03-30T23:05:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,551 | r | Histogramplot_overall.R | Histogramplot_overall <- function() {
mir09_0db<-DataOverallAccuracy("mir09_0db")
adc04<-DataOverallAccuracy("adc04")
mir05<-DataOverallAccuracy("mir05")
ind08<-DataOverallAccuracy("ind08")
mir09_m5db<-DataOverallAccuracy("mir09_m5db")
mir09_p5db<-DataOverallAccuracy("mir09_p5db")
Length<-max(length(mir09_0db), length(adc04), length(mir05), length(ind08),
length(mir09_m5db), length(mir09_p5db))
names<-c("MIREX2009 0db", "ADC04",
"MIREX2005","MIREX2008",
"MIREX2009 +5db", "MIREX2009 -5db")
out<-data.frame()
out<-data.frame(matrix(ncol = length(names), nrow = Length))
colnames(out)<-names
out[1:length(mir09_0db),1]<-mir09_0db
out[1:length(adc04),2]<-adc04
out[1:length(mir05),3]<-mir05
out[1:length(ind08),4]<-ind08
out[1:length(mir09_m5db),5]<-mir09_m5db
out[1:length(mir09_p5db),6]<-mir09_p5db
out<-out*100
par(mfrow=c(3,2))
x <- mir09_0db
h<-hist(x, breaks=10, xlab="Overall Accuracy/%",
main="MIREX09 0db", ylim=c(0,8))
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, lwd=1)
grid()
x <- adc04
h<-hist(x, breaks=10, xlab="Overall Accuracy/%",
main="ADC04", ylim=c(0,8))
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, lwd=1)
grid()
x <- mir05
h<-hist(x, breaks=10, xlab="Overall Accuracy/%",
main="MIREX05", ylim=c(0,10))
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, lwd=1)
grid()
x <- ind08
h<-hist(x, breaks=10, xlab="Overall Accuracy/%",
main="MIREX08", ylim=c(0,8))
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, lwd=1)
grid()
x <- mir09_m5db
h<-hist(x, breaks=10, xlab="Overall Accuracy/%",
main="MIREX09 -5db", ylim=c(0,8))
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, lwd=1)
grid()
x <- mir09_p5db
h<-hist(x, breaks=10, xlab="Overall Accuracy/%",
main="MIREX09 +5db", ylim=c(0,10))
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, lwd=1)
grid()
} |
622948161429a0414167e64eede8d3b53ad728e3 | c88b0cbeda0edf9e745e324ef942a504e27d4f87 | /longevity/eLife revision/__modWeighting.R | 793555e80893ff3b7e62c323c70fe372ac7a276c | [] | no_license | Diapadion/R | 5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27 | 1485c43c0e565a947fdc058a1019a74bdd97f265 | refs/heads/master | 2023-05-12T04:21:15.761115 | 2023-04-27T16:26:35 | 2023-04-27T16:26:35 | 28,046,921 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 56,515 | r | __modWeighting.R | ### Models to weight and aggregate
library(frailtypack)
library(parfm)
library(AICcmodavg)
### How can the specification vary?
# x2
# In/exclude Origin
# x2
# In/exclude sex
# x2
# Leave confounded
# Residulaize data by DoB
# x4
# Method used
# pwe - piecewise equidistant
# pwp - piecewise percent
# wb - Weibull
# gm - Gompertz
### Piecewise
pf.u.o.s.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) + as.factor(sex) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.u.x.x.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.u.x.s.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(sex) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.u.o.x.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.r.o.s.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) + as.factor(sex) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.r.x.x.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.r.x.s.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(sex) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.r.o.x.pwe = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-equi' , nb.int = 3
)
pf.u.o.s.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) + as.factor(sex) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.u.x.x.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.u.x.s.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(sex) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.u.o.x.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.o.s.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) + as.factor(sex) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.x.x.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.x.s.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(sex) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.o.x.pwp = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data = datX, hazard = 'Piecewise-per' , nb.int = 3
)
### Weibull
pf.u.o.s.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) + as.factor(sex) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
pf.u.x.x.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
pf.u.x.s.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(sex) + #as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
pf.u.o.x.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
pf.r.x.s.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(sex) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
pf.r.o.x.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
pf.r.o.s.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) + as.factor(sex) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma', maxit = 1000, recurrentAG=F
,data = datX, hazard = 'Weibull'
)
pf.r.x.x.wb = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data = datX, hazard = 'Weibull'
)
### Gompertz via parfm
pf.u.o.s.gm = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + as.factor(sex) + Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.u.x.s.gm = parfm(Surv(age_pr, age, status) ~
as.factor(sex) + Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.u.o.x.gm = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.u.x.x.gm = parfm(Surv(age_pr, age, status) ~
Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.r.o.s.gm = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + as.factor(sex) + Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.r.x.s.gm = parfm(Surv(age_pr, age, status) ~
as.factor(sex) + Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.r.o.x.gm = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
pf.r.x.x.gm = parfm(Surv(age_pr, age, status) ~
Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX, dist='gompertz', method ='ucminf')
### AIC weighting average regression tables ###
# Remember: unadjusted and adjusted models are not based on the same data,
# so they should be tabulated separately
# # rememeber, AIC = 2k - 2LL
# # so, if all K = 1; LL = 1 - (AIC)/2
#
# AIC2LL <- function(AIC,k=1){
# LL = k - AIC/2
# return(LL)
# }
LLv.u = c(pf.u.o.s.wb$logLik,pf.u.o.s.pwe$logLik,pf.u.o.s.pwp$logLik,logLik(pf.u.o.s.gm)[1],
pf.u.o.x.wb$logLik,pf.u.o.x.pwe$logLik,pf.u.o.x.pwp$logLik,logLik(pf.u.o.x.gm)[1],
pf.u.x.s.wb$logLik,pf.u.x.s.pwe$logLik,pf.u.x.s.pwp$logLik,logLik(pf.u.x.s.gm)[1],
pf.u.x.x.wb$logLik,pf.u.x.x.pwe$logLik,pf.u.x.x.pwp$logLik,logLik(pf.u.x.x.gm)[1])
LLv.r = c(pf.r.o.s.wb$logLik,
pf.r.o.s.pwe$logLik,pf.r.o.s.pwp$logLik,logLik(pf.r.o.s.gm)[1],
pf.r.o.x.wb$logLik,pf.r.o.x.pwe$logLik,pf.r.o.x.pwp$logLik,logLik(pf.r.o.x.gm)[1],
pf.r.x.s.wb$logLik,pf.r.x.s.pwe$logLik,pf.r.x.s.pwp$logLik,logLik(pf.r.x.s.gm)[1],
pf.r.x.x.wb$logLik,pf.r.x.x.pwe$logLik,pf.r.x.x.pwp$logLik,logLik(pf.r.x.x.gm)[1])
Kv.u = c(pf.u.o.s.wb$npar,pf.u.o.s.pwe$npar,pf.u.o.s.pwp$npar,attr(logLik(pf.u.o.s.gm),'df'),
pf.u.o.x.wb$npar,pf.u.o.x.pwe$npar,pf.u.o.x.pwp$npar,attr(logLik(pf.u.o.x.gm),'df'),
pf.u.x.s.wb$npar,pf.u.x.s.pwe$npar,pf.u.x.s.pwp$npar,attr(logLik(pf.u.x.s.gm),'df'),
pf.u.x.x.wb$npar,pf.u.x.x.pwe$npar,pf.u.x.x.pwp$npar,attr(logLik(pf.u.x.x.gm),'df'))
Kv.r = c(pf.r.o.s.wb$npar,
pf.r.o.s.pwe$npar,pf.r.o.s.pwp$npar,attr(logLik(pf.r.o.s.gm),'df'),
pf.r.o.x.wb$npar,pf.r.o.x.pwe$npar,pf.r.o.x.pwp$npar,attr(logLik(pf.r.o.x.gm),'df'),
pf.r.x.s.wb$npar,pf.r.x.s.pwe$npar,pf.r.x.s.pwp$npar,attr(logLik(pf.r.x.s.gm),'df'),
pf.r.x.x.wb$npar,pf.r.x.x.pwe$npar,pf.r.x.x.pwp$npar,attr(logLik(pf.r.x.x.gm),'df'))
mnv.u = c('pf.u.o.s.wb','pf.u.o.s.pwe','pf.u.o.s.pwp','pf.u.o.s.gm',
'pf.u.o.x.wb','pf.u.o.x.pwe','pf.u.o.x.pwp','pf.u.o.x.gm',
'pf.u.x.s.wb','pf.u.x.s.pwe','pf.u.x.s.pwp','pf.u.x.s.gm',
'pf.u.x.x.wb','pf.u.x.x.pwe','pf.u.x.x.pwp','pf.u.x.x.gm')
mnv.r = c('pf.r.o.s.wb',
'pf.r.o.s.pwe','pf.r.o.s.pwp','pf.r.o.s.gm',
'pf.r.o.x.wb','pf.r.o.x.pwe','pf.r.o.x.pwp','pf.r.o.x.gm',
'pf.r.x.s.wb','pf.r.x.s.pwe','pf.r.x.s.pwp','pf.r.x.s.gm',
'pf.r.x.x.wb','pf.r.x.x.pwe','pf.r.x.x.pwp','pf.r.x.x.gm')
### Wild
# Model-averaged hazard ratio for origin (WILD vs reference). Only the 8
# models that include the origin term (the *.o.* sets) contribute, hence the
# [1:8] subsets passed to modavgCustom() below.
estv.u.W = c(pf.u.o.s.wb$coef['originWILD'],pf.u.o.s.pwe$coef['originWILD'],pf.u.o.s.pwp$coef['originWILD'],coef(pf.u.o.s.gm)['as.factor(origin)WILD'],
pf.u.o.x.wb$coef['originWILD'],pf.u.o.x.pwe$coef['originWILD'],pf.u.o.x.pwp$coef['originWILD'],coef(pf.u.o.x.gm)['as.factor(origin)WILD'])
estv.r.W = c(pf.r.o.s.wb$coef['originWILD'],
pf.r.o.s.pwe$coef['originWILD'],pf.r.o.s.pwp$coef['originWILD'],coef(pf.r.o.s.gm)['as.factor(origin)WILD'])
# 'ind' is the position of the focal coefficient in the full origin+sex model
# (origin=1, sex=2, Agr=3, Dom=4, Ext=5, Con=6, Neu=7, Opn=8). Models without
# origin use ind-1, models without both origin and sex use ind-2. For parfm
# (gm) fits the SE is read from the result table, whose leading rows hold
# baseline/frailty parameters -- hence the ind+3 / ind+2 / ind+1 row offsets;
# TODO(review): confirm those offsets against the parfm output layout.
ind=1
sev.u.W=c(sqrt(diag(pf.u.o.s.wb$varH))[ind],sqrt(diag(pf.u.o.s.pwe$varH))[ind],sqrt(diag(pf.u.o.s.pwp$varH))[ind],pf.u.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.u.o.x.wb$varH))[ind],sqrt(diag(pf.u.o.x.pwe$varH))[ind],sqrt(diag(pf.u.o.x.pwp$varH))[ind],pf.u.o.x.gm[ind+2,'SE'])
sev.r.W=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind],sqrt(diag(pf.r.o.x.pwe$varH))[ind],sqrt(diag(pf.r.o.x.pwp$varH))[ind],pf.r.o.x.gm[ind+2,'SE'])
# Model-averaged estimate and 95% CI, exponentiated to the hazard-ratio scale.
mavg.u.W = modavgCustom(LLv.u[1:8],Kv.u[1:8],mnv.u[1:8],estv.u.W,sev.u.W,second.ord=F)
exp(mavg.u.W$Mod.avg.est)
exp(mavg.u.W$Lower.CL)
exp(mavg.u.W$Upper.CL)
mavg.r.W = modavgCustom(LLv.r[1:8],Kv.r[1:8],mnv.r[1:8],estv.r.W,sev.r.W,second.ord=F)
exp(mavg.r.W$Mod.avg.est)
exp(mavg.r.W$Lower.CL)
exp(mavg.r.W$Upper.CL)
### Sex
# Model-averaged hazard ratio for sex (level 1 vs reference). Only the 8
# models that include the sex term (o.s and x.s sets, i.e. positions 1:4 and
# 9:12 of the full model lists) contribute.
estv.u.S = c(pf.u.o.s.wb$coef['sex1'],pf.u.o.s.pwe$coef['sex1'],pf.u.o.s.pwp$coef['sex1'],coef(pf.u.o.s.gm)['as.factor(sex)1'],
pf.u.x.s.wb$coef['sex1'],pf.u.x.s.pwe$coef['sex1'],pf.u.x.s.pwp$coef['sex1'],coef(pf.u.x.s.gm)['as.factor(sex)1'])
estv.r.S = c(pf.r.o.s.wb$coef['sex1'],
pf.r.o.s.pwe$coef['sex1'],pf.r.o.s.pwp$coef['sex1'],coef(pf.r.o.s.gm)['as.factor(sex)1'],
pf.r.x.s.wb$coef['sex1'],pf.r.x.s.pwe$coef['sex1'],pf.r.x.s.pwp$coef['sex1'],coef(pf.r.x.s.gm)['as.factor(sex)1'])
ind = 2
sev.u.S=c(sqrt(diag(pf.u.o.s.wb$varH))[ind],sqrt(diag(pf.u.o.s.pwe$varH))[ind],sqrt(diag(pf.u.o.s.pwp$varH))[ind],pf.u.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.u.x.s.wb$varH))[ind-1],sqrt(diag(pf.u.x.s.pwe$varH))[ind-1],sqrt(diag(pf.u.x.s.pwp$varH))[ind-1],pf.u.x.s.gm[ind+2,'SE'])
sev.r.S=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'])
mavg.u.S = modavgCustom(LLv.u[c(1:4,9:12)],Kv.u[c(1:4,9:12)],mnv.u[c(1:4,9:12)],estv.u.S,sev.u.S,second.ord=F)
exp(mavg.u.S$Mod.avg.est)
exp(mavg.u.S$Lower.CL)
exp(mavg.u.S$Upper.CL)
mavg.r.S = modavgCustom(LLv.r[c(1:4,9:12)],Kv.r[c(1:4,9:12)],mnv.r[c(1:4,9:12)],estv.r.S,sev.r.S,second.ord=F)
exp(mavg.r.S$Mod.avg.est)
exp(mavg.r.S$Lower.CL)
exp(mavg.r.S$Upper.CL)
### Agreeableness
# Agreeableness appears in all 16 models, so the full LL/K/name vectors are
# used. The rescored (r) models keep the original Agr_CZ score (no rescored
# Agreeableness variable exists, unlike D/E/N/O below).
estv.u.A = c(pf.u.o.s.wb$coef['Agr_CZ'],pf.u.o.s.pwe$coef['Agr_CZ'],pf.u.o.s.pwp$coef['Agr_CZ'],coef(pf.u.o.s.gm)['Agr_CZ'],
pf.u.o.x.wb$coef['Agr_CZ'],pf.u.o.x.pwe$coef['Agr_CZ'],pf.u.o.x.pwp$coef['Agr_CZ'],coef(pf.u.o.x.gm)['Agr_CZ'],
pf.u.x.s.wb$coef['Agr_CZ'],pf.u.x.s.pwe$coef['Agr_CZ'],pf.u.x.s.pwp$coef['Agr_CZ'],coef(pf.u.x.s.gm)['Agr_CZ'],
pf.u.x.x.wb$coef['Agr_CZ'],pf.u.x.x.pwe$coef['Agr_CZ'],pf.u.x.x.pwp$coef['Agr_CZ'],coef(pf.u.x.x.gm)['Agr_CZ'])
estv.r.A = c(pf.r.o.s.wb$coef['Agr_CZ'],
pf.r.o.s.pwe$coef['Agr_CZ'],pf.r.o.s.pwp$coef['Agr_CZ'],coef(pf.r.o.s.gm)['Agr_CZ'],
pf.r.o.x.wb$coef['Agr_CZ'],pf.r.o.x.pwe$coef['Agr_CZ'],pf.r.o.x.pwp$coef['Agr_CZ'],coef(pf.r.o.x.gm)['Agr_CZ'],
pf.r.x.s.wb$coef['Agr_CZ'],pf.r.x.s.pwe$coef['Agr_CZ'],pf.r.x.s.pwp$coef['Agr_CZ'],coef(pf.r.x.s.gm)['Agr_CZ'],
pf.r.x.x.wb$coef['Agr_CZ'],pf.r.x.x.pwe$coef['Agr_CZ'],pf.r.x.x.pwp$coef['Agr_CZ'],coef(pf.r.x.x.gm)['Agr_CZ'])
ind = 3
# Standard errors for the Agreeableness coefficient, unrescored (u) models.
# 'ind' (= 3) is the coefficient position in the full origin+sex model;
# models without origin use ind-1, models without origin and sex use ind-2.
# parfm (gm) SEs are read from the result table, offset past the leading
# baseline/frailty rows (ind+3 / ind+2 / ind+1).
# Fix: the last entry previously read pf.u.x.s.gm -- the wrong model. The
# fourth row of this vector belongs to the pf.u.x.x.* set, so the SE must
# come from pf.u.x.x.gm (cf. the parallel rescored block, which uses
# pf.r.x.x.gm there).
sev.u.A <- c(
  sqrt(diag(pf.u.o.s.wb$varH))[ind], sqrt(diag(pf.u.o.s.pwe$varH))[ind], sqrt(diag(pf.u.o.s.pwp$varH))[ind], pf.u.o.s.gm[ind + 3, 'SE'],
  sqrt(diag(pf.u.o.x.wb$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwe$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwp$varH))[ind - 1], pf.u.o.x.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.s.wb$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwe$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwp$varH))[ind - 1], pf.u.x.s.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.x.wb$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwe$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwp$varH))[ind - 2], pf.u.x.x.gm[ind + 1, 'SE']
)
# Standard errors for the Agreeableness coefficient, rescored (r) models.
# Same index logic as the unrescored block: ind for o.s models, ind-1 for
# models missing one covariate set, ind-2 for those missing both; gm SEs are
# read from the parfm result table at offset rows.
sev.r.A=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind-1],sqrt(diag(pf.r.o.x.pwe$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp$varH))[ind-1],pf.r.o.x.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb$varH))[ind-2],sqrt(diag(pf.r.x.x.pwe$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp$varH))[ind-2],pf.r.x.x.gm[ind+1,'SE'])
# Model-averaged Agreeableness effect over all 16 models, as hazard ratios.
mavg.u.A = modavgCustom(LLv.u,Kv.u,mnv.u,estv.u.A,sev.u.A,second.ord=F)
exp(mavg.u.A$Mod.avg.est)
exp(mavg.u.A$Lower.CL)
exp(mavg.u.A$Upper.CL)
mavg.r.A = modavgCustom(LLv.r,Kv.r,mnv.r,estv.r.A,sev.r.A,second.ord=F)
exp(mavg.r.A$Mod.avg.est)
exp(mavg.r.A$Lower.CL)
exp(mavg.r.A$Upper.CL)
### Dominance
# Unrescored models use Dom_CZ; rescored models use the re-rated D.r2.DoB
# score instead.
estv.u.D = c(pf.u.o.s.wb$coef['Dom_CZ'],pf.u.o.s.pwe$coef['Dom_CZ'],pf.u.o.s.pwp$coef['Dom_CZ'],coef(pf.u.o.s.gm)['Dom_CZ'],
pf.u.o.x.wb$coef['Dom_CZ'],pf.u.o.x.pwe$coef['Dom_CZ'],pf.u.o.x.pwp$coef['Dom_CZ'],coef(pf.u.o.x.gm)['Dom_CZ'],
pf.u.x.s.wb$coef['Dom_CZ'],pf.u.x.s.pwe$coef['Dom_CZ'],pf.u.x.s.pwp$coef['Dom_CZ'],coef(pf.u.x.s.gm)['Dom_CZ'],
pf.u.x.x.wb$coef['Dom_CZ'],pf.u.x.x.pwe$coef['Dom_CZ'],pf.u.x.x.pwp$coef['Dom_CZ'],coef(pf.u.x.x.gm)['Dom_CZ'])
estv.r.D = c(pf.r.o.s.wb$coef['D.r2.DoB'],
pf.r.o.s.pwe$coef['D.r2.DoB'],pf.r.o.s.pwp$coef['D.r2.DoB'],coef(pf.r.o.s.gm)['D.r2.DoB'],
pf.r.o.x.wb$coef['D.r2.DoB'],pf.r.o.x.pwe$coef['D.r2.DoB'],pf.r.o.x.pwp$coef['D.r2.DoB'],coef(pf.r.o.x.gm)['D.r2.DoB'],
pf.r.x.s.wb$coef['D.r2.DoB'],pf.r.x.s.pwe$coef['D.r2.DoB'],pf.r.x.s.pwp$coef['D.r2.DoB'],coef(pf.r.x.s.gm)['D.r2.DoB'],
pf.r.x.x.wb$coef['D.r2.DoB'],pf.r.x.x.pwe$coef['D.r2.DoB'],pf.r.x.x.pwp$coef['D.r2.DoB'],coef(pf.r.x.x.gm)['D.r2.DoB'])
ind = 4
# Standard errors for the Dominance coefficient, unrescored (u) models.
# Index logic as elsewhere: ind (= 4) in the full o.s model, ind-1 with one
# covariate set dropped, ind-2 with both; parfm (gm) SEs come from the result
# table at offset rows (ind+3 / ind+2 / ind+1).
# Fix: the last entry previously read pf.u.x.s.gm -- a copy-paste error; the
# fourth row belongs to the pf.u.x.x.* model set, so the SE must come from
# pf.u.x.x.gm (the parallel rescored block correctly uses pf.r.x.x.gm).
sev.u.D <- c(
  sqrt(diag(pf.u.o.s.wb$varH))[ind], sqrt(diag(pf.u.o.s.pwe$varH))[ind], sqrt(diag(pf.u.o.s.pwp$varH))[ind], pf.u.o.s.gm[ind + 3, 'SE'],
  sqrt(diag(pf.u.o.x.wb$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwe$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwp$varH))[ind - 1], pf.u.o.x.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.s.wb$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwe$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwp$varH))[ind - 1], pf.u.x.s.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.x.wb$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwe$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwp$varH))[ind - 2], pf.u.x.x.gm[ind + 1, 'SE']
)
# Standard errors for the Dominance coefficient, rescored (r) models.
sev.r.D=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind-1],sqrt(diag(pf.r.o.x.pwe$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp$varH))[ind-1],pf.r.o.x.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb$varH))[ind-2],sqrt(diag(pf.r.x.x.pwe$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp$varH))[ind-2],pf.r.x.x.gm[ind+1,'SE'])
# Model-averaged Dominance effect over all 16 models, as hazard ratios.
mavg.u.D = modavgCustom(LLv.u,Kv.u,mnv.u,estv.u.D,sev.u.D,second.ord=F)
exp(mavg.u.D$Mod.avg.est)
exp(mavg.u.D$Lower.CL)
exp(mavg.u.D$Upper.CL)
mavg.r.D = modavgCustom(LLv.r,Kv.r,mnv.r,estv.r.D,sev.r.D,second.ord=F)
exp(mavg.r.D$Mod.avg.est)
exp(mavg.r.D$Lower.CL)
exp(mavg.r.D$Upper.CL)
### Extraversion
# Unrescored models use Ext_CZ; rescored models use the re-rated E.r2.DoB.
estv.u.E = c(pf.u.o.s.wb$coef['Ext_CZ'],pf.u.o.s.pwe$coef['Ext_CZ'],pf.u.o.s.pwp$coef['Ext_CZ'],coef(pf.u.o.s.gm)['Ext_CZ'],
pf.u.o.x.wb$coef['Ext_CZ'],pf.u.o.x.pwe$coef['Ext_CZ'],pf.u.o.x.pwp$coef['Ext_CZ'],coef(pf.u.o.x.gm)['Ext_CZ'],
pf.u.x.s.wb$coef['Ext_CZ'],pf.u.x.s.pwe$coef['Ext_CZ'],pf.u.x.s.pwp$coef['Ext_CZ'],coef(pf.u.x.s.gm)['Ext_CZ'],
pf.u.x.x.wb$coef['Ext_CZ'],pf.u.x.x.pwe$coef['Ext_CZ'],pf.u.x.x.pwp$coef['Ext_CZ'],coef(pf.u.x.x.gm)['Ext_CZ'])
estv.r.E = c(pf.r.o.s.wb$coef['E.r2.DoB'],
pf.r.o.s.pwe$coef['E.r2.DoB'],pf.r.o.s.pwp$coef['E.r2.DoB'],coef(pf.r.o.s.gm)['E.r2.DoB'],
pf.r.o.x.wb$coef['E.r2.DoB'],pf.r.o.x.pwe$coef['E.r2.DoB'],pf.r.o.x.pwp$coef['E.r2.DoB'],coef(pf.r.o.x.gm)['E.r2.DoB'],
pf.r.x.s.wb$coef['E.r2.DoB'],pf.r.x.s.pwe$coef['E.r2.DoB'],pf.r.x.s.pwp$coef['E.r2.DoB'],coef(pf.r.x.s.gm)['E.r2.DoB'],
pf.r.x.x.wb$coef['E.r2.DoB'],pf.r.x.x.pwe$coef['E.r2.DoB'],pf.r.x.x.pwp$coef['E.r2.DoB'],coef(pf.r.x.x.gm)['E.r2.DoB'])
ind = 5
# Standard errors for the Extraversion coefficient, unrescored (u) models.
# Index logic as elsewhere: ind (= 5) in the full o.s model, ind-1 with one
# covariate set dropped, ind-2 with both; parfm (gm) SEs come from the result
# table at offset rows (ind+3 / ind+2 / ind+1).
# Fix: the last entry previously read pf.u.x.s.gm -- a copy-paste error; the
# fourth row belongs to the pf.u.x.x.* model set, so the SE must come from
# pf.u.x.x.gm (the parallel rescored block correctly uses pf.r.x.x.gm).
sev.u.E <- c(
  sqrt(diag(pf.u.o.s.wb$varH))[ind], sqrt(diag(pf.u.o.s.pwe$varH))[ind], sqrt(diag(pf.u.o.s.pwp$varH))[ind], pf.u.o.s.gm[ind + 3, 'SE'],
  sqrt(diag(pf.u.o.x.wb$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwe$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwp$varH))[ind - 1], pf.u.o.x.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.s.wb$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwe$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwp$varH))[ind - 1], pf.u.x.s.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.x.wb$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwe$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwp$varH))[ind - 2], pf.u.x.x.gm[ind + 1, 'SE']
)
# Standard errors for the Extraversion coefficient, rescored (r) models.
sev.r.E=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind-1],sqrt(diag(pf.r.o.x.pwe$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp$varH))[ind-1],pf.r.o.x.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb$varH))[ind-2],sqrt(diag(pf.r.x.x.pwe$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp$varH))[ind-2],pf.r.x.x.gm[ind+1,'SE'])
# Model-averaged Extraversion effect over all 16 models, as hazard ratios.
mavg.u.E = modavgCustom(LLv.u,Kv.u,mnv.u,estv.u.E,sev.u.E,second.ord=F)
exp(mavg.u.E$Mod.avg.est)
exp(mavg.u.E$Lower.CL)
exp(mavg.u.E$Upper.CL)
mavg.r.E = modavgCustom(LLv.r,Kv.r,mnv.r,estv.r.E,sev.r.E,second.ord=F)
exp(mavg.r.E$Mod.avg.est)
exp(mavg.r.E$Lower.CL)
exp(mavg.r.E$Upper.CL)
### Conscientiousness
# Both model families use Con_CZ (no rescored Conscientiousness score).
estv.u.C = c(pf.u.o.s.wb$coef['Con_CZ'],pf.u.o.s.pwe$coef['Con_CZ'],pf.u.o.s.pwp$coef['Con_CZ'],coef(pf.u.o.s.gm)['Con_CZ'],
pf.u.o.x.wb$coef['Con_CZ'],pf.u.o.x.pwe$coef['Con_CZ'],pf.u.o.x.pwp$coef['Con_CZ'],coef(pf.u.o.x.gm)['Con_CZ'],
pf.u.x.s.wb$coef['Con_CZ'],pf.u.x.s.pwe$coef['Con_CZ'],pf.u.x.s.pwp$coef['Con_CZ'],coef(pf.u.x.s.gm)['Con_CZ'],
pf.u.x.x.wb$coef['Con_CZ'],pf.u.x.x.pwe$coef['Con_CZ'],pf.u.x.x.pwp$coef['Con_CZ'],coef(pf.u.x.x.gm)['Con_CZ'])
estv.r.C = c(pf.r.o.s.wb$coef['Con_CZ'],
pf.r.o.s.pwe$coef['Con_CZ'],pf.r.o.s.pwp$coef['Con_CZ'],coef(pf.r.o.s.gm)['Con_CZ'],
pf.r.o.x.wb$coef['Con_CZ'],pf.r.o.x.pwe$coef['Con_CZ'],pf.r.o.x.pwp$coef['Con_CZ'],coef(pf.r.o.x.gm)['Con_CZ'],
pf.r.x.s.wb$coef['Con_CZ'],pf.r.x.s.pwe$coef['Con_CZ'],pf.r.x.s.pwp$coef['Con_CZ'],coef(pf.r.x.s.gm)['Con_CZ'],
pf.r.x.x.wb$coef['Con_CZ'],pf.r.x.x.pwe$coef['Con_CZ'],pf.r.x.x.pwp$coef['Con_CZ'],coef(pf.r.x.x.gm)['Con_CZ'])
ind = 6
# Standard errors for the Conscientiousness coefficient, unrescored (u) models.
# Index logic as elsewhere: ind (= 6) in the full o.s model, ind-1 with one
# covariate set dropped, ind-2 with both; parfm (gm) SEs come from the result
# table at offset rows (ind+3 / ind+2 / ind+1).
# Fix: the last entry previously read pf.u.x.s.gm -- a copy-paste error; the
# fourth row belongs to the pf.u.x.x.* model set, so the SE must come from
# pf.u.x.x.gm (the parallel rescored block correctly uses pf.r.x.x.gm).
sev.u.C <- c(
  sqrt(diag(pf.u.o.s.wb$varH))[ind], sqrt(diag(pf.u.o.s.pwe$varH))[ind], sqrt(diag(pf.u.o.s.pwp$varH))[ind], pf.u.o.s.gm[ind + 3, 'SE'],
  sqrt(diag(pf.u.o.x.wb$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwe$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwp$varH))[ind - 1], pf.u.o.x.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.s.wb$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwe$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwp$varH))[ind - 1], pf.u.x.s.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.x.wb$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwe$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwp$varH))[ind - 2], pf.u.x.x.gm[ind + 1, 'SE']
)
# Standard errors for the Conscientiousness coefficient, rescored (r) models.
sev.r.C=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind-1],sqrt(diag(pf.r.o.x.pwe$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp$varH))[ind-1],pf.r.o.x.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb$varH))[ind-2],sqrt(diag(pf.r.x.x.pwe$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp$varH))[ind-2],pf.r.x.x.gm[ind+1,'SE'])
# Model-averaged Conscientiousness effect over all 16 models, as hazard ratios.
mavg.u.C = modavgCustom(LLv.u,Kv.u,mnv.u,estv.u.C,sev.u.C,second.ord=F)
exp(mavg.u.C$Mod.avg.est)
exp(mavg.u.C$Lower.CL)
exp(mavg.u.C$Upper.CL)
mavg.r.C = modavgCustom(LLv.r,Kv.r,mnv.r,estv.r.C,sev.r.C,second.ord=F)
exp(mavg.r.C$Mod.avg.est)
exp(mavg.r.C$Lower.CL)
exp(mavg.r.C$Upper.CL)
### Neuroticism
# Unrescored models use Neu_CZ; rescored models use the re-rated N.r1.DoB.
estv.u.N = c(pf.u.o.s.wb$coef['Neu_CZ'],pf.u.o.s.pwe$coef['Neu_CZ'],pf.u.o.s.pwp$coef['Neu_CZ'],coef(pf.u.o.s.gm)['Neu_CZ'],
pf.u.o.x.wb$coef['Neu_CZ'],pf.u.o.x.pwe$coef['Neu_CZ'],pf.u.o.x.pwp$coef['Neu_CZ'],coef(pf.u.o.x.gm)['Neu_CZ'],
pf.u.x.s.wb$coef['Neu_CZ'],pf.u.x.s.pwe$coef['Neu_CZ'],pf.u.x.s.pwp$coef['Neu_CZ'],coef(pf.u.x.s.gm)['Neu_CZ'],
pf.u.x.x.wb$coef['Neu_CZ'],pf.u.x.x.pwe$coef['Neu_CZ'],pf.u.x.x.pwp$coef['Neu_CZ'],coef(pf.u.x.x.gm)['Neu_CZ'])
estv.r.N = c(pf.r.o.s.wb$coef['N.r1.DoB'],
pf.r.o.s.pwe$coef['N.r1.DoB'],pf.r.o.s.pwp$coef['N.r1.DoB'],coef(pf.r.o.s.gm)['N.r1.DoB'],
pf.r.o.x.wb$coef['N.r1.DoB'],pf.r.o.x.pwe$coef['N.r1.DoB'],pf.r.o.x.pwp$coef['N.r1.DoB'],coef(pf.r.o.x.gm)['N.r1.DoB'],
pf.r.x.s.wb$coef['N.r1.DoB'],pf.r.x.s.pwe$coef['N.r1.DoB'],pf.r.x.s.pwp$coef['N.r1.DoB'],coef(pf.r.x.s.gm)['N.r1.DoB'],
pf.r.x.x.wb$coef['N.r1.DoB'],pf.r.x.x.pwe$coef['N.r1.DoB'],pf.r.x.x.pwp$coef['N.r1.DoB'],coef(pf.r.x.x.gm)['N.r1.DoB'])
ind = 7
# Standard errors for the Neuroticism coefficient, unrescored (u) models.
# Index logic as elsewhere: ind (= 7) in the full o.s model, ind-1 with one
# covariate set dropped, ind-2 with both; parfm (gm) SEs come from the result
# table at offset rows (ind+3 / ind+2 / ind+1).
# Fix: the last entry previously read pf.u.x.s.gm -- a copy-paste error; the
# fourth row belongs to the pf.u.x.x.* model set, so the SE must come from
# pf.u.x.x.gm (the parallel rescored block correctly uses pf.r.x.x.gm).
sev.u.N <- c(
  sqrt(diag(pf.u.o.s.wb$varH))[ind], sqrt(diag(pf.u.o.s.pwe$varH))[ind], sqrt(diag(pf.u.o.s.pwp$varH))[ind], pf.u.o.s.gm[ind + 3, 'SE'],
  sqrt(diag(pf.u.o.x.wb$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwe$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwp$varH))[ind - 1], pf.u.o.x.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.s.wb$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwe$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwp$varH))[ind - 1], pf.u.x.s.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.x.wb$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwe$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwp$varH))[ind - 2], pf.u.x.x.gm[ind + 1, 'SE']
)
# Standard errors for the Neuroticism coefficient, rescored (r) models.
sev.r.N=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind-1],sqrt(diag(pf.r.o.x.pwe$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp$varH))[ind-1],pf.r.o.x.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb$varH))[ind-2],sqrt(diag(pf.r.x.x.pwe$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp$varH))[ind-2],pf.r.x.x.gm[ind+1,'SE'])
# Model-averaged Neuroticism effect over all 16 models, as hazard ratios.
mavg.u.N = modavgCustom(LLv.u,Kv.u,mnv.u,estv.u.N,sev.u.N,second.ord=F)
exp(mavg.u.N$Mod.avg.est)
exp(mavg.u.N$Lower.CL)
exp(mavg.u.N$Upper.CL)
mavg.r.N = modavgCustom(LLv.r,Kv.r,mnv.r,estv.r.N,sev.r.N,second.ord=F)
exp(mavg.r.N$Mod.avg.est)
exp(mavg.r.N$Lower.CL)
exp(mavg.r.N$Upper.CL)
### Openness
# Unrescored models use Opn_CZ; rescored models use the re-rated O.r2.DoB.
estv.u.O = c(pf.u.o.s.wb$coef['Opn_CZ'],pf.u.o.s.pwe$coef['Opn_CZ'],pf.u.o.s.pwp$coef['Opn_CZ'],coef(pf.u.o.s.gm)['Opn_CZ'],
pf.u.o.x.wb$coef['Opn_CZ'],pf.u.o.x.pwe$coef['Opn_CZ'],pf.u.o.x.pwp$coef['Opn_CZ'],coef(pf.u.o.x.gm)['Opn_CZ'],
pf.u.x.s.wb$coef['Opn_CZ'],pf.u.x.s.pwe$coef['Opn_CZ'],pf.u.x.s.pwp$coef['Opn_CZ'],coef(pf.u.x.s.gm)['Opn_CZ'],
pf.u.x.x.wb$coef['Opn_CZ'],pf.u.x.x.pwe$coef['Opn_CZ'],pf.u.x.x.pwp$coef['Opn_CZ'],coef(pf.u.x.x.gm)['Opn_CZ'])
estv.r.O = c(pf.r.o.s.wb$coef['O.r2.DoB'],
pf.r.o.s.pwe$coef['O.r2.DoB'],pf.r.o.s.pwp$coef['O.r2.DoB'],coef(pf.r.o.s.gm)['O.r2.DoB'],
pf.r.o.x.wb$coef['O.r2.DoB'],pf.r.o.x.pwe$coef['O.r2.DoB'],pf.r.o.x.pwp$coef['O.r2.DoB'],coef(pf.r.o.x.gm)['O.r2.DoB'],
pf.r.x.s.wb$coef['O.r2.DoB'],pf.r.x.s.pwe$coef['O.r2.DoB'],pf.r.x.s.pwp$coef['O.r2.DoB'],coef(pf.r.x.s.gm)['O.r2.DoB'],
pf.r.x.x.wb$coef['O.r2.DoB'],pf.r.x.x.pwe$coef['O.r2.DoB'],pf.r.x.x.pwp$coef['O.r2.DoB'],coef(pf.r.x.x.gm)['O.r2.DoB'])
ind = 8
# Standard errors for the Openness coefficient, unrescored (u) models.
# Index logic as elsewhere: ind (= 8) in the full o.s model, ind-1 with one
# covariate set dropped, ind-2 with both; parfm (gm) SEs come from the result
# table at offset rows (ind+3 / ind+2 / ind+1).
# Fix: the last entry previously read pf.u.x.s.gm -- a copy-paste error; the
# fourth row belongs to the pf.u.x.x.* model set, so the SE must come from
# pf.u.x.x.gm (the parallel rescored block correctly uses pf.r.x.x.gm).
sev.u.O <- c(
  sqrt(diag(pf.u.o.s.wb$varH))[ind], sqrt(diag(pf.u.o.s.pwe$varH))[ind], sqrt(diag(pf.u.o.s.pwp$varH))[ind], pf.u.o.s.gm[ind + 3, 'SE'],
  sqrt(diag(pf.u.o.x.wb$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwe$varH))[ind - 1], sqrt(diag(pf.u.o.x.pwp$varH))[ind - 1], pf.u.o.x.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.s.wb$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwe$varH))[ind - 1], sqrt(diag(pf.u.x.s.pwp$varH))[ind - 1], pf.u.x.s.gm[ind + 2, 'SE'],
  sqrt(diag(pf.u.x.x.wb$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwe$varH))[ind - 2], sqrt(diag(pf.u.x.x.pwp$varH))[ind - 2], pf.u.x.x.gm[ind + 1, 'SE']
)
# Standard errors for the Openness coefficient, rescored (r) models.
sev.r.O=c(sqrt(diag(pf.r.o.s.wb$varH))[ind],
sqrt(diag(pf.r.o.s.pwe$varH))[ind],sqrt(diag(pf.r.o.s.pwp$varH))[ind],pf.r.o.s.gm[ind+3,'SE'],
sqrt(diag(pf.r.o.x.wb$varH))[ind-1],sqrt(diag(pf.r.o.x.pwe$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp$varH))[ind-1],pf.r.o.x.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.s.wb$varH))[ind-1],sqrt(diag(pf.r.x.s.pwe$varH))[ind-1],sqrt(diag(pf.r.x.s.pwp$varH))[ind-1],pf.r.x.s.gm[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb$varH))[ind-2],sqrt(diag(pf.r.x.x.pwe$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp$varH))[ind-2],pf.r.x.x.gm[ind+1,'SE'])
# Model-averaged Openness effect over all 16 models, as hazard ratios.
mavg.u.O = modavgCustom(LLv.u,Kv.u,mnv.u,estv.u.O,sev.u.O,second.ord=F)
exp(mavg.u.O$Mod.avg.est)
exp(mavg.u.O$Lower.CL)
exp(mavg.u.O$Upper.CL)
mavg.r.O = modavgCustom(LLv.r,Kv.r,mnv.r,estv.r.O,sev.r.O,second.ord=F)
exp(mavg.r.O$Mod.avg.est)
exp(mavg.r.O$Lower.CL)
exp(mavg.r.O$Upper.CL)
### Splitting the process by sex
### Males ###
# Male-only (sex == 1) piecewise-per-hazard frailty model, unrescored
# personality scores, no origin term.
# Fix: the formula previously listed Ext_CZ twice and omitted Opn_CZ, so the
# Openness predictor was silently missing from this model -- every sibling
# unrescored formula (e.g. pf.u.o.x.pwp.m, pf.u.x.x.wb.m, pf.u.x.x.pwp.f)
# ends with Opn_CZ, and downstream code reads this fit's coef['Opn_CZ'].
pf.u.x.x.pwp.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
                                Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
                              data = datX[datX$sex == 1, ], hazard = 'Piecewise-per', nb.int = 3
)
# Male-only (sex == 1) models. Naming: pf.{u|r}.{o|x}.x.{pwp|wb|gm}.m =
# {unrescored|rescored} scores, {with|without} origin, no sex term (males
# only), hazard {piecewise-per | Weibull | Gompertz via parfm}.
pf.u.o.x.pwp.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data=datX[datX$sex==1,], hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.x.x.pwp.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data=datX[datX$sex==1,], hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.o.x.pwp.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data=datX[datX$sex==1,], hazard = 'Piecewise-per' , nb.int = 3
)
# Weibull-hazard fits with gamma-distributed shared frailty.
pf.u.x.x.wb.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma'
,data=datX[datX$sex==1,], hazard = 'Weibull'
)
pf.u.o.x.wb.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma'
,data=datX[datX$sex==1,], hazard = 'Weibull'
)
pf.r.o.x.wb.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data=datX[datX$sex==1,], hazard = 'Weibull'
)
pf.r.x.x.wb.m = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data=datX[datX$sex==1,], hazard = 'Weibull'
)
# Parametric Gompertz fits (parfm) with gamma frailty, ucminf optimizer.
pf.u.o.x.gm.m = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==1,], dist='gompertz', method ='ucminf')
pf.u.x.x.gm.m = parfm(Surv(age_pr, age, status) ~
Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==1,], dist='gompertz', method ='ucminf')
pf.r.o.x.gm.m = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==1,], dist='gompertz', method ='ucminf')
pf.r.x.x.gm.m = parfm(Surv(age_pr, age, status) ~
Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==1,], dist='gompertz', method ='ucminf')
### Females ###
# Female-only (sex == 0) counterparts of the male models above; same naming
# scheme with suffix .f.
pf.u.x.x.pwp.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data=datX[datX$sex==0,], hazard = 'Piecewise-per' , nb.int = 3
)
pf.u.o.x.pwp.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
data=datX[datX$sex==0,], hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.x.x.pwp.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data=datX[datX$sex==0,], hazard = 'Piecewise-per' , nb.int = 3
)
pf.r.o.x.pwp.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
data=datX[datX$sex==0,], hazard = 'Piecewise-per' , nb.int = 3
)
# Weibull-hazard fits with gamma frailty (log-normal frailty was tried and
# left commented out).
pf.u.x.x.wb.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma' #LogN'
,data=datX[datX$sex==0,], hazard = 'Weibull'
)
pf.u.o.x.wb.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ,
RandDist = 'Gamma' #'LogN'
,data=datX[datX$sex==0,], hazard = 'Weibull'
)
pf.r.o.x.wb.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
as.factor(origin) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data=datX[datX$sex==0,], hazard = 'Weibull'
)
pf.r.x.x.wb.f = frailtyPenal(Surv(age_pr, age, status) ~ cluster(sample) +
Agr_CZ + D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB,
RandDist = 'Gamma'
,data=datX[datX$sex==0,], hazard = 'Weibull'
)
# Parametric Gompertz fits (parfm) with gamma frailty, ucminf optimizer.
pf.u.o.x.gm.f = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==0,], dist='gompertz', method ='ucminf')
pf.u.x.x.gm.f = parfm(Surv(age_pr, age, status) ~
Agr_CZ
+ Dom_CZ + Ext_CZ + Con_CZ + Neu_CZ + Opn_CZ
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==0,], dist='gompertz', method ='ucminf')
pf.r.o.x.gm.f = parfm(Surv(age_pr, age, status) ~
as.factor(origin) + Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==0,], dist='gompertz', method ='ucminf')
pf.r.x.x.gm.f = parfm(Surv(age_pr, age, status) ~
Agr_CZ
+ D.r2.DoB + E.r2.DoB + Con_CZ + N.r1.DoB + O.r2.DoB
,cluster="sample"
, frailty = 'gamma'
, data=datX[datX$sex==0,], dist='gompertz', method ='ucminf')
### Model averaged parameter tables
### MALES
# Six candidate models per family (u / r): {o.x, x.x} covariate sets, each
# with hazards wb, pwp, gm. Log-likelihoods, parameter counts (K) and model
# names are assembled in that fixed order for modavgCustom().
LLv.u.m = c(pf.u.o.x.wb.m$logLik,pf.u.o.x.pwp.m$logLik,logLik(pf.u.o.x.gm.m)[1],
pf.u.x.x.wb.m$logLik,pf.u.x.x.pwp.m$logLik,logLik(pf.u.x.x.gm.m)[1])
LLv.r.m = c(pf.r.o.x.wb.m$logLik,pf.r.o.x.pwp.m$logLik,logLik(pf.r.o.x.gm.m)[1],
pf.r.x.x.wb.m$logLik,pf.r.x.x.pwp.m$logLik,logLik(pf.r.x.x.gm.m)[1])
Kv.u.m = c(pf.u.o.x.wb.m$npar,pf.u.o.x.pwp.m$npar,attr(logLik(pf.u.o.x.gm.m),'df'),
pf.u.x.x.wb.m$npar,pf.u.x.x.pwp.m$npar,attr(logLik(pf.u.x.x.gm.m),'df'))
Kv.r.m = c(pf.r.o.x.wb.m$npar,pf.r.o.x.pwp.m$npar,attr(logLik(pf.r.o.x.gm.m),'df'),
pf.r.x.x.wb.m$npar,pf.r.x.x.pwp.m$npar,attr(logLik(pf.r.x.x.gm.m),'df'))
mnv.u.m = c('pf.u.o.x.wb.m','pf.u.o.x.pwp.m','pf.u.o.x.gm.m',
'pf.u.x.x.wb.m','pf.u.x.x.pwp.m','pf.u.x.x.gm.m')
mnv.r.m = c('pf.r.o.x.wb.m','pf.r.o.x.pwp.m','pf.r.o.x.gm.m',
'pf.r.x.x.wb.m','pf.r.x.x.pwp.m','pf.r.x.x.gm.m')
### Wild
# Origin effect: only the 3 models containing origin (the o.x set) are
# averaged. Index logic: 'ind' is the coefficient position in the full model
# with origin; o.x models use ind-1 offsets for frailtyPenal fits and
# table-row offsets ind+2 / ind+1 for parfm fits (leading rows hold
# baseline/frailty parameters -- TODO confirm against parfm output layout).
estv.u.m.or = c(pf.u.o.x.wb.m$coef['originWILD'],pf.u.o.x.pwp.m$coef['originWILD'],coef(pf.u.o.x.gm.m)['as.factor(origin)WILD'])
estv.r.m.or = c(pf.r.o.x.wb.m$coef['originWILD'],pf.r.o.x.pwp.m$coef['originWILD'],coef(pf.r.o.x.gm.m)['as.factor(origin)WILD'])
ind = 2
sev.u.m.or=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'])
sev.r.m.or=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'])
mavg.u.W.m = modavgCustom(LLv.u.m[1:3],Kv.u.m[1:3],mnv.u.m[1:3],estv.u.m.or,sev.u.m.or,second.ord=F)
exp(mavg.u.W.m$Mod.avg.est)
exp(mavg.u.W.m$Lower.CL)
exp(mavg.u.W.m$Upper.CL)
mavg.r.W.m = modavgCustom(LLv.r.m[1:3],Kv.r.m[1:3],mnv.r.m[1:3],estv.r.m.or,sev.r.m.or,second.ord=F)
exp(mavg.r.W.m$Mod.avg.est)
exp(mavg.r.W.m$Lower.CL)
exp(mavg.r.W.m$Upper.CL)
### Agr
# Personality traits appear in all 6 male models, so the full vectors are
# averaged. All model-averaged estimates/CIs are exponentiated to the
# hazard-ratio scale.
estv.u.m.A = c(pf.u.o.x.wb.m$coef['Agr_CZ'],pf.u.o.x.pwp.m$coef['Agr_CZ'],coef(pf.u.o.x.gm.m)['Agr_CZ'],
pf.u.x.x.wb.m$coef['Agr_CZ'],pf.u.x.x.pwp.m$coef['Agr_CZ'],coef(pf.u.x.x.gm.m)['Agr_CZ'])
estv.r.m.A = c(pf.r.o.x.wb.m$coef['Agr_CZ'],pf.r.o.x.pwp.m$coef['Agr_CZ'],coef(pf.r.o.x.gm.m)['Agr_CZ'],
pf.r.x.x.wb.m$coef['Agr_CZ'],pf.r.x.x.pwp.m$coef['Agr_CZ'],coef(pf.r.x.x.gm.m)['Agr_CZ'])
ind = 3
sev.u.m.A=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.m$varH))[ind-2],pf.u.x.x.gm.m[ind+1,'SE'])
sev.r.m.A=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.m$varH))[ind-2],pf.r.x.x.gm.m[ind+1,'SE'])
mavg.u.A.m = modavgCustom(LLv.u.m,Kv.u.m,mnv.u.m,estv.u.m.A,sev.u.m.A,second.ord=F)
exp(mavg.u.A.m$Mod.avg.est)
exp(mavg.u.A.m$Lower.CL)
exp(mavg.u.A.m$Upper.CL)
mavg.r.A.m = modavgCustom(LLv.r.m,Kv.r.m,mnv.r.m,estv.r.m.A,sev.r.m.A,second.ord=F)
exp(mavg.r.A.m$Mod.avg.est)
exp(mavg.r.A.m$Lower.CL)
exp(mavg.r.A.m$Upper.CL)
### Dom
estv.u.m.D = c(pf.u.o.x.wb.m$coef['Dom_CZ'],pf.u.o.x.pwp.m$coef['Dom_CZ'],coef(pf.u.o.x.gm.m)['Dom_CZ'],
pf.u.x.x.wb.m$coef['Dom_CZ'],pf.u.x.x.pwp.m$coef['Dom_CZ'],coef(pf.u.x.x.gm.m)['Dom_CZ'])
estv.r.m.D = c(pf.r.o.x.wb.m$coef['D.r2.DoB'],pf.r.o.x.pwp.m$coef['D.r2.DoB'],coef(pf.r.o.x.gm.m)['D.r2.DoB'],
pf.r.x.x.wb.m$coef['D.r2.DoB'],pf.r.x.x.pwp.m$coef['D.r2.DoB'],coef(pf.r.x.x.gm.m)['D.r2.DoB'])
ind = 4
sev.u.m.D=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.m$varH))[ind-2],pf.u.x.x.gm.m[ind+1,'SE'])
sev.r.m.D=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.m$varH))[ind-2],pf.r.x.x.gm.m[ind+1,'SE'])
mavg.u.D.m = modavgCustom(LLv.u.m,Kv.u.m,mnv.u.m,estv.u.m.D,sev.u.m.D,second.ord=F)
exp(mavg.u.D.m$Mod.avg.est)
exp(mavg.u.D.m$Lower.CL)
exp(mavg.u.D.m$Upper.CL)
mavg.r.D.m = modavgCustom(LLv.r.m,Kv.r.m,mnv.r.m,estv.r.m.D,sev.r.m.D,second.ord=F)
exp(mavg.r.D.m$Mod.avg.est)
exp(mavg.r.D.m$Lower.CL)
exp(mavg.r.D.m$Upper.CL)
### Ext
estv.u.m.E = c(pf.u.o.x.wb.m$coef['Ext_CZ'],pf.u.o.x.pwp.m$coef['Ext_CZ'],coef(pf.u.o.x.gm.m)['Ext_CZ'],
pf.u.x.x.wb.m$coef['Ext_CZ'],pf.u.x.x.pwp.m$coef['Ext_CZ'],coef(pf.u.x.x.gm.m)['Ext_CZ'])
estv.r.m.E = c(pf.r.o.x.wb.m$coef['E.r2.DoB'],pf.r.o.x.pwp.m$coef['E.r2.DoB'],coef(pf.r.o.x.gm.m)['E.r2.DoB'],
pf.r.x.x.wb.m$coef['E.r2.DoB'],pf.r.x.x.pwp.m$coef['E.r2.DoB'],coef(pf.r.x.x.gm.m)['E.r2.DoB'])
ind = 5
sev.u.m.E=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.m$varH))[ind-2],pf.u.x.x.gm.m[ind+1,'SE'])
sev.r.m.E=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.m$varH))[ind-2],pf.r.x.x.gm.m[ind+1,'SE'])
mavg.u.E.m = modavgCustom(LLv.u.m,Kv.u.m,mnv.u.m,estv.u.m.E,sev.u.m.E,second.ord=F)
exp(mavg.u.E.m$Mod.avg.est)
exp(mavg.u.E.m$Lower.CL)
exp(mavg.u.E.m$Upper.CL)
mavg.r.E.m = modavgCustom(LLv.r.m,Kv.r.m,mnv.r.m,estv.r.m.E,sev.r.m.E,second.ord=F)
exp(mavg.r.E.m$Mod.avg.est)
exp(mavg.r.E.m$Lower.CL)
exp(mavg.r.E.m$Upper.CL)
### Con
estv.u.m.C = c(pf.u.o.x.wb.m$coef['Con_CZ'],pf.u.o.x.pwp.m$coef['Con_CZ'],coef(pf.u.o.x.gm.m)['Con_CZ'],
pf.u.x.x.wb.m$coef['Con_CZ'],pf.u.x.x.pwp.m$coef['Con_CZ'],coef(pf.u.x.x.gm.m)['Con_CZ'])
estv.r.m.C = c(pf.r.o.x.wb.m$coef['Con_CZ'],pf.r.o.x.pwp.m$coef['Con_CZ'],coef(pf.r.o.x.gm.m)['Con_CZ'],
pf.r.x.x.wb.m$coef['Con_CZ'],pf.r.x.x.pwp.m$coef['Con_CZ'],coef(pf.r.x.x.gm.m)['Con_CZ'])
ind = 6
sev.u.m.C=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.m$varH))[ind-2],pf.u.x.x.gm.m[ind+1,'SE'])
sev.r.m.C=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.m$varH))[ind-2],pf.r.x.x.gm.m[ind+1,'SE'])
mavg.u.C.m = modavgCustom(LLv.u.m,Kv.u.m,mnv.u.m,estv.u.m.C,sev.u.m.C,second.ord=F)
exp(mavg.u.C.m$Mod.avg.est)
exp(mavg.u.C.m$Lower.CL)
exp(mavg.u.C.m$Upper.CL)
mavg.r.C.m = modavgCustom(LLv.r.m,Kv.r.m,mnv.r.m,estv.r.m.C,sev.r.m.C,second.ord=F)
exp(mavg.r.C.m$Mod.avg.est)
exp(mavg.r.C.m$Lower.CL)
exp(mavg.r.C.m$Upper.CL)
### Neu
estv.u.m.N = c(pf.u.o.x.wb.m$coef['Neu_CZ'],pf.u.o.x.pwp.m$coef['Neu_CZ'],coef(pf.u.o.x.gm.m)['Neu_CZ'],
pf.u.x.x.wb.m$coef['Neu_CZ'],pf.u.x.x.pwp.m$coef['Neu_CZ'],coef(pf.u.x.x.gm.m)['Neu_CZ'])
estv.r.m.N = c(pf.r.o.x.wb.m$coef['N.r1.DoB'],pf.r.o.x.pwp.m$coef['N.r1.DoB'],coef(pf.r.o.x.gm.m)['N.r1.DoB'],
pf.r.x.x.wb.m$coef['N.r1.DoB'],pf.r.x.x.pwp.m$coef['N.r1.DoB'],coef(pf.r.x.x.gm.m)['N.r1.DoB'])
ind = 7
sev.u.m.N=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.m$varH))[ind-2],pf.u.x.x.gm.m[ind+1,'SE'])
sev.r.m.N=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.m$varH))[ind-2],pf.r.x.x.gm.m[ind+1,'SE'])
mavg.u.N.m = modavgCustom(LLv.u.m,Kv.u.m,mnv.u.m,estv.u.m.N,sev.u.m.N,second.ord=F)
exp(mavg.u.N.m$Mod.avg.est)
exp(mavg.u.N.m$Lower.CL)
exp(mavg.u.N.m$Upper.CL)
mavg.r.N.m = modavgCustom(LLv.r.m,Kv.r.m,mnv.r.m,estv.r.m.N,sev.r.m.N,second.ord=F)
exp(mavg.r.N.m$Mod.avg.est)
exp(mavg.r.N.m$Lower.CL)
exp(mavg.r.N.m$Upper.CL)
### Opn
estv.u.m.O = c(pf.u.o.x.wb.m$coef['Opn_CZ'],pf.u.o.x.pwp.m$coef['Opn_CZ'],coef(pf.u.o.x.gm.m)['Opn_CZ'],
pf.u.x.x.wb.m$coef['Opn_CZ'],pf.u.x.x.pwp.m$coef['Opn_CZ'],coef(pf.u.x.x.gm.m)['Opn_CZ'])
estv.r.m.O = c(pf.r.o.x.wb.m$coef['O.r2.DoB'],pf.r.o.x.pwp.m$coef['O.r2.DoB'],coef(pf.r.o.x.gm.m)['O.r2.DoB'],
pf.r.x.x.wb.m$coef['O.r2.DoB'],pf.r.x.x.pwp.m$coef['O.r2.DoB'],coef(pf.r.x.x.gm.m)['O.r2.DoB'])
ind = 8
sev.u.m.O=c(sqrt(diag(pf.u.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.m$varH))[ind-1],pf.u.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.m$varH))[ind-2],pf.u.x.x.gm.m[ind+1,'SE'])
sev.r.m.O=c(sqrt(diag(pf.r.o.x.wb.m$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.m$varH))[ind-1],pf.r.o.x.gm.m[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.m$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.m$varH))[ind-2],pf.r.x.x.gm.m[ind+1,'SE'])
mavg.u.O.m = modavgCustom(LLv.u.m,Kv.u.m,mnv.u.m,estv.u.m.O,sev.u.m.O,second.ord=F)
exp(mavg.u.O.m$Mod.avg.est)
exp(mavg.u.O.m$Lower.CL)
exp(mavg.u.O.m$Upper.CL)
mavg.r.O.m = modavgCustom(LLv.r.m,Kv.r.m,mnv.r.m,estv.r.m.O,sev.r.m.O,second.ord=F)
exp(mavg.r.O.m$Mod.avg.est)
exp(mavg.r.O.m$Lower.CL)
exp(mavg.r.O.m$Upper.CL)
### FEMALES
# Female counterpart of the male model-averaging tables: six candidate models
# per family ({o.x, x.x} x {wb, pwp, gm}), same ordering and index logic.
LLv.u.f = c(pf.u.o.x.wb.f$logLik,pf.u.o.x.pwp.f$logLik,logLik(pf.u.o.x.gm.f)[1],
pf.u.x.x.wb.f$logLik,pf.u.x.x.pwp.f$logLik,logLik(pf.u.x.x.gm.f)[1])
LLv.r.f = c(pf.r.o.x.wb.f$logLik,pf.r.o.x.pwp.f$logLik,logLik(pf.r.o.x.gm.f)[1],
pf.r.x.x.wb.f$logLik,pf.r.x.x.pwp.f$logLik,logLik(pf.r.x.x.gm.f)[1])
Kv.u.f = c(pf.u.o.x.wb.f$npar,pf.u.o.x.pwp.f$npar,attr(logLik(pf.u.o.x.gm.f),'df'),
pf.u.x.x.wb.f$npar,pf.u.x.x.pwp.f$npar,attr(logLik(pf.u.x.x.gm.f),'df'))
Kv.r.f = c(pf.r.o.x.wb.f$npar,pf.r.o.x.pwp.f$npar,attr(logLik(pf.r.o.x.gm.f),'df'),
pf.r.x.x.wb.f$npar,pf.r.x.x.pwp.f$npar,attr(logLik(pf.r.x.x.gm.f),'df'))
mnv.u.f = c('pf.u.o.x.wb.f','pf.u.o.x.pwp.f','pf.u.o.x.gm.f',
'pf.u.x.x.wb.f','pf.u.x.x.pwp.f','pf.u.x.x.gm.f')
mnv.r.f = c('pf.r.o.x.wb.f','pf.r.o.x.pwp.f','pf.r.o.x.gm.f',
'pf.r.x.x.wb.f','pf.r.x.x.pwp.f','pf.r.x.x.gm.f')
### Wild
# Origin effect: only the 3 origin-containing models are averaged.
estv.u.f.or = c(pf.u.o.x.wb.f$coef['originWILD'],pf.u.o.x.pwp.f$coef['originWILD'],coef(pf.u.o.x.gm.f)['as.factor(origin)WILD'])
estv.r.f.or = c(pf.r.o.x.wb.f$coef['originWILD'],pf.r.o.x.pwp.f$coef['originWILD'],coef(pf.r.o.x.gm.f)['as.factor(origin)WILD'])
ind = 2
sev.u.f.or=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'])
sev.r.f.or=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'])
mavg.u.W.f = modavgCustom(LLv.u.f[1:3],Kv.u.f[1:3],mnv.u.f[1:3],estv.u.f.or,sev.u.f.or,second.ord=F)
exp(mavg.u.W.f$Mod.avg.est)
exp(mavg.u.W.f$Lower.CL)
exp(mavg.u.W.f$Upper.CL)
mavg.r.W.f = modavgCustom(LLv.r.f[1:3],Kv.r.f[1:3],mnv.r.f[1:3],estv.r.f.or,sev.r.f.or,second.ord=F)
exp(mavg.r.W.f$Mod.avg.est)
exp(mavg.r.W.f$Lower.CL)
exp(mavg.r.W.f$Upper.CL)
### Agr
# Trait effects are averaged over all 6 models; estimates/CIs are
# exponentiated to hazard ratios.
estv.u.f.A = c(pf.u.o.x.wb.f$coef['Agr_CZ'],pf.u.o.x.pwp.f$coef['Agr_CZ'],coef(pf.u.o.x.gm.f)['Agr_CZ'],
pf.u.x.x.wb.f$coef['Agr_CZ'],pf.u.x.x.pwp.f$coef['Agr_CZ'],coef(pf.u.x.x.gm.f)['Agr_CZ'])
estv.r.f.A = c(pf.r.o.x.wb.f$coef['Agr_CZ'],pf.r.o.x.pwp.f$coef['Agr_CZ'],coef(pf.r.o.x.gm.f)['Agr_CZ'],
pf.r.x.x.wb.f$coef['Agr_CZ'],pf.r.x.x.pwp.f$coef['Agr_CZ'],coef(pf.r.x.x.gm.f)['Agr_CZ'])
ind = 3
sev.u.f.A=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.f$varH))[ind-2],pf.u.x.x.gm.f[ind+1,'SE'])
sev.r.f.A=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.f$varH))[ind-2],pf.r.x.x.gm.f[ind+1,'SE'])
mavg.u.A.f = modavgCustom(LLv.u.f,Kv.u.f,mnv.u.f,estv.u.f.A,sev.u.f.A,second.ord=F)
exp(mavg.u.A.f$Mod.avg.est)
exp(mavg.u.A.f$Lower.CL)
exp(mavg.u.A.f$Upper.CL)
mavg.r.A.f = modavgCustom(LLv.r.f,Kv.r.f,mnv.r.f,estv.r.f.A,sev.r.f.A,second.ord=F)
exp(mavg.r.A.f$Mod.avg.est)
exp(mavg.r.A.f$Lower.CL)
exp(mavg.r.A.f$Upper.CL)
### Dom
estv.u.f.D = c(pf.u.o.x.wb.f$coef['Dom_CZ'],pf.u.o.x.pwp.f$coef['Dom_CZ'],coef(pf.u.o.x.gm.f)['Dom_CZ'],
pf.u.x.x.wb.f$coef['Dom_CZ'],pf.u.x.x.pwp.f$coef['Dom_CZ'],coef(pf.u.x.x.gm.f)['Dom_CZ'])
estv.r.f.D = c(pf.r.o.x.wb.f$coef['D.r2.DoB'],pf.r.o.x.pwp.f$coef['D.r2.DoB'],coef(pf.r.o.x.gm.f)['D.r2.DoB'],
pf.r.x.x.wb.f$coef['D.r2.DoB'],pf.r.x.x.pwp.f$coef['D.r2.DoB'],coef(pf.r.x.x.gm.f)['D.r2.DoB'])
ind = 4
sev.u.f.D=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.f$varH))[ind-2],pf.u.x.x.gm.f[ind+1,'SE'])
sev.r.f.D=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.f$varH))[ind-2],pf.r.x.x.gm.f[ind+1,'SE'])
mavg.u.D.f = modavgCustom(LLv.u.f,Kv.u.f,mnv.u.f,estv.u.f.D,sev.u.f.D,second.ord=F)
exp(mavg.u.D.f$Mod.avg.est)
exp(mavg.u.D.f$Lower.CL)
exp(mavg.u.D.f$Upper.CL)
mavg.r.D.f = modavgCustom(LLv.r.f,Kv.r.f,mnv.r.f,estv.r.f.D,sev.r.f.D,second.ord=F)
exp(mavg.r.D.f$Mod.avg.est)
exp(mavg.r.D.f$Lower.CL)
exp(mavg.r.D.f$Upper.CL)
### Ext
estv.u.f.E = c(pf.u.o.x.wb.f$coef['Ext_CZ'],pf.u.o.x.pwp.f$coef['Ext_CZ'],coef(pf.u.o.x.gm.f)['Ext_CZ'],
pf.u.x.x.wb.f$coef['Ext_CZ'],pf.u.x.x.pwp.f$coef['Ext_CZ'],coef(pf.u.x.x.gm.f)['Ext_CZ'])
estv.r.f.E = c(pf.r.o.x.wb.f$coef['E.r2.DoB'],pf.r.o.x.pwp.f$coef['E.r2.DoB'],coef(pf.r.o.x.gm.f)['E.r2.DoB'],
pf.r.x.x.wb.f$coef['E.r2.DoB'],pf.r.x.x.pwp.f$coef['E.r2.DoB'],coef(pf.r.x.x.gm.f)['E.r2.DoB'])
ind = 5
sev.u.f.E=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.f$varH))[ind-2],pf.u.x.x.gm.f[ind+1,'SE'])
sev.r.f.E=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.f$varH))[ind-2],pf.r.x.x.gm.f[ind+1,'SE'])
mavg.u.E.f = modavgCustom(LLv.u.f,Kv.u.f,mnv.u.f,estv.u.f.E,sev.u.f.E,second.ord=F)
exp(mavg.u.E.f$Mod.avg.est)
exp(mavg.u.E.f$Lower.CL)
exp(mavg.u.E.f$Upper.CL)
mavg.r.E.f = modavgCustom(LLv.r.f,Kv.r.f,mnv.r.f,estv.r.f.E,sev.r.f.E,second.ord=F)
exp(mavg.r.E.f$Mod.avg.est)
exp(mavg.r.E.f$Lower.CL)
exp(mavg.r.E.f$Upper.CL)
### Con
estv.u.f.C = c(pf.u.o.x.wb.f$coef['Con_CZ'],pf.u.o.x.pwp.f$coef['Con_CZ'],coef(pf.u.o.x.gm.f)['Con_CZ'],
pf.u.x.x.wb.f$coef['Con_CZ'],pf.u.x.x.pwp.f$coef['Con_CZ'],coef(pf.u.x.x.gm.f)['Con_CZ'])
estv.r.f.C = c(pf.r.o.x.wb.f$coef['Con_CZ'],pf.r.o.x.pwp.f$coef['Con_CZ'],coef(pf.r.o.x.gm.f)['Con_CZ'],
pf.r.x.x.wb.f$coef['Con_CZ'],pf.r.x.x.pwp.f$coef['Con_CZ'],coef(pf.r.x.x.gm.f)['Con_CZ'])
ind = 6
sev.u.f.C=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.f$varH))[ind-2],pf.u.x.x.gm.f[ind+1,'SE'])
sev.r.f.C=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.f$varH))[ind-2],pf.r.x.x.gm.f[ind+1,'SE'])
mavg.u.C.f = modavgCustom(LLv.u.f,Kv.u.f,mnv.u.f,estv.u.f.C,sev.u.f.C,second.ord=F)
exp(mavg.u.C.f$Mod.avg.est)
exp(mavg.u.C.f$Lower.CL)
exp(mavg.u.C.f$Upper.CL)
mavg.r.C.f = modavgCustom(LLv.r.f,Kv.r.f,mnv.r.f,estv.r.f.C,sev.r.f.C,second.ord=F)
exp(mavg.r.C.f$Mod.avg.est)
exp(mavg.r.C.f$Lower.CL)
exp(mavg.r.C.f$Upper.CL)
### Neu
estv.u.f.N = c(pf.u.o.x.wb.f$coef['Neu_CZ'],pf.u.o.x.pwp.f$coef['Neu_CZ'],coef(pf.u.o.x.gm.f)['Neu_CZ'],
pf.u.x.x.wb.f$coef['Neu_CZ'],pf.u.x.x.pwp.f$coef['Neu_CZ'],coef(pf.u.x.x.gm.f)['Neu_CZ'])
estv.r.f.N = c(pf.r.o.x.wb.f$coef['N.r1.DoB'],pf.r.o.x.pwp.f$coef['N.r1.DoB'],coef(pf.r.o.x.gm.f)['N.r1.DoB'],
pf.r.x.x.wb.f$coef['N.r1.DoB'],pf.r.x.x.pwp.f$coef['N.r1.DoB'],coef(pf.r.x.x.gm.f)['N.r1.DoB'])
ind = 7
sev.u.f.N=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.f$varH))[ind-2],pf.u.x.x.gm.f[ind+1,'SE'])
sev.r.f.N=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.f$varH))[ind-2],pf.r.x.x.gm.f[ind+1,'SE'])
mavg.u.N.f = modavgCustom(LLv.u.f,Kv.u.f,mnv.u.f,estv.u.f.N,sev.u.f.N,second.ord=F)
exp(mavg.u.N.f$Mod.avg.est)
exp(mavg.u.N.f$Lower.CL)
exp(mavg.u.N.f$Upper.CL)
mavg.r.N.f = modavgCustom(LLv.r.f,Kv.r.f,mnv.r.f,estv.r.f.N,sev.r.f.N,second.ord=F)
exp(mavg.r.N.f$Mod.avg.est)
exp(mavg.r.N.f$Lower.CL)
exp(mavg.r.N.f$Upper.CL)
### Opn
estv.u.f.O = c(pf.u.o.x.wb.f$coef['Opn_CZ'],pf.u.o.x.pwp.f$coef['Opn_CZ'],coef(pf.u.o.x.gm.f)['Opn_CZ'],
pf.u.x.x.wb.f$coef['Opn_CZ'],pf.u.x.x.pwp.f$coef['Opn_CZ'],coef(pf.u.x.x.gm.f)['Opn_CZ'])
estv.r.f.O = c(pf.r.o.x.wb.f$coef['O.r2.DoB'],pf.r.o.x.pwp.f$coef['O.r2.DoB'],coef(pf.r.o.x.gm.f)['O.r2.DoB'],
pf.r.x.x.wb.f$coef['O.r2.DoB'],pf.r.x.x.pwp.f$coef['O.r2.DoB'],coef(pf.r.x.x.gm.f)['O.r2.DoB'])
ind = 8
sev.u.f.O=c(sqrt(diag(pf.u.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.u.o.x.pwp.f$varH))[ind-1],pf.u.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.u.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.u.x.x.pwp.f$varH))[ind-2],pf.u.x.x.gm.f[ind+1,'SE'])
sev.r.f.O=c(sqrt(diag(pf.r.o.x.wb.f$varH))[ind-1],sqrt(diag(pf.r.o.x.pwp.f$varH))[ind-1],pf.r.o.x.gm.f[ind+2,'SE'],
sqrt(diag(pf.r.x.x.wb.f$varH))[ind-2],sqrt(diag(pf.r.x.x.pwp.f$varH))[ind-2],pf.r.x.x.gm.f[ind+1,'SE'])
mavg.u.O.f = modavgCustom(LLv.u.f,Kv.u.f,mnv.u.f,estv.u.f.O,sev.u.f.O,second.ord=F)
exp(mavg.u.O.f$Mod.avg.est)
exp(mavg.u.O.f$Lower.CL)
exp(mavg.u.O.f$Upper.CL)
mavg.r.O.f = modavgCustom(LLv.r.f,Kv.r.f,mnv.r.f,estv.r.f.O,sev.r.f.O,second.ord=F)
exp(mavg.r.O.f$Mod.avg.est)
exp(mavg.r.O.f$Lower.CL)
exp(mavg.r.O.f$Upper.CL)
|
3f6f0b70298c9ea3559cb9a89f885237e418064a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PerformanceAnalytics/examples/chart.ACF.Rd.R | e5368bc70c0af427e7e33273fdcc9781d62f9e56 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | chart.ACF.Rd.R | library(PerformanceAnalytics)
### Name: chart.ACF
### Title: Create ACF chart or ACF with PACF two-panel chart
### Aliases: chart.ACF chart.ACFplus chart.ACFplus
### ** Examples
# Extracted package example: plot the ACF/PACF two-panel chart for the first
# EDHEC index series (drop=FALSE keeps it a one-column object).
data(edhec)
chart.ACFplus(edhec[,1,drop=FALSE])
|
fd05e2832cd31b885dc308bd651b171ac4cac52d | daa9f4956f44861ac9e1e946ba70b80b7961cb2a | /man/lgb.save_model.Rd | 3d73cfccdb3147b10b0362031f8537e1fa9e36e6 | [] | no_license | 6chaoran/suw | 8f0a89003ef5b13916d0da181a8a063667b6d928 | 148220991a578d66cd80360138de76883471b6f4 | refs/heads/master | 2022-11-21T03:31:43.176205 | 2020-07-28T12:27:42 | 2020-07-28T12:27:42 | 277,235,745 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 621 | rd | lgb.save_model.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_lightgbm.R
\name{lgb.save_model}
\alias{lgb.save_model}
\title{lgb.save_model}
\usage{
lgb.save_model(bst, model.dir, model.id = "", verbose = T)
}
\arguments{
\item{bst}{boosting model from \code{lgb.train.cv}}
\item{model.dir}{base path for saving model object and meta-data}
\item{model.id}{identifier for model}
\item{verbose}{whether display saving information, defaults at TRUE}
}
\description{
save trained LGB model together with related meta-data
}
\examples{
\dontrun{
save.lgb.model(bst, './saved_model', 'base_model')
}
}
|
43f36cb55bb24ffa3676a9902696b928bad0100c | 9b7888b0b9ecab83ac55e020d2c59917d6452f39 | /R/translatePattern.R | 9b97cd1c8ea71ec01788cf438391dedb74311512 | [] | no_license | jianhong/ChIPpeakAnno | 703580b9ce6a7708f60d92a78a3714bc9d82a562 | d2136538718c58881a420c9985c53c6e89e223f4 | refs/heads/devel | 2023-08-22T15:29:29.888828 | 2023-07-25T14:57:28 | 2023-07-25T14:57:28 | 186,652,664 | 10 | 6 | null | 2023-09-01T20:48:22 | 2019-05-14T15:41:28 | R | UTF-8 | R | false | false | 1,263 | r | translatePattern.R | #' translate pattern from IUPAC Extended Genetic Alphabet to regular expression
#'
#' translate pattern containing the IUPAC nucleotide ambiguity codes to regular
#' expression. For example,Y->[C|T], R-> [A|G], S-> [G|C], W-> [A|T], K->
#' [T|U|G], M-> [A|C], B-> [C|G|T], D-> [A|G|T], H-> [A|C|T], V-> [A|C|G] and
#' N-> [A|C|T|G].
#'
#'
#' @param pattern a character vector with the IUPAC nucleotide ambiguity codes
#' @return a character vector with the pattern represented as regular
#' expression
#' @author Lihua Julie Zhu
#' @seealso countPatternInSeqs, summarizePatternInPeaks
#' @keywords misc
#' @export
#' @examples
#'
#' pattern1 = "AACCNWMK"
#' translatePattern(pattern1)
#'
translatePattern <- function(pattern)
{
pattern = toupper(pattern)
pattern = gsub("Y","[C|T]", pattern)
pattern = gsub("R", "[A|G]", pattern)
pattern = gsub("S", "[G|C]", pattern)
pattern = gsub("W", "[A|T]", pattern)
pattern = gsub("K", "[T|U|G]", pattern)
pattern = gsub("M", "[A|C]", pattern)
pattern = gsub("B", "[C|G|T]", pattern)
pattern = gsub("D", "[A|G|T]", pattern)
pattern = gsub("H", "[A|C|T]", pattern)
pattern = gsub("V", "[A|C|G]", pattern)
pattern = gsub("N", "[A|C|T|G]", pattern)
pattern
}
|
b3e235bc731597db3f210125017c3f9d045e2515 | 6aee3782cc2969eec39e9bddeddb799e6733f5c2 | /Data608/hw3/Q1/global.R | fb56e29e7acba1b1a366c83208a4eaa7d720a754 | [] | no_license | talham/CUNY | 7ab32980c98044945d32a848552946a1f8c03bff | 9491fad9ece55e66c9da7fb46b11528258a5201b | refs/heads/master | 2023-08-11T10:52:21.237501 | 2023-07-28T23:28:42 | 2023-07-28T23:28:42 | 63,563,053 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,011 | r | global.R | #load the data
#Question 1
#As a researcher, you frequently compare mortality rates from particular causes across
#different States. You need a visualization that will let you see (for 2010 only) the crude
#mortality rate, across all States, from one cause (for example, Neoplasms, which are
#effectively cancers). Create a visualization that allows you to rank States by crude mortality
#for each cause of death.
library(RCurl)
cdc_data<-read.csv(text=getURL("https://raw.githubusercontent.com/charleyferrari/CUNY_DATA608/master/lecture3/data/cleaned-cdc-mortality-1999-2010-2.csv"),header=TRUE,sep=",")
cdc_data$State<-as.factor(cdc_data$State)
cdc_data$ICD.Chapter<-as.character(cdc_data$ICD.Chapter)
colors = c("#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043","red3")
cdc_data$colorBuckets <- as.numeric(cut(cdc_data$Crude.Rate, c(4.6,24,50.5,67.3,192.6,249.7,478.4)))
leg.txt <- c("<4.6", ">4.6,<24", ">24,<50.5", "<50.5,<67.3", "<67.3,<192.6", "<192.6,<249.7","<249.7,<478.4") |
aa30e0688acfd1d69ba3a4da7b6fb0f6662830f9 | 0fc113f4f05e6c0cfde50df7b6f3d784bb0e2a6c | /Replication Codes/main_TablesFigures.R | 6e076f263ae7a6f22190d4d78d6763a95893a9de | [] | no_license | gvschweinitz/ES_18_The-Joint-Dynamics-of-Sovereign-Ratings-and-Government-Bond-Yields | 96a0ab927da25a3fa35524ffa30a43c36ad3e94a | 224360fb55f1041b23d579e9619a56704d8190ee | refs/heads/main | 2023-01-24T02:01:15.397095 | 2020-12-01T09:28:57 | 2020-12-01T09:28:57 | 316,429,699 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,917 | r | main_TablesFigures.R | # This script creates all Tables and Figures in
# "The joint dynamics of sovereign ratings and government bond yields"
# Makram El-Shagi and Gregor von Schweinitz, Journal of Banking and Finance (2018)
library("plyr")
wd <- getwd()
load(paste(wd,"/ratingfirst_asym.RData",sep=""))
source(paste(wd,"/estim_smooth_post_bs.R",sep=""))
do.col <- FALSE # TRUE: colored figures
mai.default <- par("mai") # Needed for resetting plot settings
#--------------------------------------------
#SUMMARY STATISTICS
IMFmatch <- matrix("",46,2)
IMFmatch[,1] <- as.character(unique(data$country))
IMFmatch[,2] <- "Advanced"
IMFmatch[c(1,5,7:9,18,19,24,25,29:31,39,43,44),2] <- "Developing"
IMFmatch[c(16,32,35,36),2] <- "Transition"
data$IMFclass <- IMFmatch[pmatch(data$country,IMFmatch[,1],duplicates.ok=TRUE),2]
# Table A2 and A3
# Per-country summary statistics: sample span, moments of (log) yields and of
# the numeric rating variable, and the largest 12-month rating changes.
sumstat_country <- ddply(data,.(country),summarize,
mindate=min(as.Date(dateid,format="%m/%d/%Y")),maxdate=max(as.Date(dateid,format="%m/%d/%Y")),
meany=mean(lyields,na.rm=TRUE),sdy=sd(lyields,na.rm=TRUE),miny=min(lyields,na.rm=TRUE),maxy=max(lyields,na.rm=TRUE),
meanr=mean(ratings_f_first,na.rm=TRUE),sdr=sd(ratings_f_first,na.rm=TRUE),minr=min(ratings_f_first,na.rm=TRUE),maxr=max(ratings_f_first,na.rm=TRUE),
maxdiff = max(diff(ratings_f_first,lag=12),na.rm=TRUE),mindiff = min(diff(ratings_f_first,lag=12),na.rm=TRUE),sddiff = sd(diff(ratings_f_first,lag=12),na.rm=TRUE))
sumstat_country$text <- paste(sumstat_country$mindate,"-",sumstat_country$maxdate)
# Table 2: Summary statistics by country group
# Stacked rows: yields (total sample, then by IMF group) followed by ratings
# (total sample, then by IMF group).
sumstat_group <- data.frame(Indicator = "yields", IMFclass="total sample", mean=mean(data$lyields,na.rm=TRUE),sd=sd(data$lyields,na.rm=TRUE),min=min(data$lyields,na.rm=TRUE),max=max(data$lyields,na.rm=TRUE))
sumstat_group <- rbind(sumstat_group,
cbind(Indicator="yields",ddply(data,.(IMFclass),summarize,
mean=mean(lyields,na.rm=TRUE),sd=sd(lyields,na.rm=TRUE),min=min(lyields,na.rm=TRUE),max=max(lyields,na.rm=TRUE))))
sumstat_group <- rbind(sumstat_group,
cbind(Indicator = "ratings", IMFclass="total sample", mean=mean(data$ratings_f_first,na.rm=TRUE),sd=sd(data$ratings_f_first,na.rm=TRUE),min=min(data$ratings_f_first,na.rm=TRUE),max=max(data$ratings_f_first,na.rm=TRUE)))
sumstat_group <- rbind(sumstat_group,
cbind(Indicator="ratings",ddply(data,.(IMFclass),summarize,
mean=mean(ratings_f_first,na.rm=TRUE),sd=sd(ratings_f_first,na.rm=TRUE),min=min(ratings_f_first,na.rm=TRUE),max=max(ratings_f_first,na.rm=TRUE))))
##############
#SUMMARY PLOTS OF RATING AND YIELD DATA
# Each figure is written straight to a PDF in the working directory.
# Figure 1: Density of yields
pdf("dens_yields.pdf",width=7,height=4)
d <- density(data$ryields,na.rm=TRUE)
plot(d$x,d$y,main="",xlab="Real yields (in per cent p.a.)",ylab="Density",type='l',lwd=2)
dev.off()
# Figure 2: Scatterplot of ratings and yields
# Ratings are negated so that better ratings appear on the right; axis labels
# come from translate_rating() (defined in the sourced helpers).
pdf("ry_scat.pdf",width=7.5,height=4)
x <- translate_rating(c(6:24))
plot(-data$ratings_f_first,data$ryields,type="p",pch=20,main="",xlab="Ratings",ylab="Yields (in per cent p.a.)",xaxt='n')
abline(h=0)
axis(1,at=-c(6:24),x,cex.axis=1.1)
dev.off()
# Figure 3: Histogram of monthly ratings, by country group
# Build a frequency matrix (rating level x IMF group) and draw a stacked
# barplot; missing level/group combinations stay at zero.
pdf("hist_ratings_bygroup.pdf",width=7,height=4)
par(mai=rep(1.2, 4))
freqall <- ddply(data,.(IMFclass,lgratings),summarise,freq=length(lgratings))
freqall <- freqall[!is.na(freqall[,2]),]
rat <- seq(max(freqall[,2]),min(freqall[,2]),-1)
x <- translate_rating(rat)
freq <- matrix(0,length(rat),4)
freq[,1] <- rat
freq[match(freqall[freqall[,1]=="Advanced",2],freq),2] <- freqall[freqall[,1]=="Advanced",3]
freq[match(freqall[freqall[,1]=="Transition",2],freq),3] <- freqall[freqall[,1]=="Transition",3]
freq[match(freqall[freqall[,1]=="Developing",2],freq),4] <- freqall[freqall[,1]=="Developing",3]
colnames(freq) <- c("Level","Advanced","Transition","Developing")
barplot(height=t(freq[,2:4]),names.arg=x,legend.text=TRUE,args.legend=c(x="topright"),xlab="Ratings",ylab="Frequency",cex.axis=1.1,cex.lab=1.1)
par(mai=mai.default)
dev.off()
# Figure 4: Histogram of monthly rating changes, by country group
# Rating changes are computed per country, scaled by 3 (one notch appears to
# equal 1/3 of a rating unit -- TODO confirm), zero-changes dropped, then
# frequencies are tabulated by change size and group.
pdf("hist_rchanges_bygroup.pdf",width=7,height=4)
d <- ddply(data,.(country),transform,dratings = c(NA,diff(ratings_f_first,lag=1)))
d <- d[,c("IMFclass","dratings")]
d <- d[!is.na(d[,2]),]
d[,2] <- round(d[,2]*3)
d <- d[d[,2]!=0,]
freqall <- ddply(d,.(IMFclass,dratings),summarise,freq=length(dratings))
freqall <- freqall[!is.na(freqall[,2]),]
rat <- c(min(d[,2]):max(d[,2]))
freq <- matrix(0,length(rat),4)
freq[,1] <- rat
freq[match(freqall[freqall[,1]=="Advanced",2],freq),2] <- freqall[freqall[,1]=="Advanced",3]
freq[match(freqall[freqall[,1]=="Transition",2],freq),3] <- freqall[freqall[,1]=="Transition",3]
freq[match(freqall[freqall[,1]=="Developing",2],freq),4] <- freqall[freqall[,1]=="Developing",3]
freq[,1] <- freq[,1]/3
colnames(freq) <- c("Level","Advanced","Transition","Developing")
barplot(height=t(freq[,2:4]),names.arg=freq[,1],legend.text=TRUE,args.legend=c(x="topleft"),xlab="Rating Changes",ylab="Frequency",cex.axis=1.1)
dev.off()
# Figure 5: Event study, yield development before / after rating change
# For every rating change, collect the yield path within a +/-12-month window,
# normalize it to 100 at the event month, and plot the median path separately
# for upgrades (matpos) and downgrades (matneg).  Windows cut off at the
# sample edges are padded with NA.
pdf("event_study.pdf",width=7,height=4)
Tdiff <- 12
d <- data[,c("country","dateid","yields_m","dratings0")]
countries <- unique(d[,"country"])
matpos <- matrix(NA,sum(d[,4]>0,na.rm=TRUE),2*Tdiff+1)
matneg <- matrix(NA,sum(d[,4]<0,na.rm=TRUE),2*Tdiff+1)
countpos <- 0
countneg <- 0
for (coun in countries){
print(coun)
temp <- d[d[,"country"]==coun,]
T <- dim(temp)[1]
pos <- which(temp[,4]!=0)
for (i in pos){
vec <- c(rep(NA,max(Tdiff-i+1,0)),temp[max(1,i-Tdiff):min(T,(i+Tdiff)),3],rep(NA,max(i+Tdiff-T,0)))
vec <- vec/vec[Tdiff+1]*100
if (temp[i,4]>0){
countpos <- countpos+1
matpos[countpos,] <- vec
}
else {
countneg <- countneg+1
matneg[countneg,] <- vec
}
}
}
matpos <- matpos[1:countpos,]
matneg <- matneg[1:countneg,]
# Median (0.5 quantile) across events for each month of the window.
plotpos <- apply(matpos,2,quantile,c(0.5),na.rm=TRUE)
plotneg <- apply(matneg,2,quantile,c(0.5),na.rm=TRUE)
plotvals <- rbind(plotpos,plotneg)
matplot(c(-Tdiff:Tdiff),t(plotvals),type='l',lty=c(2,3),lwd=c(2,2),col=1,xlab="Months before/after rating change",ylab="normalized yield",cex.axis=1.1)
legend("bottomleft",legend=c("yields around upgrade","yields around downgrade"),lty=c(2,3),lwd=c(2,2),col=1)
dev.off()
# Figure 6: idealized relations of ratings and yields
# Stylized illustration: two exponential rating-yield curves labelled "good"
# and "bad"; axes are unlabelled on purpose.
pdf("ideal_relation_rest.pdf",width=6,height=4)
par(mai=c(rep(0.8, 3),0.25))
x <- seq(-5,2,0.1)
y <- mat.or.vec(length(x),2)
y[,1] <- exp(x)
y[,2] <- 2*exp(x/2)-0.5
if (do.col){
matplot(x,y,type='l',lwd=2,col=c(2,4),xaxt='n',yaxt='n',xlab="",ylab="")
}else{
matplot(x,y,type='l',lwd=2,col=1,xaxt='n',yaxt='n',xlab="",ylab="")
}
title(xlab="Ratings",ylab="Yields",line=1,cex.lab=1)
xtext = c(-2.5,0.8)
ytext = c(0.5,3.2)
stext = c("good","bad")
text(xtext,ytext,stext)
par(mai=mai.default)
dev.off()
#--------------------------------------------
#LONG-RUN RELATIONSHIP
# Table 3 (second column of coeff_y, and coeff_r plus stats_bs)
# Combine the smoothed point estimates with bootstrap medians and the share of
# positive bootstrap draws (an informal one-sided significance measure).
coeff_y <- cbind(y_jointlambda$res_smooth$coefficients,apply(coeffs_bs1000$tab_y,2,quantile,0.5),colMeans(coeffs_bs1000$tab_y>0))
colnames(coeff_y) <- c("mean_coeff","median_coeff","p>0")
coeff_r <- cbind(r_jointlambda$res_smooth$coefficients,apply(coeffs_bs1000$tab_r,2,quantile,0.5),colMeans(coeffs_bs1000$tab_r>0))
colnames(coeff_r) <- c("mean_coeff","median_coeff","p>0")
# Re-evaluate model fit at the bootstrap-median coefficients (oprob switches
# the ordered-probit treatment for the rating equation).
y_jointlambda$post_bs <- estim_smooth_post_bs(y_jointlambda,c(coeff_y[,2]),oprob=FALSE)
r_jointlambda$post_bs <- estim_smooth_post_bs(r_jointlambda,c(coeff_r[,2]),oprob=TRUE)
# Fit statistics for the yield (column 1) and rating (column 2) equations.
stats_bs <- rbind(lambda=c(y_jointlambda$lambda,r_jointlambda$lambda),
coefficients = c(y_jointlambda$k,r_jointlambda$k),
LL_data=c(y_jointlambda$post_bs$LL_out$LL_data,r_jointlambda$post_bs$LL_out$LL_data),
LL_smooth=c(y_jointlambda$post_bs$LL_out$LL_smooth,r_jointlambda$post_bs$LL_out$LL_smooth),
R2=c(y_jointlambda$post_bs$R2,r_jointlambda$post_bs$R2),
R2adj=c(y_jointlambda$post_bs$R2adj,r_jointlambda$post_bs$R2adj),
BIC=c(y_jointlambda$post_bs$BIC,r_jointlambda$post_bs$BIC),
AIC=c(y_jointlambda$post_bs$AIC,r_jointlambda$post_bs$AIC))
# Table 4
# Median convergence times after a yield shock, converted from months to years.
time_taken_medy <- t(as.matrix(apply(time_taken_yshock,2,median)/12))
# Note: Figure 7 is plotted as part of main_estimation.R
#--------------------------------------------
#IRF PLOTS
# Figures 8 and 9: Median IRFs after two-notch downgrade from different starting values
# 'pos' picks 8 of the precomputed IRF scenarios; the do.col switch chooses
# colored lines vs. black/white lines distinguished by line type and point
# markers (markers only on the first four curves, see col.points/pch).
pos <- c(1:5,11,14,17)
# 8 curves per plot
if (do.col){
# differentiate by color
reps <- ceiling(length(pos)/8)
col <- rep(c(1:8),reps)
type <- sort(rep(1:reps,8))
pch <- rep(NA,length(pos))
col.scen <- c(1,4,4,4,4,4)
lty.scen <- c(1,1,2,2,3,3)
}else{
print("b/w plots only work for 8 curves")
col <- 1
type <- rep(1:4,2)
col.points <- 1:4
pch <- c(1:4,rep(NA,4))
point.pos <- seq(2,122,10)
col.scen <- 1
lty.scen <- c(6,1,2,2,3,3)
}
pdf("IRF_y_lim.pdf",width=10.5,height=7.5)
matplot(1:122,t(IRF_eqyields_down$IRF_mult_med_y[pos,]),xaxt="n",type="l",col=col,lty=type,lwd=2,xlab="Months",ylab="Yields",main="",cex.axis=1.1,cex.lab=1.2)
if (!do.col){for (k in col.points){points(point.pos,t(IRF_eqyields_down$IRF_mult_med_y[pos[k],point.pos]),pch=pch[k],lwd=2)}}
axis(1,at=seq(2,122,12),labels = seq(0,120,12),cex.axis=1.1)
legend("topright",legend=IRF_eqyields_down$text[pos],col=col,lty=type,pch = pch,lwd=2)
dev.off()
pdf("IRF_r_lim.pdf",width=10.5,height=7.5)
matplot(1:122,t(IRF_eqyields_down$IRF_mult_med_r[pos,]),xaxt="n",type="l",col=col,lty=type,lwd=2,xlab="Months",ylab="Ratings",main="",cex.axis=1.1,cex.lab=1.2)
if (!do.col){
# Ratings move in discrete steps: place extra markers at the midpoints of
# each jump (found via diff() != 0) so the step curves stay distinguishable.
for (k in col.points){
vals.all <- c(IRF_eqyields_down$IRF_mult_med_r[pos[k],])
p.add <- which(diff(vals.all)!=0)
vals.add <- apply(matrix(p.add),1,FUN=function(x){mean(vals.all[x:(x+1)])})
points(c(point.pos,p.add+0.5),c(vals.all[point.pos],vals.add),pch=pch[k],lwd=2)
}
}
axis(1,at=seq(2,122,12),labels = seq(0,120,12),cex.axis=1.1)
legend("topleft",legend=IRF_eqyields_down$text[pos],col=col,pch = pch,lty=type,lwd=2)
dev.off()
# Figures 10 and 11: Scenario plots
scen_names <- ls()[grep("scen_",ls())]
scen_names <- c("scen_ITA","scen_GRE_I")
for (i in 1:length(scen_names)){
pdf(paste(scen_names[i],".pdf",sep=""),width=10.5,height=5.2)
print_lvl <- c(0.05,0.95)
scen <- get(scen_names[i])
periods <- length(scen$scen$obs_yield)
xtext <- seq(as.Date(paste(scen$year,"/",scen$month,"/1",sep="")),by="month",length.out=periods)
pos_xtext <- which(format(xtext,"%m")=="01")
text <- c("Observed development","IRF",
paste(print_lvl[1],"conf without shocks"),paste(print_lvl[2],"conf without shocks"),
paste(print_lvl[1],"conf with shocks"),paste(print_lvl[2],"conf with shocks"))
text_exp <- paste("Scenario analysis for",scen$country,"in",xtext[scen$periods_start])
pos_lvl <- charmatch(print_lvl,scen$conf_lvls)
yields <- cbind(scen$scen$obs_yield,scen$IRF_med_y[1:periods],
scen$IRF_conf_med_y[1:periods,pos_lvl],
scen$IRF_conf_shock_y[1:periods,pos_lvl])
ratings <- cbind(scen$scen$obs_ratings,scen$IRF_med_r[1:periods],
scen$IRF_conf_med_r[1:periods,pos_lvl],
scen$IRF_conf_shock_r[1:periods,pos_lvl])
layout(matrix(c(1,2,3,3),ncol=2, byrow = TRUE), heights=c(4, 1.2))
par(mai=c(1,1,1,0.3))
matplot(x=c(1:periods),y=yields,type ="l",main="(a)",xaxt="n",xlab = "Months",ylab = "Yields",lwd=c(2,2,1,1,1,1),col = col.scen,lty = lty.scen,cex.axis=1.1,cex.lab=1.2)
axis(1,at=pos_xtext,labels=format(xtext[pos_xtext],"%Y"))
abline(v=scen$periods_start)
matplot(x=c(1:periods),y=ratings,type ="l",main="(b)",xaxt="n",xlab = "Months",ylab = "Ratings",lwd=c(2,2,1,1,1,1),col = col.scen,lty = lty.scen,cex.axis=1.1,cex.lab=1.2)
axis(1,at=pos_xtext,labels=format(xtext[pos_xtext],"%Y"))
abline(v=scen$periods_start)
par(mai=c(0.5,0,0,0))
plot.new()
legend(x="center", ncol=2,legend=text,lwd=c(2,2,1,1,1,1),col = col.scen,lty = lty.scen)
mtext(text_exp, side = 1, line = 0.5, outer = FALSE)
par(mai=mai.default)
dev.off()
}
##########
# Figure A1: IRF plots- median + conf for multiple rating shocks
# Three selected scenarios, each plotted as a yields panel and a ratings panel
# with the median IRF and 5%/95% bootstrap confidence bands; a shared legend
# occupies the bottom strip of the layout.
posvec <- c(2,4,17)
print_lvl <- c(0.05,0.95)
pos_lvl <- charmatch(print_lvl,IRF_eqyields_down$conf_lvls)
# main <- c("(a)","(b)","(c)")
text <- c("Median IRF",paste(round(print_lvl[1]*100,0),"% confidence"),paste(round(print_lvl[2]*100,0),"% confidence"))
pdf("IRF_conf.pdf",width=10,height=10)
layout(matrix(c(c(1:6),7,7),ncol=2, byrow = TRUE), heights=c(3,3,3,1))
par(mai=c(0.7,0.8,0.5,0.3))
for (i in 1:length(posvec)){
pos <- posvec[i]
main <- paste(IRF_eqyields_down$text[pos],c(", Yields",", Ratings"))
y_y <- cbind(IRF_eqyields_down$IRF_mult_med_y[pos,],IRF_eqyields_down$IRF_mult_conf_y[pos,,pos_lvl])
matplot(x=c(1:122),y=y_y,xaxt="n",type ="l",main=main[1],xlab = "Months",ylab = "Yields",lwd=c(2,1,1),col=1,lty=c(1,2,2),cex.lab=1.2)
axis(1,at=seq(2,122,12),labels = seq(0,120,12))
abline(a=0,b=0)
y_r <- cbind(IRF_eqyields_down$IRF_mult_med_r[pos,],IRF_eqyields_down$IRF_mult_conf_r[pos,,pos_lvl])
matplot(x=c(1:122),y=y_r,xaxt="n",type ="l",main=main[2],xlab = "Months",ylab = "Ratings",lwd=c(2,1,1),col=1,lty=c(1,2,2),ylim=c(-3,1),cex.lab=1.2)
axis(1,at=seq(2,122,12),labels = seq(0,120,12))
abline(a=0,b=0)
}
par(mai=c(0,0,0,0))
plot.new()
legend(x="center", ncol=3,legend=text,lwd=c(2,1,1),col=1,lty=c(1,2,2),cex=1.2)
par(mai=mai.default)
dev.off()
##########
# Figures A2 and A3: IRF plots different shock scenarios
# Same plotting recipe as Figures 8/9, applied to the staggered-downgrade
# (k_scen == 1) and upgrade (k_scen == 2) IRF sets.  Relies on col/type/pch/
# col.points defined above in the Figure 8/9 block.
pos <- c(1:5,11,14,17)
for (k_scen in 1:2){
if (k_scen==1){
pdf("IRF_staggered.pdf",width=11,height=6.2)
temp <- IRF_eqyields_staggered
}else{
pdf("IRF_up.pdf",width=11,height=6.2)
temp <- IRF_eqyields_up
}
# Two panels (yields, ratings) side by side with a shared legend strip.
layout(matrix(c(1,2,3,3),ncol=2, byrow = TRUE), heights=c(5, 1.2))
par(mai = c(0.8,0.8,1,0.2))
T <- dim(temp$IRF_mult_med_y)[2]
T.point <- seq(T%%12,T,12)
matplot(x=c(1:T),y=t(temp$IRF_mult_med_y[pos,]),xaxt="n",type ="l",main="IRF yields",xlab = "Months",ylab = "Yields",lwd=1,col=col,lty=type,cex.lab=1.2)
if (!do.col){for (k in col.points){points(T.point,t(temp$IRF_mult_med_y[pos[k],T.point]),pch=pch[k])}}
axis(1,at=seq(T%%12,T,12),labels = seq(0,T-T%%12,12),cex.axis=0.8)
abline(a=0,b=0)
matplot(x=c(1:T),y=t(temp$IRF_mult_med_r[pos,]),xaxt="n",type ="l",main="IRF ratings",xlab = "Months",ylab = "Ratings",lwd=1,col=col,lty=type,cex.lab=1.2)
if (!do.col){
# Extra markers at the midpoints of each discrete rating jump (cf. Fig. 9).
for (k in col.points){
vals.all <- c(temp$IRF_mult_med_r[pos[k],])
p.add <- which(diff(vals.all)!=0)
vals.add <- apply(matrix(p.add),1,FUN=function(x){mean(vals.all[x:(x+1)])})
points(c(T.point,p.add+0.5),c(vals.all[T.point],vals.add),pch=pch[k],lwd=1)
}
}
axis(1,at=seq(T%%12,T,12),labels = seq(0,T-T%%12,12),cex.axis=0.8)
abline(a=0,b=0)
par(mai=c(0.5,0,0,0))
plot.new()
legend(x="center",ncol=4,legend=paste(temp$text[pos]," "),col=col,pch = pch,lty=type,lwd=1,cex=0.8)
par(mai=mai.default)
dev.off()
rm(temp,T,T.point)
}
###################
# Figures A4 and A5: IRFs to robustness check with median ratings
# NOTE(review): rm(list=ls()) wipes the whole workspace before reloading the
# robustness results (medratingfirst_asym.RData), so nothing from the blocks
# above survives past this point.  The plotting code duplicates Figures 8/9.
rm(list=ls())
wd <- getwd()
load(paste(wd,"/medratingfirst_asym.RData",sep=""))
do.col <- FALSE
# 8 curves per plot
pos <- c(1:5,11,14,17)
if (do.col){
# differentiate by color
col <- c(1:8)
type <- 1
pch <- rep(NA,8)
}else{
col <- 1
type <- rep(1:4,2)
col.points <- 1:4
pch <- c(1:4,rep(NA,4))
point.pos <- seq(2,122,10)
}
pdf("IRF_y_lim_med.pdf",width=10.5,height=7.5)
matplot(1:122,t(IRF_eqyields_down$IRF_mult_med_y[pos,]),xaxt="n",type="l",col=col,lty=type,lwd=2,xlab="Months",ylab="Yields",main="",cex.axis=1.1,cex.lab=1.2)
if (!do.col){for (k in col.points){points(point.pos,t(IRF_eqyields_down$IRF_mult_med_y[pos[k],point.pos]),pch=pch[k],lwd=2)}}
axis(1,at=seq(2,122,12),labels = seq(0,120,12),cex.axis=1.1)
legend("topright",legend=IRF_eqyields_down$text[pos],col=col,lty=type,pch = pch,lwd=2)
dev.off()
pdf("IRF_r_lim_med.pdf",width=10.5,height=7.5)
matplot(1:122,t(IRF_eqyields_down$IRF_mult_med_r[pos,]),xaxt="n",type="l",col=col,lty=type,lwd=2,xlab="Months",ylab="Ratings",main="",cex.axis=1.1,cex.lab=1.2)
if (!do.col){
# Extra markers at the midpoints of each discrete rating jump (cf. Fig. 9).
for (k in col.points){
vals.all <- c(IRF_eqyields_down$IRF_mult_med_r[pos[k],])
p.add <- which(diff(vals.all)!=0)
vals.add <- apply(matrix(p.add),1,FUN=function(x){mean(vals.all[x:(x+1)])})
points(c(point.pos,p.add+0.5),c(vals.all[point.pos],vals.add),pch=pch[k],lwd=2)
}
}
axis(1,at=seq(2,122,12),labels = seq(0,120,12),cex.axis=1.1)
legend("topleft",legend=IRF_eqyields_down$text[pos],col=col,pch = pch,lty=type,lwd=2)
dev.off()
0797cb403d5c3094cd6f76b32dc391b9f3042a6d | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googleadexchangebuyerv14.auto/man/marketplacedeals.update.Rd | 323042bd03bd188273838e8344028deaacd4fcc6 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,081 | rd | marketplacedeals.update.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_functions.R
\name{marketplacedeals.update}
\alias{marketplacedeals.update}
\title{Replaces all the deals in the proposal with the passed in deals}
\usage{
marketplacedeals.update(EditAllOrderDealsRequest, proposalId)
}
\arguments{
\item{EditAllOrderDealsRequest}{The \link{EditAllOrderDealsRequest} object to pass to this method}
\item{proposalId}{The proposalId to edit deals on}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/adexchange.buyer
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/adexchange.buyer)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/ad-exchange/buyer-rest}{Google Documentation}
Other EditAllOrderDealsRequest functions: \code{\link{EditAllOrderDealsRequest}}
}
|
0d2e403a914de59897fa53f45b013b39c16d255b | af545d1594c0aca08e84a6bc2742df962521695e | /Chapter 6/Exercise 6.40.r | 195edb8671af420ea7f7b2038de1f78fd4d69309 | [] | no_license | kmahoski/Statistical-Data-Analysis-in-R | 9da43ae5339d568cd98f19b8c8ad1c472f5c98b2 | 309f85c1284e4691e5670172f2619b75a292dd12 | refs/heads/master | 2021-01-22T11:37:36.858625 | 2014-11-10T21:53:17 | 2014-11-10T21:53:17 | 25,942,516 | 1 | 0 | null | 2014-11-10T21:53:17 | 2014-10-29T21:35:08 | R | UTF-8 | R | false | false | 238 | r | Exercise 6.40.r | grades <- c(
94, 90, 92, 91, 91, 86, 89, 91, 91, 90, 90,
93, 87, 90, 91, 92, 89, 86, 89, 90, 88, 95,
91, 88, 89, 92, 87, 89, 95, 92, 85, 91, 85,
89, 88, 84, 85, 90, 90, 83)
hist(grades, breaks = 10, freq = FALSE, main = "Histogram")
|
98c62a1f18267cd2db06b00c723e030687bf30cd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/jointseg/examples/PSSeg.Rd.R | 279d9fee9b94266c4ef7da321cced5df2d5ec22d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 620 | r | PSSeg.Rd.R | library(jointseg)
### Name: PSSeg
### Title: Parent-Specific copy number segmentation
### Aliases: PSSeg
### ** Examples
## load known real copy number regions
affyDat <- acnr::loadCnRegionData(dataSet="GSE29172", tumorFraction=0.5)
## generate a synthetic CN profile
K <- 10
len <- 1e4
sim <- getCopyNumberDataByResampling(len, K, regData=affyDat)
datS <- sim$profile
## run binary segmentation (+ dynamic programming)
resRBS <- PSSeg(data=datS, method="RBS", stat=c("c", "d"), K=2*K, profile=TRUE)
resRBS$prof
# Compare detected breakpoints against the simulated truth (tolerance 5) and
# overlay both breakpoint sets on the segmented profile.
getTpFp(resRBS$bestBkp, sim$bkp, tol=5)
plotSeg(datS, breakpoints=list(sim$bkp, resRBS$bestBkp))
62053902b1a7734c2055b22db9ff106c6c37c0ba | 28d72a611b0ed6a056eb19384fad1b34177e84cc | /R/tidysig_contexts_and_palettes.R | e2fa5bdc1ee0e259895cb8c379425c2140fdd8b7 | [
"MIT"
] | permissive | edawson/tidysig | 2cec0a509a5b8a4eb94b8c5e249426ac5406ca81 | 15d02819f64779b31db986875728b60fd3b3946c | refs/heads/master | 2021-07-22T02:10:09.320220 | 2020-04-28T19:24:46 | 2020-04-28T19:24:46 | 243,092,054 | 4 | 1 | MIT | 2020-04-28T18:26:52 | 2020-02-25T20:08:17 | R | UTF-8 | R | false | false | 2,143 | r | tidysig_contexts_and_palettes.R | #' A list of the SBS96 mutational contexts
#'
sbs_96_contexts <- c("ACA","ACC","ACG","ACT","ACA","ACC",
"ACG","ACT", "ACA","ACC","ACG","ACT",
"ATA","ATC","ATG","ATT","ATA","ATC",
"ATG","ATT","ATA","ATC","ATG","ATT",
"CCA","CCC","CCG","CCT","CCA","CCC",
"CCG","CCT","CCA","CCC","CCG","CCT",
"CTA","CTC","CTG","CTT","CTA","CTC",
"CTG","CTT","CTA","CTC","CTG","CTT",
"GCA","GCC","GCG","GCT","GCA","GCC",
"GCG","GCT","GCA","GCC","GCG","GCT",
"GTA","GTC","GTG","GTT","GTA","GTC",
"GTG","GTT","GTA","GTC","GTG","GTT",
"TCA","TCC","TCG","TCT","TCA","TCC",
"TCG","TCT","TCA","TCC","TCG","TCT",
"TTA","TTC","TTG","TTT","TTA","TTC",
"TTG","TTT","TTA","TTC","TTG","TTT")
#' A list of the six possible (normalized) SBS96 mutational changes
sbs_96_changes <- c("C>A","C>G","C>T",
"T>A","T>C","T>G")
#' A color palette for SBS96 changes. Matches that of SigProfilerPlotting
sbs_96_changes_colors <- c(rgb(3/256,189/256,239/256),
rgb(1/256,1/256,1/256),
rgb(228/256,41/256,38/256),
rgb(203/256,202/256,202/256),
rgb(162/256,207/256,99/256),
rgb(236/256,199/256,197/256))
#' A color palette for ID83 changes. Matches that of SigProfilerPlotting
id_83_colors <- c(rgb(253/256,190/256,111/256), rgb(255/256,128/256,2/256), rgb(176/256,221/256,139/256), rgb(54/256,161/256,46/256),
rgb(253/256,202/256,181/256), rgb(252/256,138/256,106/256), rgb(241/256,68/256,50/256), rgb(188/256,25/256,26/256),
rgb(208/256,225/256,242/256), rgb(148/256,196/256,223/256), rgb(74/256,152/256,201/256), rgb(23/256,100/256,171/256),
rgb(226/256,226/256,239/256), rgb(182/256,182/256,216/256), rgb(134/256,131/256,189/256), rgb(98/256,64/256,155/256))
|
854cb1de7fc8cde35a088c647921ae9e1f068a53 | 38b32e829e7325e446d2a72ccf846cd03de76b4b | /R/funcs.R | b17eb4649dcab912d757b2206ea39d1cf07886cb | [] | no_license | fawda123/Pteropod_biomarker | b638e452f987d09592270f481c829d467f37c308 | e132f3613627105a9a210db80572569ee376e2a4 | refs/heads/master | 2021-08-29T00:51:13.625532 | 2021-08-10T22:50:52 | 2021-08-10T22:50:52 | 123,340,327 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,999 | r | funcs.R | # function for formatting p-values in tables
p_ast <- function(x){
sig_cats <- c('**', '*', 'ns')
sig_vals <- c(-Inf, 0.005, 0.05, Inf)
out <- cut(x, breaks = sig_vals, labels = sig_cats, right = FALSE)
out <- as.character(out)
return(out)
}
# vif function
vif_func<-function(in_frame,thresh=10,trace=T,...){
library(fmsb)
if(any(!'data.frame' %in% class(in_frame))) in_frame<-data.frame(in_frame)
#get initial vif value for all comparisons of variables
vif_init<-NULL
var_names <- names(in_frame)
for(val in var_names){
regressors <- var_names[-which(var_names == val)]
form <- paste(regressors, collapse = '+')
form_in <- formula(paste(val, '~', form))
vif_init<-rbind(vif_init, c(val, VIF(lm(form_in, data = in_frame, ...))))
}
vif_max<-max(as.numeric(vif_init[,2]), na.rm = TRUE)
if(vif_max < thresh){
if(trace==T){ #print output of each iteration
prmatrix(vif_init,collab=c('var','vif'),rowlab=rep('',nrow(vif_init)),quote=F)
cat('\n')
cat(paste('All variables have VIF < ', thresh,', max VIF ',round(vif_max,2), sep=''),'\n\n')
}
return(var_names)
}
else{
in_dat<-in_frame
#backwards selection of explanatory variables, stops when all VIF values are below 'thresh'
while(vif_max >= thresh){
vif_vals<-NULL
var_names <- names(in_dat)
for(val in var_names){
regressors <- var_names[-which(var_names == val)]
form <- paste(regressors, collapse = '+')
form_in <- formula(paste(val, '~', form))
vif_add<-VIF(lm(form_in, data = in_dat, ...))
vif_vals<-rbind(vif_vals,c(val,vif_add))
}
max_row<-which(vif_vals[,2] == max(as.numeric(vif_vals[,2]), na.rm = TRUE))[1]
vif_max<-as.numeric(vif_vals[max_row,2])
if(vif_max<thresh) break
if(trace==T){ #print output of each iteration
prmatrix(vif_vals,collab=c('var','vif'),rowlab=rep('',nrow(vif_vals)),quote=F)
cat('\n')
cat('removed: ',vif_vals[max_row,1],vif_max,'\n\n')
flush.console()
}
in_dat<-in_dat[,!names(in_dat) %in% vif_vals[max_row,1]]
}
return(names(in_dat))
}
}
# Build plotting data for the marginal effect of one model term while
# holding another at its extremes.
#   modin: fitted lm model
#   cvar:  chr string naming the variable to hold constant (at its min/max)
#   pos:   side on which the "Min"/"Max" labels sit ('left' or 'right')
#   fct:   scaling factor for label x-position relative to the line ends
#          (defaults to 1.05 on the right, 0.95 on the left)
# Returns list(prd_vl = predictions with +/- 1 SE bands, xlab = label coords).
# NOTE(review): uses the deprecated dplyr idiom group_by(.dots = ...) and
# assumes exactly two predictors besides the response -- confirm before reuse.
get_pldat <- function(modin, cvar, pos = c('left', 'right'), fct = NULL){
  pos <- match.arg(pos)
  # crossing of model data by range: a 100-point grid over each predictor
  x <- modin$model %>%
    .[, -1] %>%
    data.frame %>%
    as.list %>%
    map(range) %>%
    map(function(x) seq(x[1], x[2], length = 100))
  # quantiles for cvar: replace its grid by just its min and max
  x[[cvar]] <- modin$model[[cvar]]%>% quantile(., c(0, 1))
  # make data frame of all grid combinations, cvar as the last column
  nms <- names(x)
  x <- crossing(x[[1]], x[[2]])
  names(x) <- nms
  x <- x[, c(names(x)[!names(x) %in% cvar], cvar)]
  # get predictions with standard errors, combine with explanatory vars
  prd_vl <- predict(modin, newdata = x, se = T) %>%
    data.frame(., x) %>%
    dplyr::select(-df, -residual.scale) %>%
    mutate(
      hi = fit + se.fit,
      lo = fit - se.fit
    )
  names(prd_vl)[1] <- all.vars(formula(modin))[1]
  # x axis positions for the quantile labels (at the ends of the lines)
  yvar <- names(prd_vl)[1]
  xvar <- all.vars(formula(modin))
  xvar <- xvar[!xvar %in% c(yvar, cvar)]
  locs <- prd_vl %>%
    group_by(.dots = list(cvar))
  if(pos == 'right'){
    if(is.null(fct)) fct <- 1.05
    locs <- filter(locs, row_number() == n())
  } else {
    if(is.null(fct)) fct <- 0.95
    locs <- filter(locs, row_number() == 1)
  }
  yval <- locs[[yvar]]
  xval <- locs[[xvar]] %>% unique %>% `*`(fct)
  xlab <- data.frame(
    lab = c('Max', 'Min'),
    x = xval, y = yval,
    stringsAsFactors = F)
  # flip label order when cvar increases across the two groups
  dr <- locs[[cvar]] %>% range %>% diff %>% sign
  if(dr == 1) xlab$lab <- rev(xlab$lab)
  # output
  out <- list(prd_vl = prd_vl, xlab = xlab)
  return(out)
}
} |
9cd3916fd5ac1139ad24a41231c0fce182da1226 | e15b42c221e5dc8f5daf1e3d61233c453512950b | /regex_program.R | 006d76033129d4ca3d36a2bcba3659a6deccdc37 | [] | no_license | r3sult/blogpost_codes | 4c62f1b8ae5243583f702b717a759214e386862f | 66643265bf9a766b641e39f712076f3753584417 | refs/heads/master | 2020-12-23T07:38:34.277606 | 2019-12-09T19:19:01 | 2019-12-09T19:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 344 | r | regex_program.R | # install.packages("devtools")
#devtools::install_github("VerbalExpressions/RVerbalExpressions")
library(RVerbalExpressions)
strings = c('123Abdul233','233Raja434','223Ethan Hunt444')
expr = rx_alpha() %>% rx_word() %>% rx_alpha()
stringr::str_extract_all(strings,expr)
expr = rx_digit()
stringr::str_extract_all(strings,expr)
|
0feac5f6631e7e20e475b79c56b1d2b2563db76e | 050560ef74831a7e07d4838ef452c75498c7df64 | /plot4.R | da4e04a8cb7819fd9f4fa16dfc92177a272f220a | [] | no_license | Mehwishmanzur/ExData_Plotting1 | 8ac87fb44fd135d584a79e75e599493093ee532b | e53e7711e149a1b0804aadfd9596f1f341882ae1 | refs/heads/master | 2022-11-13T23:06:55.601839 | 2020-07-04T23:47:19 | 2020-07-04T23:47:19 | 277,170,796 | 0 | 0 | null | 2020-07-04T19:11:45 | 2020-07-04T19:11:44 | null | UTF-8 | R | false | false | 1,588 | r | plot4.R | # Reading and subsetting power consumption data
# Four-panel plot (plot4.png) of household power consumption for
# 2007-02-01 .. 2007-02-02: global active power, voltage, sub-metering,
# and global reactive power, each against time.
# NOTE(review): if this runs under R < 4.0 (stringsAsFactors = TRUE by
# default), the as.numeric() calls below return factor level codes rather
# than the measured values -- confirm, or use as.numeric(as.character(.)).
mydata <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Extracting data from 2007-02-01 to 2007-02-02
subdata <- subset(mydata, mydata$Date== "1/2/2007" | mydata$Date =="2/2/2007")
# Extracting the measurement columns as numerics
globalActivePower <- as.numeric(subdata$Global_active_power)
globalReactivePower <- as.numeric(subdata$Global_reactive_power)
voltage <- as.numeric(subdata$Voltage)
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
datetime <- strptime(paste(subdata$Date, subdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Extracting the sub-metering columns
subMetering1 <- as.numeric(subdata$Sub_metering_1)
subMetering2 <- as.numeric(subdata$Sub_metering_2)
subMetering3 <- as.numeric(subdata$Sub_metering_3)
# Creating png file
png("plot4.png", width=480, height=480)
# initiating a composite plot with many graphs (2 x 2 grid)
par(mfrow = c(2, 2))
# Calling the basic plot function
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# NOTE(review): `lty=` below is passed with no value (a missing argument);
# it was probably meant to be lty=1 -- confirm before reuse.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
8a0e27b0a83dc9dd1ed1a1d526a7404e85281a4b | 167bf75c84a25eba1bb7c64fc35ceaacb1cce833 | /Scripts/poster/Script-Simulação-TCMM.R | 0b0905a381c7ae59e3c6067c148297b68682884f | [] | no_license | AndrMenezes/mcp | f03c6c48483536e1d19650e5570cf15419c4441d | f8a3ba4cd713181238ce7a38381d57150fa1f285 | refs/heads/master | 2020-05-17T04:20:59.148064 | 2019-04-25T20:51:49 | 2019-04-25T20:51:49 | 183,506,331 | 1 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 4,334 | r | Script-Simulação-TCMM.R | rm(list = ls())
# Simulation study of multiple-comparison procedures: global setup.
options(digits=22)
# Libraries ---------------------------------------------------------------
pacotes = c("PMCMR", "plyr", "agricolae", "mutoss" ,"doParallel", "foreach")
sapply(pacotes, require, character.only=T)
# Functions-----------------------------------------------------------------
ngroup<-3     # number of treatment groups
nteste<-10    # number of multiple-comparison procedures evaluated
# template matrix of p-values: (pairwise comparisons) x (procedures)
p<-matrix(integer(ngroup*nteste), ncol = nteste)
# Run the ten multiple-comparison procedures on one simulated data set.
#   n:  per-group sample size
#   tt: stacked response vector for the ngroup groups
# Fills the pre-allocated matrix `p` (one row per pairwise comparison, one
# column per procedure) with each procedure's pairwise p-values and
# returns it.
# NOTE(review): relies on the globals `ngroup` and `p` defined above, and on
# agricolae/PMCMR being attached.
valor.p<-function(n,tt){
  grp<-factor(rep(1:ngroup, each = n))
  mod<-aov(tt ~ grp)
  # parametric procedures based on the ANOVA fit (columns 1-6)
  p[,1]<-LSD.test(mod,"grp",p.adj="none",group=F,console = F)$comparison[,2]
  p[,2]<-as.vector(na.exclude(as.vector(pairwise.t.test(tt,grp,p.adjust.method = "bonferroni")$p.value)))
  p[,3]<-TukeyHSD(mod)$grp[,4]
  p[,4]<-SNK.test(mod,"grp",group=F,console = F)$comparison[,2]
  p[,5]<-duncan.test(mod,"grp",group=F,console = F)$comparison[,2]
  p[,6]<-scheffe.test(mod,"grp",group=F,console = F)$comparison[,2]
  # p[,7]<-as.numeric(as.vector(regwq(tt~grp, data=data.frame(tt,grp), alpha=0.05,MSE=NULL, df=NULL, silent = TRUE)$adjPValue))[c(3,1,2)]
  # rank-based (nonparametric) procedures (columns 7-10)
  p[,7]<-as.vector(na.exclude(as.vector(posthoc.kruskal.nemenyi.test(x=tt, g=grp, dist="Chisquare")$p.value)))
  p[,8]<-as.vector(na.exclude(as.vector(posthoc.kruskal.dunn.test(x=tt, g=grp, p.adjust.method="bonferroni")$p.value)))
  p[,9]<-as.vector(na.exclude(as.vector(posthoc.kruskal.conover.test(x=tt, g=grp, p.adjust.method="bonferroni")$p.value)))
  p[,10]<-as.vector(na.exclude(as.vector(posthoc.vanWaerden.test(x=tt, g=grp, p.adjust.method="bonferroni")$p.value)))
  return(p)
}
# Monte Carlo simulation over sample sizes (ni), error SDs (sd) and shifted
# group means (mu.a); results are stored in the 6-d array x6 of p-values.
names_test<-c("LSD","t-Bonferroni","Tukey","SNK","Duncan","Scheffé","Nemenyi","Dunn","Conover", "vanWaerden")
B <- 5000            # number of simulated data sets per scenario
ni <- c(2, 3, 5, 10, 20)
mu <- 0
mu.a <- -6:6         # mean shift of the third group
sd <- c(1, 2)
x6 <- array(dim = c(ngroup,nteste,length(ni),length(sd),length(mu.a),B))
dimnames(x6) <- list(c("2-1", "3-1", "3-2"),names_test,
                     paste("n = ", ni),paste("sd = ", sd),
                     paste("mu = ", mu.a), paste("sim",1:B))
# Create a cluster and register it according to the number of CPU cores.
nodes <- detectCores()
cl <- makeCluster(nodes)
registerDoParallel(cl)
inicio <- proc.time()
set.seed(1502)
foreach(i=1:length(sd)) %do% {
  # two groups drawn from the null mean; shared across all mu.a scenarios
  dados_xy <- rnorm(max(ni)*2*B,mean = mu, sd=sd[i])
  foreach(j=1:length(mu.a)) %do% {
    dados <- c(dados_xy,rnorm(max(ni)*B, mean = mu.a[j], sd=sd[i]))
    mat <- cbind(as.data.frame(matrix(dados, nrow=3*max(ni), ncol=B, byrow=T)),
                 grupo=rep(1:ngroup, each=max(ni)))
    foreach(k=1:length(ni)) %do% {
      # keep only the first ni[k] observations of each group
      x3 <- ddply(mat, .(grupo), function(u) u[1:ni[k],])[,-(B+1)]
      # export the working objects to the cluster nodes; without this the
      # workers cannot see them in their global environments
      clusterExport(cl, varlist = c("x3", "ngroup", "k", "ni", "B", "nteste", "LSD.test",
                                    "p", "SNK.test", "duncan.test", "scheffe.test","valor.p",
                                    "posthoc.kruskal.nemenyi.test", "posthoc.kruskal.dunn.test",
                                    "posthoc.vanWaerden.test","posthoc.kruskal.conover.test"))
      system.time(x6[,,k,i,j,] <- array(parSapply(cl, x3, function(u) valor.p(n=ni[k], tt=u), simplify = "array"),c(ngroup, nteste, B)))[3]
      cat(i, j, k, "\n")
    }
  }
}
(fim <- proc.time() - inicio)
stopCluster(cl) # shut down the cluster
# Power of each test: proportion of simulations with any rejection among
# the comparisons that involve the shifted group
poder<-apply(apply(x6, 1:6, function(j) sum(j < 0.05))[-1,,,,,], 2:5, function(k) mean(k!=0))
# Familywise Type I error per experiment
alpha<-apply(apply(x6, 2:6, function(j) sum(j < 0.05)), 1:4, function(k) mean(k!=0))
# Saving p-values ----------------------------------------------------------
saveRDS(x6, "Simulations_pvalues.rds")
# Saving power -------------------------------------------------------------
dados1<-adply(poder,1:4)
colnames(dados1)<-c("Teste","n","sd","mu" ,"Poder")
levels(dados1$Teste)<-names_test
levels(dados1$mu)<-mu.a
levels(dados1$n)<-ni
levels(dados1$sd)<-sd
write.table(dados1,file="poder-TCMM.csv",sep = ";",row.names = F)
# Saving alpha (estimated size) --------------------------------------------
dados2<-adply(alpha,1:4)
colnames(dados2)<-c("Teste","n","sd","mu" ,"Tamanho")
levels(dados2$Teste)<-names_test
levels(dados2$mu)<-mu.a
levels(dados2$n)<-ni
levels(dados2$sd)<-sd
dados2<-subset(dados2, mu==0)
write.table(dados2,file="errotipoI-TCMM.csv",sep = ";",row.names = F)
|
cc2ebf28d4705c9e62a3a036dbd0985b3b058d0b | 723dd688a43484c81621e4a51090bfe0c35d894e | /Src/gka_pvalues_test2.R | 8e088d7a8cb51cc44d9c8526ae46c8f3f67a4900 | [] | no_license | XingLLiu/Stats-Modelling-for-Streaming-Data | f3cab8cd97377e3111a8e57f70d57682e5a1df99 | 3b91b4fd420671a29f9957a8e69757e7855aee57 | refs/heads/master | 2020-03-24T18:01:20.924329 | 2019-11-03T11:54:26 | 2019-11-03T11:54:26 | 142,879,584 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,895 | r | gka_pvalues_test2.R | alpha <- 0.05
# Online change-point detection on the stream `response` using p-values
# computed from a Greenwald-Khanna-style epsilon-approximate quantile
# summary.  Each summary is a data frame of tuples (v, g, delta).  A change
# of state starts/ends after `count.limit` consecutive crossings of the
# chi-square thresholds h.u / h.l.
# NOTE(review): `N` (stream length) and `response` are not defined in this
# file -- they must exist before this script runs; confirm against caller.
# NOTE(review): `F <- 'qnorm'` shadows the FALSE alias F; the variable does
# not appear to be used below -- confirm intent.
F <- 'qnorm'
h.u <- qchisq(0.99, 2)   # upper threshold: enter "changed" state
h.l <- qchisq(0.95, 2)   # lower threshold: return to baseline state
learn.time <- 200        # burn-in before p-values are computed
count.limit <- 5         # consecutive exceedances needed to switch state
phase.no <- 2
new.phase <- 0
epsilon <- 2/N           # approximation accuracy of the quantile summary
gka.summary <- data.frame(matrix(NA, ncol=3, nrow=1))
colnames(gka.summary) <- c('v','g','delta')
gka.summary.temp <- data.frame(matrix(NA, ncol=3, nrow=1))
colnames(gka.summary.temp) <- c('v','g','delta')
gka.summary.total <- list()
for (i in 1:phase.no){
  gka.summary.total[[i]] <- gka.summary
}
s <- 0
n.mat <- matrix(0,ncol=phase.no,nrow=1)   # observations fed to each summary
p.mat <- matrix(0,ncol=1,nrow=N)          # -2*log(p) statistic per time step
gka.switch <- 0
gka.count <- matrix(0,ncol=phase.no,nrow=1)
sum.index <- 1
for (n in 0:(length(response)-1)){
  if (n >learn.time){
    # Compute p-values
    y <-response[n+1]
    gka.summary <- gka.summary.total[[1]]
    s <- nrow(gka.summary)
    # Find max. index j such that Y_j <= Y
    j <- which(gka.summary[,1] > y)[1] - 1
    if (is.na(j)){
      j <- s
    }
    # Find the approximate index of Y_j in the stream
    if (j != 0){
      i_j.max <- sum(gka.summary[1:j,2]) + gka.summary[j,3]
      i_j.hat <- i_j.max
    }
    else {
      i_j.hat <- 0
    }
    # p-value (two-sided, against the baseline summary)
    # n.base = sample size based on the baseline distribution
    n.base <- sum(gka.summary[,2])
    p.val <- min(1 - i_j.hat/n.base, i_j.hat/n.base)
    p.mat[(n+1),] <- -2*log(p.val)#p.val
    # Identify if a change in state has occurred
    if (p.mat[(n+1),] > h.u && gka.switch == 0){
      gka.count[1] <- gka.count[1] + 1
      gka.count[-1] <- 0
      if (gka.count[1] == count.limit){
        start.time <- n+1-count.limit
        gka.switch <- 1
        gka.count[1] <- 0
        print(paste('Started:', start.time))
        sum.index <- 2
        new.phase <- 1
      }
    }
    else if (p.mat[(n+1),] < h.l && gka.switch == 1){
      gka.count[2] <- gka.count[2] + 1
      gka.count[-2] <- 0
      if (gka.count[2] == count.limit){
        end.time <- n+1-count.limit
        gka.switch <- 0
        gka.count[2] <- 0
        print(paste('Ended:', end.time))
        sum.index <- 1
        new.phase <- 0
        # Reset contemporary summary
        gka.summary.temp[,] <- NA
      }
    }
    else {
      gka.count[,] <- 0
    }
  }
  # if (gka.switch == 0){
  if (TRUE){
    # Feed the new observation into the active quantile summary
    v <- response[n+1]
    gka.summary <- gka.summary.total[[sum.index]]
    # Use the temporary summary if a new phase is detected
    if (new.phase == 1){
      gka.summary <- gka.summary.temp
    }
    # Fill in the first 1 iteration
    if (n.mat[sum.index] == 0){
      gka.summary[1,] <- c(v,1,0)
      # Update size of summary
      s <- nrow(gka.summary)
    }
    else{
      s <- nrow(gka.summary)
      # COMPRESS phase: every 1/(2*epsilon) insertions, merge adjacent
      # tuples whose combined span stays within the error budget
      if (n.mat[sum.index] %% (1/(2*epsilon)) == 0){
        i = s - 1
        while (i >= 2){
          j = i-1
          delta.i <- gka.summary[i,3]
          g.sum <- sum(gka.summary[j:i,2])
          v <- gka.summary[i,1]
          while (j >= 2 && ((g.sum + delta.i) < 2*epsilon*n)){
            j <- j - 1
            g.sum <- g.sum + gka.summary[j,2]
          }
          # Tune one index up
          j <- j + 1
          # DELETE phase
          if (j < i){
            # Merge tuples from j to i
            gka.summary <- gka.summary[-((j+1):i),]
            gka.summary[j,] <- data.frame('v'=v, 'g'=g.sum-gka.summary[(j-1),2], 'delta'=delta.i)
          }
          # Continue from the largest integer smaller than j
          i <- j - 1
          # Update size of the summary
          s <- nrow(gka.summary)
        }
      }
      # INSERT phase
      s <- nrow(gka.summary)
      v.0 <- gka.summary[1,1]
      v.s_1 <- gka.summary[s,1]
      # Extreme cases: new value outside the current summary range
      tuple.new <- data.frame('v'=NA, 'g'=NA, 'delta'=NA)
      if ( v < v.0 ){
        delta <- 0
        new.position <- 0
        gka.summary <- rbind(tuple.new, gka.summary)
      }
      else if ( v > v.s_1 ){
        delta <- 0
        new.position <- s
        gka.summary <- rbind(gka.summary, tuple.new)
      }
      else{
        # Find appropriate index i and shift the tail down by one row
        new.position <- which( v < gka.summary[,1] )[1] - 1
        delta <- gka.summary[new.position,2] + gka.summary[new.position,3] - 1
        gka.summary <- rbind(gka.summary, tuple.new)
        gka.summary[(new.position+2):(s+1), ] <- gka.summary[(new.position+1):s, ]
      }
      # Insert new tuple
      tuple.new <- data.frame('v'=v, 'g'=1, 'delta'=delta)
      gka.summary[(new.position+1),] <- tuple.new
      # Update size of summary
      s <- nrow(gka.summary)
    }
    # Update the no. of current data
    n.mat[sum.index] <- n.mat[sum.index] + 1
    # Update the current summary
    gka.summary.total[[sum.index]] <- gka.summary
  }
}
|
491ba19f4382acff340d6b2ad08c483f359ffabf | 1a3bcb6ded9b096bab999ae2d0273a8185358101 | /linear_trend_keras_distributed_diff_scale.R | c964dd17361c3a077cc188a806c598c540fac34c | [] | no_license | kevinykuo/timeseries_shootout | f609a2e379707d82e17ed5b16beb43f22c85027d | dec72bf19fddaef74df5779f0ad26bcf8436b977 | refs/heads/master | 2020-03-21T06:58:57.468690 | 2017-10-08T05:44:29 | 2017-10-08T05:44:29 | 138,253,388 | 0 | 1 | null | 2018-06-22T04:09:26 | 2018-06-22T04:09:26 | null | UTF-8 | R | false | false | 4,891 | r | linear_trend_keras_distributed_diff_scale.R | source("common.R")
# Time-distributed LSTM experiment on a linear-trend series: difference,
# scale, window the data, fit/load a Keras model, then predict and invert
# the transforms.  Helper functions (normalize, build_matrix, reshape_X_3d,
# denormalize, build_model_name) and the trend_* series come from the two
# sourced files below.
source("functions.R")
model_exists <- TRUE   # skip training and load the saved .h5 model
lstm_num_predictions <- 4
lstm_num_timesteps <- 4
batch_size <- 1
epochs <- 500
lstm_units <- 32
model_type <- "model_lstm_time_distributed"
lstm_type <- "stateless"
data_type <- "data_diffed_scaled"
test_type <- "TREND"
model_name <- build_model_name(model_type, test_type, lstm_type, data_type, epochs)
cat("\n####################################################################################")
cat("\nRunning model: ", model_name)
cat("\n####################################################################################")
# first-difference the series to remove the trend
trend_train_diff <- diff(trend_train)
trend_test_diff <- diff(trend_test)
# normalize (min/max taken from the training set only)
minval <- min(trend_train_diff)
maxval <- max(trend_train_diff)
trend_train_diff <- normalize(trend_train_diff, minval, maxval)
trend_test_diff <- normalize(trend_test_diff, minval, maxval)
# sliding windows: 4 input timesteps followed by 4 prediction steps
train_matrix <- build_matrix(trend_train_diff, lstm_num_timesteps + lstm_num_predictions)
test_matrix <- build_matrix(trend_test_diff, lstm_num_timesteps + lstm_num_predictions)
X_train <- train_matrix[ ,1:4]
y_train <- train_matrix[ ,5:8]
X_test <- test_matrix[ ,1:4]
y_test <- test_matrix[ ,5:8]
# Keras LSTMs expect the input array to be shaped as (no. samples, no. time steps, no. features)
X_train <- reshape_X_3d(X_train)
X_test <- reshape_X_3d(X_test)
num_samples <- dim(X_train)[1]
num_steps <- dim(X_train)[2]
num_features <- dim(X_train)[3]
y_train <- reshape_X_3d(y_train)
y_test <- reshape_X_3d(y_test)
# model: one LSTM layer returning full sequences, with a time-distributed
# dense head producing one value per timestep
if (!model_exists) {
  set.seed(22222)
  model <- keras_model_sequential()
  model %>%
    layer_lstm(units = lstm_units, input_shape = c(num_steps, num_features),
               return_sequences = TRUE) %>%
    time_distributed(layer_dense(units = 1)) %>%
    compile(
      loss = 'mean_squared_error',
      optimizer = 'adam'
    )
  model %>% summary()
  model %>% fit(
    X_train, y_train, batch_size = batch_size, epochs = epochs, validation_data = list(X_test, y_test)
  )
  model %>% save_model_hdf5(filepath = paste0(model_name, ".h5"))
} else {
  model <- load_model_hdf5(filepath = paste0(model_name, ".h5"))
}
pred_train <- model %>% predict(X_train, batch_size = 1)
pred_test <- model %>% predict(X_test, batch_size = 1)
pred_train <- denormalize(pred_train, minval, maxval)
pred_test <- denormalize(pred_test, minval, maxval)
# undiff: add the predicted differences back onto the matching levels
trend_train_add <- trend_train[(lstm_num_timesteps+1):(length(trend_train)-1)]
trend_train_add_matrix <- build_matrix(trend_train_add, lstm_num_predictions)
pred_train_undiff <- trend_train_add_matrix + pred_train[ , , 1]
trend_test_add <- trend_test[(lstm_num_timesteps+1):(length(trend_test)-1)]
trend_test_add_matrix <- build_matrix(trend_test_add, lstm_num_predictions)
pred_test_undiff <- trend_test_add_matrix + pred_test[ , , 1]
# one column per forecast origin, NA-padded to align with time_id
df <- data_frame(time_id = 1:20,
                 test = trend_test)
for(i in seq_len(nrow(pred_test))) {
  varname <- paste0("pred_test", i)
  df <- mutate(df, !!varname := c(rep(NA, lstm_num_timesteps+1),
                                  rep(NA, i-1),
                                  pred_test_undiff[i, ],
                                  rep(NA, 12-i)))
}
# RMSE pooled over the 12 forecast columns (columns 3:14) of `df`, each
# measured against the ground-truth series in column 2.  NAs within a
# forecast column are skipped when accumulating its squared errors.
calc_multiple_rmse <- function(df) {
  dat <- as.matrix(df)
  truth <- dat[, 2]
  sq_err_by_col <- vapply(
    3:14,
    function(col_idx) sum((dat[, col_idx] - truth)^2, na.rm = TRUE),
    numeric(1)
  )
  sqrt(mean(sq_err_by_col))
}
# Evaluate and plot on the held-out test set
multiple_rmse <- calc_multiple_rmse(df)
multiple_rmse
df <- df %>% gather(key = 'type', value = 'value', test:pred_test12)
ggplot(df, aes(x = time_id, y = value)) + geom_line(aes(color = type, linetype=type))
#######################################################################################
# test on in-range dataset: same pipeline as above, re-using the training
# min/max for normalization and the already-fitted model
trend_test <- trend_test_inrange
trend_test_diff <- diff(trend_test)
trend_test_diff <- normalize(trend_test_diff, minval, maxval)
test_matrix <- build_matrix(trend_test_diff, lstm_num_timesteps + lstm_num_predictions)
X_test <- test_matrix[ ,1:4]
y_test <- test_matrix[ ,5:8]
X_test <- reshape_X_3d(X_test)
y_test <- reshape_X_3d(y_test)
pred_test <- model %>% predict(X_test, batch_size = 1)
pred_test <- denormalize(pred_test, minval, maxval)
trend_test_add <- trend_test[(lstm_num_timesteps+1):(length(trend_test)-1)]
trend_test_add_matrix <- build_matrix(trend_test_add, lstm_num_predictions)
pred_test_undiff <- trend_test_add_matrix + pred_test[ , , 1]
df <- data_frame(time_id = 1:20,
                 test = trend_test)
for(i in seq_len(nrow(pred_test))) {
  varname <- paste0("pred_test", i)
  df <- mutate(df, !!varname := c(rep(NA, lstm_num_timesteps+1), rep(NA, i-1), pred_test_undiff[i, ], rep(NA, 12-i)))
}
df <- df %>% gather(key = 'type', value = 'value', test:pred_test12)
ggplot(df, aes(x = time_id, y = value)) + geom_line(aes(color = type, linetype=type))
|
4334e0f62389c31dd477ec4fba68bf484d5984c1 | 65cf52c828adc878b56ef555b10e6470eb76b176 | /MechaCarChallenge.R | 1980f3b95820390fbe9f2ebb80cc86d9ca6525cb | [] | no_license | zubair-bakori/MechaCar_Statistical_Analysis | e4958bf85e7bcb5db17a100a2d85cfaf43ce9955 | 99cc142dabe7870e31a601fdb17c6710e51ca48e | refs/heads/main | 2023-06-05T19:42:18.408293 | 2021-06-13T20:27:33 | 2021-06-13T20:27:33 | 376,277,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 853 | r | MechaCarChallenge.R | library(dplyr)
# MechaCar statistical analysis:
#  - multiple linear regression of mpg on vehicle design variables
#  - summary statistics of suspension-coil PSI, overall and per lot
#  - one-sample t-tests of PSI against the population mean of 1500
# Requires MechaCar_mpg.csv and Suspension_Coil.csv in the working
# directory; %>%, summarize and group_by come from dplyr (attached above).
mecha_table <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
mecha_lm <- lm(mpg ~ vehicle_length+vehicle_weight+spoiler_angle+ground_clearance+AWD,data=mecha_table)
summary(mecha_lm)
suspCoil_table <- read.csv(file='Suspension_Coil.csv',check.names=F,stringsAsFactors = F)
suspCoil_summary <- suspCoil_table %>% summarize(Mean=mean(PSI), Median=median(PSI), Variance=var(PSI), SD=sd(PSI))
suspCoil_summary
lot_summary <- suspCoil_table %>% group_by(Manufacturing_Lot)%>% summarize(Mean=mean(PSI), Median=median(PSI), Variance=var(PSI), SD=sd(PSI))
lot_summary
# t-tests: all coils, then each manufacturing lot separately
t.test(suspCoil_table$PSI, mu=1500)
t.test(subset(suspCoil_table, Manufacturing_Lot=="Lot1")$PSI, mu=1500)
t.test(subset(suspCoil_table, Manufacturing_Lot=="Lot2")$PSI, mu=1500)
t.test(subset(suspCoil_table, Manufacturing_Lot=="Lot3")$PSI, mu=1500)
|
075482cb51179cf942b61b1baab8381e8483b3ef | 92a0a36d871911d433474d2af7ce02acca90b375 | /make.table-significant.R | a0dcd3fcf5d9cd03e3eff7ac3baaab1f9747298d | [] | no_license | YPARK/ad-multipheno | 07f5c24f6b02034e091da59aeaf625b8e2365579 | aa81caa776cdcad47e680905054a47e7e8187933 | refs/heads/master | 2021-05-15T11:44:48.971948 | 2017-11-02T20:30:32 | 2017-11-02T20:30:32 | 108,270,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,059 | r | make.table-significant.R | library(feather)
library(dplyr)
library(readr)
library(xlsx)
library(pander)
options(stringsAsFactors = FALSE)
# Load MWAS results and compute bootstrap-calibrated p-values: a one-sided
# normal test on the log-odds and a two-sided test on theta, combined
# conservatively by taking the larger of the two.
mwas.tab <- read_feather('table_mwas.ft')
mwas.boot.tab <- mwas.tab %>%
  mutate(p.val = pnorm((lodds -boot.lodds)/boot.lodds.se, lower.tail = FALSE)) %>%
  mutate(p.val.2 = 2*pnorm(abs(theta -boot.theta)/boot.theta.se, lower.tail = FALSE)) %>%
  mutate(p.val = pmax(p.val, p.val.2)) %>%
  select(-p.val.2)
# Bonferroni cutoff: 0.05 over (number of tests for one phenotype) x 3 phenotypes
n.tot <- mwas.tab %>% filter(pheno == 'NP') %>% nrow()
cutoff <- 0.05 / n.tot / 3
significant <- mwas.boot.tab %>% filter(p.val < cutoff) %>% select(cg) %>% unique()
mwas.sig <- mwas.boot.tab %>% filter(cg %in% significant$cg)
## output to compressed TSV for visualization
.write.tab <- function(...) {
  ## write.table(..., row.names = FALSE, col.names = TRUE, sep = '\t', quote = FALSE)
  write_tsv(..., col_names = TRUE)
}
pheno.names <- c('NP', 'NFT', 'Cog')
.write.tab(mwas.boot.tab %>% filter(pheno %in% pheno.names), path = gzfile('table_mwas.txt.gz'))
.write.tab(mwas.sig, path = gzfile('table_mwas_significant.txt.gz'))
|
# Linear-model prediction: design matrix `x` (n x p, with an intercept
# column) times coefficient vector `theta`; returns an n x 1 matrix.
hypothesis <- function(theta, x) x %*% theta
# Ridge-regularized least-squares cost: (RSS + lambda * ||theta[-1]||^2) / (2n).
# The intercept coefficient theta[1] is excluded from the penalty.
cost_function <- function(theta, lambda, x, y) {
  resid_vec <- hypothesis(theta, x) - y
  penalty <- lambda * sum(theta[-1]^2)
  (sum(resid_vec^2) + penalty) / (2 * length(y))
}
# Gradient of cost_function with respect to theta.  The intercept is not
# penalized, hence the leading zero in c(0, theta[-1]).
partial_derivative_cost_function <- function(theta, lambda, x, y) {
  grad_rss <- t(x) %*% (hypothesis(theta, x) - y)
  grad_penalty <- lambda * c(0, theta[-1])
  (grad_rss + grad_penalty) / length(y)
}
# Batch gradient descent for the ridge-regularized cost above.
#   theta:  initial coefficient vector
#   alpha:  learning rate
#   lambda: ridge penalty
#   itr:    number of iterations
#   x, y:   design matrix and response
# Returns list(final theta, per-iteration cost vector) -- same structure as
# before.
batch_gradient_descent <- function(theta, alpha, lambda, itr, x, y) {
  # Preallocate instead of growing with c() inside the loop (O(itr) work
  # rather than O(itr^2) copies); seq_len also handles itr = 0 correctly,
  # where 1:itr would iterate over c(1, 0).
  cost <- numeric(itr)
  for (i in seq_len(itr)) {
    cost[i] <- cost_function(theta, lambda, x, y)
    theta <- theta - alpha * partial_derivative_cost_function(theta, lambda, x, y)
  }
  list(theta, cost)
}
# Demo: fit y = 1 + 3x (data below) with ridge-regularized gradient descent.
x <- matrix(c(rep(1, 3), 1:3), ncol = 2)  # design matrix with intercept column
y <- c(4, 7, 10)
lambda <- 0.1
theta <- c(1, 1)   # initial coefficients
alpha <- 0.3       # learning rate
itr <- 500
l <- batch_gradient_descent(theta, alpha, lambda, itr, x, y)
theta <- l[[1]]
cost <- l[[2]]
n_itr <- 1:itr
plot(cost ~ n_itr)  # cost should decrease toward its minimum
theta
# BUG FIX: cost_function requires (theta, lambda, x, y); the original call
# cost_function(theta, x, y) shifted x into the lambda slot and left y
# missing, which errors at evaluation time.
cost_function(theta, lambda, x, y)
|
e29eb0d70852ce015619d8fe1c8f0255d6a2f076 | eb6fad9bee922702d9857bab56ea818126145806 | /R/auxiliary_functions.R | 560861c9fc1a3c650990a96444a91623634aee14 | [] | no_license | CodingMyLife/HulC | ee8530eea7d8f20479b45dedfee3c5789944671d | 20ceb936057d52d438907531db5eaf147537e745 | refs/heads/main | 2023-05-31T07:08:18.395157 | 2021-06-20T15:45:52 | 2021-06-20T15:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,473 | r | auxiliary_functions.R | ## For non-negative Delta and t, set
## Smallest number of sample splits B such that the HulC miscoverage bound
##   Q(B; Delta, t) = [(1/2 - Delta)^B + (1/2 + Delta)^B] * (1 + t)^(-B + 1)
## is at most alpha.  Delta (median bias) and t are assumed non-negative.
## Errors out when Delta = 1/2 and t = 0, where no finite B can work.
solve_for_B <- function(alpha, Delta, t){
  if(Delta == 0.5 && t == 0){
    stop("Delta is 0.5 and t = 0. The estimator lies only on one side of the parameter!")
  }
  miscoverage <- function(B){
    ((1/2 - Delta)^B + (1/2 + Delta)^B) * (1 + t)^(-B + 1)
  }
  ## analytical lower/upper brackets for the search over B
  base_hi <- 2 + 2*t
  base_lo <- (2 + 2*t)/(1 + 2*Delta)
  B_low <- max(floor(log(base_hi/alpha, base = base_hi)),
               floor(log((1 + t)/alpha, base = base_lo)))
  B_up <- ceiling(log(base_hi/alpha, base = base_lo))
  for(B in B_low:B_up){
    if(miscoverage(B) <= alpha){
      break
    }
  }
  return(B)
}
## Subsampling estimate of the median bias of a univariate estimator.
## `estimate` maps a data matrix/data frame to a scalar.  Subsamples of
## size round(nrow^subsamp_exp) are drawn without replacement; the fraction
## of subsample estimates at or below the full-data estimate is compared to
## 1/2.  Returns |fraction - 1/2|, a value in [0, 1/2].
subsamp_median_bias <- function(data, estimate, subsamp_exp = 2/3, nsub = 1000){
  data <- as.matrix(data)
  n_obs <- nrow(data)
  sub_size <- round(n_obs^subsamp_exp)
  ## never request more subsamples than there are distinct subsets
  nsub <- min(nsub, choose(n_obs, sub_size))
  theta_full <- estimate(data)
  frac_below <- 0
  for(draw in seq_len(nsub)){
    rows <- sample(n_obs, sub_size, replace = FALSE)
    theta_sub <- estimate(data[rows, , drop = FALSE])
    frac_below <- frac_below + (theta_sub - theta_full <= 0)/nsub
  }
  return(abs(frac_below - 1/2))
}
fba23c776ba7abf3f3d3106b4fa88b12791aa496 | 2056ddda0938e24584348f0c94bb4b836155c552 | /Toll/Models for Draft 3.R | bb73b5e66b2f77bd190e5ee7df0874dab8abb30f | [] | no_license | KristopherToll/WaterRights | 0a18ebef52d1b46ac7f35737ab19c89468f123ea | 9a55dd76885af6f131466d67f347a4bfbeab6708 | refs/heads/master | 2021-01-22T04:14:49.107955 | 2018-09-22T00:25:40 | 2018-09-22T00:25:40 | 81,521,978 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,449 | r | Models for Draft 3.R | ## Third Edition Revised Models ##
# Sales Models
# Permanent (sale) water transfers: OLS hedonic price models with
# heteroskedasticity-robust (HC0/HC1) standard errors; MT and WY are
# excluded from every estimation sample.  Objects a..h hold the VIF tables
# of the successive specifications; covN holds the robust SEs passed to
# stargazer; ncvTest() checks for non-constant error variance.
library(stargazer)
library(lmtest)
library(sandwich)
library(car)
library(plm)
library(readr)
MasterData_Sales <- read_csv("C:/Users/Kristopher/odrive/Google Drive/Water Transfer Project/Modified_Data_Models/MasterData_Sales.csv")
options(scipen=99999)
# One-time data-prep steps, kept for reference:
#MasterData_Sales$X1_1 <- NULL
#MasterData_Sales$LogPrice <- log(MasterData_Sales$InflationAdjustedPricePerAnnualAcreFoot)
#MasterData_Sales$Month <- factor(MasterData_Sales$Month, levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul/Aug", "Sep", "Oct", "Nov", "Dec"))
#MasterData_Sales$season <- ifelse(MasterData_Sales$Month == "Jan" | MasterData_Sales$Month =="Feb" | MasterData_Sales$Month == "Mar", "Qrt1", ifelse(MasterData_Sales$Month == "Apr" | MasterData_Sales$Month == "May"| MasterData_Sales$Month == "Jun", "Qrt2", ifelse(MasterData_Sales$Month == "Jul/Aug"| MasterData_Sales$Month == "Sep", "Qrt3", "Qtr4")))
#write.csv(MasterData_Sales, file = "C:/Users/Kristopher/odrive/Google Drive/Water Transfer Project/Modified_Data_Models/MasterData_sales.csv")
# Baseline: buyer/seller sector dummies plus drought index (PDSI)
S_OLS <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI, subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
S_OLS_robust1 <- coeftest(S_OLS, vcov=vcovHC, type = "HC0")
S_OLS_robust1.1 <- coeftest(S_OLS, vcov=vcovHC, type = "HC0")
cov1 <- sqrt(diag(vcovHC(S_OLS, type = "HC0")))
a <- vif(S_OLS)
ncvTest(S_OLS)
# Same specification re-estimated (used as the "no quadratic" column)
S_OLS_nq <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI, subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
S_OLS_robust <- coeftest(S_OLS_nq, vcov=vcovHC, type = "HC1")
cov_nq <- sqrt(diag(vcovHC(S_OLS_nq, type = "HC0")))
b <- vif(S_OLS_nq)
ncvTest(S_OLS_nq)
# + state fixed effects (CO as the base level)
S_OLS_state <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + relevel(as.factor(State), "CO"), subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
cov2 <- sqrt(diag(vcovHC(S_OLS_state, type = "HC0")))
c <- vif(S_OLS_state)
ncvTest(S_OLS_state)
S_OLS_state_robust <- coeftest(S_OLS_state, vcov=vcovHC, type = "HC0")
# + year fixed effects
S_OLS_Year <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + as.factor(Year), subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
cov3 <- sqrt(diag(vcovHC(S_OLS_Year, type = "HC0")))
d <- vif(S_OLS_Year)
ncvTest(S_OLS_Year)
# + both year and state fixed effects
S_OLS_State_Year <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + as.factor(Year) + relevel(as.factor(State), "CO"), subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
cov4 <- sqrt(diag(vcovHC(S_OLS_State_Year, type = "HC0")))
e <- vif(S_OLS_State_Year)
ncvTest(S_OLS_State_Year)
# Without the sector dummies (controls only)
S_OLS_NoAgents <- lm(log(InflationAdjustedPricePerAnnualAcreFoot) ~ PDSI + as.factor(Year) + relevel(as.factor(State), "CO"), subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
cov5 <- sqrt(diag(vcovHC(S_OLS_NoAgents, type = "HC0")))
f <- vif(S_OLS_NoAgents)
ncvTest(S_OLS_NoAgents)
# + seasonal (quarter) effects
S_OLS_Season <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + season, subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
cov6 <- sqrt(diag(vcovHC(S_OLS_Season, type = "HC0")))
g <- vif(S_OLS_Season)
ncvTest(S_OLS_Season)
# + seasonal and state effects
S_OLS_Season_state <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + season + relevel(as.factor(State), "CO"), subset(MasterData_Sales, MasterData_Sales$State != "MT" & MasterData_Sales$State != "WY"))
cov7 <- sqrt(diag(vcovHC(S_OLS_Season_state, type = "HC0")))
h <- vif(S_OLS_Season_state)
ncvTest(S_OLS_Season_state)
# Regression table for all sale-model specifications (HTML output)
stargazer(S_OLS_NoAgents, S_OLS_nq, S_OLS_state, S_OLS_Year, S_OLS_State_Year, S_OLS_Season, S_OLS_Season_state, se = list(cov5, cov_nq, cov2, cov3, cov4, cov6, cov7) ,title = "Permanent Transfers", dep.var.labels = c("Log Price per Acre Foot"), column.labels = c("OLS"), type = "html", out = "C:/Users/Kristopher/odrive/Google Drive/Water Transfer Project/Modified_Data_Models/PermanentTransfersDraft3_1NoQ.htm")
## Lease Models
# Lease (temporary) water transfers: same modeling strategy as the sale
# models above, with lease duration entering first as a continuous term and
# then as an ordered set of duration bins (LeaseDuration_a).  NV is
# excluded from every estimation sample; LcovN holds HC0-robust SEs and
# i..p hold the VIF tables.
MasterData_Leases <- read_csv("C:/Users/Kristopher/odrive/Google Drive/Water Transfer Project/Modified_Data_Models/MasterData_Leases.csv")
MasterData_Leases$LeaseDuration_a <- factor(as.factor(MasterData_Leases$LeaseDuration_a), c("1", "2", "3", "4", "5-10 Years", "11-20 years", "21-100 years"))
# Lease Models
L_OLS_DurCont <- lm(LogPrice ~ PDSI + LeaseDuration, data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov1 <- sqrt(diag(vcovHC(L_OLS_DurCont, type = "HC0")))
i <- vif(L_OLS_DurCont)
ncvTest(L_OLS_DurCont)
L_OLS_DurDis <- lm(LogPrice ~ PDSI + LeaseDuration_a, data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov2 <- sqrt(diag(vcovHC(L_OLS_DurDis, type = "HC0")))
j <- vif(L_OLS_DurDis)
ncvTest(L_OLS_DurDis)
# + buyer/seller sector dummies
L_OLS_agents <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + LeaseDuration_a, data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov3 <- sqrt(diag(vcovHC(L_OLS_agents, type = "HC0")))
k <- vif(L_OLS_agents)
ncvTest(L_OLS_agents)
# + state fixed effects (CA as base level)
L_OLS_state <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + LeaseDuration_a + relevel(as.factor(State), "CA"), data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov4 <- sqrt(diag(vcovHC(L_OLS_state, type = "HC0")))
l <- vif(L_OLS_state)
ncvTest(L_OLS_state)
# + year fixed effects
L_OLS_year <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + LeaseDuration_a + as.factor(Year), data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov5 <- sqrt(diag(vcovHC(L_OLS_year, type = "HC0")))
m <- vif(L_OLS_year)
ncvTest(L_OLS_year)
# + both state and year fixed effects
L_OLS_year_state <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + LeaseDuration_a + relevel(as.factor(State), "CA") + as.factor(Year), data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov6 <- sqrt(diag(vcovHC(L_OLS_year_state, type = "HC0")))
n <- vif(L_OLS_year_state)
ncvTest(L_OLS_year_state)
# + seasonal (quarter) effects
L_OLS_Season <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + LeaseDuration_a + as.factor(season), data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov7 <- sqrt(diag(vcovHC(L_OLS_Season, type = "HC0")))
o <- vif(L_OLS_Season)
ncvTest(L_OLS_Season)
# + seasonal and state effects
L_OLS_Season_state <- lm(LogPrice ~ AgtoUrban + AgtoEnivo + UrbantoAg + UrbantoUrban + UrbantoEnviro + PDSI + LeaseDuration_a + relevel(as.factor(State), "CA") + as.factor(season), data = subset(MasterData_Leases, MasterData_Leases$State != "NV"))
Lcov8 <- sqrt(diag(vcovHC(L_OLS_Season_state, type = "HC0")))
p <- vif(L_OLS_Season_state)
ncvTest(L_OLS_Season_state)
# Regression table for all lease-model specifications (HTML output)
stargazer(L_OLS_DurCont, L_OLS_DurDis, L_OLS_agents, L_OLS_state, L_OLS_year, L_OLS_year_state, L_OLS_Season, L_OLS_Season_state, se = list(Lcov1, Lcov2, Lcov3, Lcov4, Lcov5, Lcov6, Lcov7, Lcov8), title = "Lease Transfers", dep.var.labels = c("Log Price per Acre Foot"), column.labels = c("OLS"), type = "html", out = "C:/Users/Kristopher/odrive/Google Drive/Water Transfer Project/Modified_Data_Models/LeaseTransfersDraft3NoQ.htm")
|
65c29baf8f52156740aeb72ae82fdbceb70970c2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/condformat/examples/rule_text_color.Rd.R | 807b5271c823d5d5efecbed52f0814f726668433 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 301 | r | rule_text_color.Rd.R | library(condformat)
# Extracted example from the condformat package's Rd documentation for
# rule_text_color: colors the Species text blue on "setosa" rows and
# leaves the other rows unstyled.
### Name: rule_text_color
### Title: Give a color to the text according to some expression
### Aliases: rule_text_color
### ** Examples
data(iris)
condformat(iris[c(1:5, 51:55, 101:105),]) %>%
  rule_text_color(Species, expression = ifelse(Species == "setosa", "blue", ""))
|
388afe08615e2f8ce247851087c598dce4c381bb | 177b2a2a500f1bba0bbc17931ce44171831eedf3 | /obsolete/GOF.R | 3412eeda1a4f93805f7dbc9173a2bd60f4391652 | [
"MIT"
] | permissive | RossRaymond/srt.core | 4e72f7f95268536bdd5c4458edc78af979c3b6db | b27fbe095abf34eb663f2d2838816250620d19f6 | refs/heads/master | 2021-05-13T11:44:44.993859 | 2017-02-13T19:29:09 | 2017-02-13T19:29:09 | 117,137,286 | 1 | 1 | null | 2018-01-11T18:29:02 | 2018-01-11T18:29:01 | null | UTF-8 | R | false | false | 1,608 | r | GOF.R | #Akaike Information Criterion (p -> number of model parameters and lnL -> log-likelihood value)
# Akaike Information Criterion: AIC = 2*p - 2*lnL, where p is the number of
# model parameters and lnL the maximized log-likelihood.
# NOTE(review): this is a top-level assignment, not a function -- it only
# evaluates if 'p' and 'lnL' already exist in the environment, and the name
# masks stats::AIC().  The aic() function defined further down is the
# reusable form of the same formula.
AIC <- 2*p - 2*lnL
#PSSE
#data1 is tVecHoldOut and data is tVec
# Predicted sum of squared errors over a hold-out set (UNFINISHED).
#   n     -- ignored; immediately overwritten below
#   data  -- fitting-interval failure times (tVec)
#   data1 -- hold-out failure times (tVecHoldOut)
# NOTE(review): this function does not look usable as written:
#   * it reads aMLE/bMLE from the global environment (model MLEs?) -- confirm;
#   * MVF_PSSE is computed but never used;
#   * the accumulator adds (mtFitSum - (n+i))^2, self-referencing the running
#     sum rather than comparing predicted to observed counts -- presumably
#     MVF_PSSE[i] was intended in place of the first mtFitSum;
#   * the for loop is the last expression, so the function returns NULL.
PSSE <- function(n,data,data1){
  # expected cumulative failures at the hold-out times (GO-style MVF)
  MVF_PSSE <- aMLE*(1-exp(-bMLE*data1))
  n=length(data)  # NOTE(review): silently overwrites the 'n' argument
  mtFitSum=0
  for(i in 1:length(data1)){
    mtFitSum= mtFitSum+(mtFitSum-(n+i))^2
  }
}
# Mean model bias: the average pointwise difference between observed values
# x and predicted values y.
#
#   x -- numeric vector of observed values
#   y -- numeric vector of predictions, same length as x
#
# Returns mean(x - y).  Positive values indicate the model under-predicts
# on average.  Replaces the original element-by-element loop with the
# equivalent vectorized form, and errors (instead of silently producing
# NA / recycled garbage) when the lengths differ.
model_bias <- function(x, y){
  stopifnot(length(x) == length(y))
  mean(x - y)
}
# Mean squared error between observed values x and predictions y.
#
#   x -- numeric vector of observed values
#   y -- numeric vector of predictions, same length as x
#
# Returns mean((x - y)^2).  Replaces the original scalar loop with the
# equivalent vectorized form, and errors on mismatched lengths instead of
# silently producing NA / recycled garbage.
mean_square_error <- function(x, y){
  stopifnot(length(x) == length(y))
  mean((x - y)^2)
}
# Mean absolute error between observed values x and predictions y.
#
#   x -- numeric vector of observed values
#   y -- numeric vector of predictions, same length as x
#
# Returns mean(abs(x - y)).  Replaces the original scalar loop with the
# equivalent vectorized form, and errors on mismatched lengths instead of
# silently producing NA / recycled garbage.
mean_absolute_error <- function(x, y){
  stopifnot(length(x) == length(y))
  mean(abs(x - y))
}
# Akaike Information Criterion for a model with p parameters and maximized
# log-likelihood lnL: AIC = 2p - 2*lnL.
aic <- function(p, lnL){
  2 * (p - lnL)
}
# Predicted sum of squared errors on failure times: squared distance between
# the observed failure times and the times predicted by the JM model MVF.
#
#   data         -- data frame with an FT (failure time) column
#   model_params -- parameter object passed through to JM_MVF()
#
# Returns sum_i (FT[i] - Time[i])^2 over the length of data$FT.
# Replaces the original scalar accumulation loop with a vectorized sum;
# also returns 0 for an empty FT vector, where the original 1:length()
# loop produced NA.
psse_times <- function(data, model_params){
  # predicted cumulative-failure curve (external helper; columns Time/Failure
  # per the companion psse_failures sketch -- confirm)
  mvf_data <- JM_MVF(model_params, data)
  n <- length(data$FT)
  sum((data$FT - mvf_data$Time[seq_len(n)])^2)
}
# Predicted sum of squared errors on failure COUNTS (UNFINISHED sketch).
#   d            -- raw data, expected to carry an IF (interfailure time) vector
#   model_params -- model parameter object (unused)
# NOTE(review): this function cannot run as written:
#   * the 'd' argument is never used; the live code reads a global 'data';
#   * 'i' is used in the cumulr assignments before any loop defines it;
#   * the inner for loop over 1:next_delta has an empty body;
#   * the outer for loop is the last expression, so the return value is NULL.
# The large commented block below is an earlier draft that built a
# Time/Failure data frame from cumulative hazard rates.
psse_failures <- function(d,model_params){
  # input raw data IF vector
  # input model params
  #
  # n <- length(d$FT)
  # r <-data.frame()
  # cumulr <-data.frame()
  # for(i in 1:n){
  #   r[i,1] <- i
  #   r[i,2] <- 1/(param$Phi*(param$N0-(i-1)))
  #   cumulr[i,1] <- i
  #   cumulr[i,2] <- 0
  #   for(j in 1:length(r[[1]])){
  #     cumulr[i,2] <- cumulr[i,2]+r[j,2]
  #   }
  # }
  # g <- data.frame(cumulr[2],cumulr[1])
  # names(g) <- c("Time","Failure")
  # #print(g)
  # g
  n <- length(data$FT)   # NOTE(review): global 'data', not the 'd' argument
  r <- data.frame()
  cumulr <- data.frame()
  cumulr[i,1] <- 0       # NOTE(review): 'i' is not defined yet at this point
  cumulr[i,2] <- 0
  for(i in 1:n){
    next_delta <- data$IF[i]
    r[i,1] <- i
    for(j in 1:next_delta){
    }                    # NOTE(review): empty loop body -- work never filled in
  }
}
|
d92c13e8c626efa585e8b9a5e1932934f3236a60 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/msBP/examples/msBP.Gibbs.Rd.R | 182cd52b038ff59d2b383943e16ce15a1d668230 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,808 | r | msBP.Gibbs.Rd.R | library(msBP)
### Name: msBP.Gibbs
### Title: Gibbs sampling for density estimation for msBP model
### Aliases: msBP.Gibbs
### ** Examples
## Not run:
##D data(galaxy)
##D galaxy <- data.frame(galaxy)
##D speeds <- galaxy$speed/1000
##D set.seed(1)
##D #with fixed g0 and random a, b
##D fit.msbp.1 <- msBP.Gibbs(speeds, a = 10, b = 5, g0 = "empirical",
##D mcmc=list(nrep = 10000, nb = 5000, ndisplay = 1000),
##D hyper=list(hyperprior=list(a = TRUE, b = TRUE, g0 = FALSE),
##D hyperpar=list(beta=5,gamma = 1,delta = 1,lambda = 1)),
##D printing = 0, maxS = 7, grid = list(n.points = 150, low = 5, upp = 38))
##D
##D #with random a, b and hyperparameters of g0
##D fit.msbp.2 <- msBP.Gibbs(speeds, a = 10, b=5, g0 = "normal",
##D mcmc=list(nrep = 10000, nb = 5000, ndisplay = 1000),
##D hyper=list(hyperprior = list(a = TRUE, b = TRUE, g0 = TRUE),
##D hyperpar = list(beta = 50, gamma = 5, delta = 10, lambda = 1,
##D gridB = seq(0, 20, length = 30),
##D mu0 = 21, kappa0 = 0.1, alpha0 = 1, beta0 = 20)),
##D printing = 0, maxS = 7, grid = list(n.points = 150, lo w= 5, upp = 38))
##D
##D hist(speeds, prob=TRUE,br=10, ylim=c(0,0.23), main="", col='grey')
##D points(fit.msbp.1$density$postMeanDens~fit.msbp.1$density$xDens, ty='l', lwd=2)
##D points(fit.msbp.1$density$postUppDens~fit.msbp.1$density$xDens, ty='l',lty=2, lwd=2)
##D points(fit.msbp.1$density$postLowDens~fit.msbp.1$density$xDens, ty='l',lty=2, lwd=2)
##D
##D hist(speeds, prob=TRUE,br=10, ylim=c(0,0.23), main="", col='grey')
##D points(fit.msbp.2$density$postMeanDens~fit.msbp.2$density$xDens, ty='l', lwd=2)
##D points(fit.msbp.2$density$postUppDens~fit.msbp.2$density$xDens, ty='l',lty=2, lwd=2)
##D points(fit.msbp.2$density$postLowDens~fit.msbp.2$density$xDens, ty='l',lty=2, lwd=2)
##D
## End(Not run)
|
d0c80f8a6f43330bb1b6f25a05a39b1caed71ea6 | 8105d46b2ae06b7bb76d3c0ab0fc195b687bd750 | /R/trendpostprocess.R | 3f56c91d4fd72064a0d0d55470b9904ac686e492 | [] | no_license | tnkocis/stReamflowstats | c8f0d8b905afccd40fc5a280f17378de4ba800bf | 0fc1c7ff1eb024e8434ee5898884e02e95fa7b51 | refs/heads/master | 2020-04-12T02:25:09.302694 | 2017-07-01T01:43:56 | 2017-07-01T01:43:56 | 34,279,048 | 0 | 3 | null | 2015-04-24T21:38:07 | 2015-04-20T18:37:04 | R | UTF-8 | R | false | false | 18,825 | r | trendpostprocess.R | # TODO: Add comment
#
# Author: tiffn_000
###############################################################################
# Post-process exploratory trend results.  For each (period, subset) pair the
# per-site trend CSVs are read from  <base>/<period>/<subset>/, stacked into
# one data frame, split by analysis window (1, 5, 10) and trend measure, and
# each (window, measure) slice is written to
#   <base>/grouped/<out period>/<subset>/<prefix>_w<window>_<code>.csv
# This replaces four copy-pasted ~35-line stanzas (3mon/dams, 3mon/full,
# hy/full, hy/dams) with one helper; the files written, their contents, and
# their write order are identical.  (The original also left every
# intermediate subset as a global variable; nothing else in this script
# used those, so they are no longer created.)

base.dir <- "C:\\Users\\tiffn_000\\Documents\\GIS\\Active_sites_final\\Data\\trends\\"

# short filename code -> value stored in the 'measure' column of the inputs
measure.codes <- c(
  mnpks    = "meanpeaksabv",
  numpks   = "numpeaksabv",
  totdays  = "totdaysabv",
  totpkflw = "totpeakflwabv",
  totvol   = "totvolabv"
)

# moving-window widths present in the trend output
windows <- c(1, 5, 10)

# Read, stack, split and write one (period, subset) directory.
#   in.sub  -- input subdirectory under base.dir, e.g. "mon3\\dams"
#   out.sub -- output subdirectory under base.dir\grouped, e.g. "3mon\\dams"
#   prefix  -- filename prefix for the grouped CSVs, e.g. "trend3mondams"
regroup.trends <- function(in.sub, out.sub, prefix) {
  in.dir  <- paste0(base.dir, in.sub, "\\")
  out.dir <- paste0(base.dir, "grouped\\", out.sub, "\\")
  # stack all per-site files found in the input directory
  stacked <- do.call(
    rbind.data.frame,
    lapply(dir(in.dir), function(f) {
      read.csv(file = paste0(in.dir, f), header = TRUE, sep = ",")
    })
  )
  # one grouped CSV per (window, measure) combination, in the original order
  for (w in windows) {
    for (code in names(measure.codes)) {
      slice <- stacked[which(stacked$window == w & stacked$measure == measure.codes[[code]]), ]
      write.csv(slice, file = paste0(out.dir, prefix, "_w", w, "_", code, ".csv"))
    }
  }
  invisible(NULL)
}

regroup.trends("mon3\\dams", "3mon\\dams", "trend3mondams")
regroup.trends("mon3\\full", "3mon\\full", "trend3monfull")
regroup.trends("hy\\full",   "hy\\full",   "trendhyfull")
regroup.trends("hy\\dams",   "hy\\dams",   "trendhydams")
################# |
9e3399fbcfb8573f9868ec483e49a314712bec62 | 293678cfb6f4d1adc88d101ce50e3f525065906a | /Pregunta2a/solucion01.r | 2be7d04953cf0e3ea750c1abf91ec44f0e38d596 | [] | no_license | kzapfe/ExamenOPI | 719d026dcb65dcb7b8c7036ef49585df897c2a7c | 3d34fc89a0f2b4d700e7b7a1caf559c5662e0216 | refs/heads/master | 2021-01-12T02:08:50.038377 | 2017-01-11T07:30:08 | 2017-01-11T07:30:08 | 78,477,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,032 | r | solucion01.r | ### cargamos los datos
# Load the three monthly Ecobici trip files and stack them into one data set.
octubre   <- read.csv("datos/octubre2016.csv")
noviembre <- read.csv("datos/noviembre2016.csv")
diciembre <- read.csv("datos/diciembre2016.csv")
datos <- rbind(octubre, noviembre, diciembre)
# save the combined raw trips once (the original wrote this file twice)
write.csv(datos, "DatosEcoBiciTresMeses.csv")

# helper functions for extracting the hour of day from timestamps
library(lubridate)

# hour of retrieval / arrival as factors; also needed for the other questions
datos$horaret  <- as.factor(hour(datos$fhretiro))
datos$horarrib <- as.factor(hour(datos$fharribo))

# total use per station and hour, for arrivals and retrievals
# NOTE(review): assumes the monthly files provide a 'uso' column -- confirm.
arribohest <- aggregate(uso ~ horarrib + Ciclo_Estacion_Arribo, datos, sum)
retirohest <- aggregate(uso ~ horaret + Ciclo_Estacion_Retiro, datos, sum)

# saved for question 3
write.csv(arribohest, "arribosporhora.csv")
write.csv(retirohest, "retirosporhora.csv")

# rank stations by use (ascending) to see the most-used ones; explicit $uso
# indexing replaces the original attach() calls, which left both data frames
# on the search path
arribohest[order(arribohest$uso), ]
retirohest[order(retirohest$uso), ]
|
bed29883117f931de8934cc3013f49bdf68b48e1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lhs/examples/maximinLHS.Rd.R | 1d38433a480e4e5a3d6a45d25e2a2bc69824c912 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 378 | r | maximinLHS.Rd.R | library(lhs)
### Name: maximinLHS
### Title: Maximin Latin Hypercube Sample
### Aliases: maximinLHS
### Keywords: design
### ** Examples

# 4-run, 3-variable maximin Latin hypercube designs.  Each call draws from
# the RNG, so the calls must stay in this order for reproducible output.
maximinLHS(4, 3, dup=2)   # default construction, 2 duplicate candidate points
maximinLHS(4, 3, method="build", dup=2)   # explicit row-by-row build method
# iterative optimization of an initial design, on the grid and on the result
maximinLHS(4, 3, method="iterative", eps=0.05, maxIter=100, optimize.on="grid")
maximinLHS(4, 3, method="iterative", eps=0.05, maxIter=100, optimize.on="result")
|
1bc90e4150b3cae5176e8e592df2e85372add0da | 30021b050897c3735578064e4d77f1b6e39c8e5b | /ggedit/R/aesSlide.R | a8f9e32c83fa00076968cc2b65eba7eed6242152 | [] | no_license | elisendavila/ggedit | f03b2cdcdb6e72356cb3ec0f3026d674f3e7aba2 | cb451efa49f0cc105a6eadf36f03162a41fb043f | refs/heads/master | 2020-12-31T00:01:15.200868 | 2017-02-28T15:06:22 | 2017-02-28T15:06:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 302 | r | aesSlide.R | #' @export
#' Build a sliderInput specification for an aesthetic property
#'
#' Returns a list describing a shiny \code{sliderInput} for the aesthetic
#' named by \code{type}: the control constructor plus its arguments, with
#' the slider range taken from the package-level \code{slideDefaults}
#' lookup and an initial value of \code{NA}.
#'
#' @param type character name of the aesthetic (also used as the label;
#'   must be a name present in \code{slideDefaults})
#' @return list with elements \code{type} (the \code{sliderInput} function)
#'   and \code{args} (its argument list)
#' @export
#' @keywords internal
aesSlide <- function(type) {
  list(
    type = sliderInput,
    args = list(
      inputId = paste0('pop', toupper(type)),
      label = type,
      min = slideDefaults[[type]][1],
      max = slideDefaults[[type]][2],
      value = NA
    )
  )
}
c6f3f79a1ef0abbffa432b6bf2a9bd342ab05a83 | 08b6ca491f91acd9227c76c0f0f844a5fc84366c | /AOI_management/AOI-context.maps.R | 4735bcc0e39ae1a17cf322e3c41482dba012c136 | [] | no_license | ncss-tech/compare-psm | 555575ee05833889be7beff332442017bc1d5fc4 | 642af0dbfac03dbf2508ac7844c5829ac6af43f6 | refs/heads/master | 2023-04-15T05:37:29.734452 | 2022-09-14T19:38:31 | 2022-09-14T19:38:31 | 307,587,398 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,739 | r | AOI-context.maps.R | ## 2020-12-11
## D.E. Beaudette
## AOI context maps, using soil color at 25cm as background
options(stringsAsFactors = FALSE)
library(raster)
library(rasterVis)
library(sp)
library(sf)
library(spData)
library(rgdal)
library(ragg)
# gNATSGO CRS
gNATSGO.crs <- '+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs'
# state outlines are good context
data("us_states")
us_states <- as(us_states, 'Spatial')
us_states <- spTransform(us_states, CRS(gNATSGO.crs))
# AOIs in AEA / gNATSGO CRS
aoi <- readOGR(dsn = 'geom', layer = 'AOI_1_aea')
sub_aoi <- readOGR(dsn = 'geom', layer = 'AOI_0.2_aea')
# 2022 soil color map, 270m resolution
# pixel values are color codes
soilcolor <- brick('E:/gis_data/soil-color/2022/final-025cm-gNATSGO.tif')
# color LUT
soilcolor.lut <- read.csv('E:/gis_data/soil-color/2022/unique-moist-color-LUT.csv')
soilcolor.lut$col <- rgb(soilcolor.lut$r, soilcolor.lut$g, soilcolor.lut$b, maxColorValue = 255)
# color LUT is in the same order as cell values, seems to work with plot method for raster objects
## base graphics for context map
agg_png(file = 'figures/context-map.png', width = 2400, height = 1500, scaling = 2)
par(mar = c(0, 0, 0, 0), bg = 'black')
plot(
soilcolor,
interpolate = TRUE,
# increase for final version
maxpixels = 1e6,
col = soilcolor.lut$col,
colNA = 'black',
legend = FALSE,
axes = FALSE
)
plot(us_states, border = 'white', lwd = 1, add = TRUE)
plot(aoi, border = 'green', lwd = 2, add = TRUE, lend = 1)
text(aoi, labels = aoi$id, font = 2, col = 'green', cex = 1)
dev.off()
## zoomed context maps
# Plot one AOI over the soil color raster, zoomed to the AOI plus a buffer.
#   i: single AOI SPDF (one polygon; must have 'id' and 'name' attributes)
#   b: buffer distance around the AOI, in meters (CRS units)
# BUG FIX: the buffer width was hard-coded to 50000, silently ignoring 'b'.
plotZoom <- function(i, b = 50000) {
  # zoom extent = AOI footprint grown by b meters
  zoom.ext <- extent(
    buffer(
      i,
      width = b
    )
  )
  plot(
    soilcolor,
    interpolate = TRUE,
    legend = FALSE,
    axes = FALSE,
    ext = zoom.ext,
    colNA = 'black',
    col = soilcolor.lut$col
  )
  # state boundaries
  plot(us_states, border = 'white', lwd = 1, add = TRUE)
  # current AOI
  plot(i, border = 'green', lwd = 2, add = TRUE, lend = 1)
  # any sub-AOIs within the current AOI (NA rows = no overlap, dropped)
  ovr.res <- over(i, sub_aoi)
  ovr.res <- na.omit(ovr.res)
  if(nrow(ovr.res) > 0) {
    plot(sub_aoi[sub_aoi$id == ovr.res$id, ], border = 'green', lwd = 1, add = TRUE, lend = 1)
  }
  # label AOI along the bottom edge
  mtext(i[['name']], side = 1, line = -2, col = 'white', font = 2, cex = 1.5)
}
# Render one zoomed context map PNG per AOI, named figures/aoi-<id>.png.
for (this.id in aoi$id) {
  out.file <- sprintf('figures/aoi-%s.png', this.id)
  agg_png(file = out.file, width = 800, height = 900, res = 90, scaling = 1.5)
  par(mar = c(0, 0, 0, 0), bg = 'black')
  plotZoom(aoi[which(aoi$id == this.id), ])
  dev.off()
}
## interesting idea, but runs out of memory
# https://stackoverflow.com/questions/16093802/how-to-get-rgb-raster-image-with-utm-coordinates
#
# # copy in which we will store colors
# soilcolor.r <- raster(soilcolor)
#
# # create a color for each cell
# # this is slow
# cols <- factor(rgb(soilcolor[], maxColorValue=255))
#
# # store colors as factor levels
# # essentially a color LUT
# soilcolor.r[] <- cols
#
#
# ## this works for rasterVis::levelplot
# # expand BBOX around the
# b <- bbox(us_states)
# x.lim <- c(b[1, 1] - 1e5, b[1, 2] + 1e5)
# y.lim <- c(b[2, 1] - 1e5, b[2, 2] + 1e5)
#
# pp <- levelplot(soilcolor.r, maxpixels = ncell(soilcolor) + 1,
# margin = FALSE, xlim = x.lim, ylim = y.lim,
# scales = list(draw=FALSE),
# col.regions = as.character(levels(cols))
# panel=function(...) {
# panel.levelplot(...)
# sp.polygons(us_states, col='white', lwd=1)
# sp.polygons(aoi, col='black', lwd=1)
# }
# )
#
|
2506199e7a06544500315b4ac0c4152d9d2d7de7 | 66641f2005a8f958bac216b99413a51c354257d0 | /regresssions.R | 12062321e80cfcb0eb66e89e62b79ad2c5b410db | [] | no_license | anishshah23/IMDb-5000-Data-analysis | b66b4f1b123cd60d1a6d74d82905d8750f6be244 | 6542537e9d1ae2989228fd7f2b35ac5fa22c0f80 | refs/heads/master | 2020-04-07T09:35:56.613023 | 2018-11-30T08:53:47 | 2018-11-30T08:53:47 | 124,200,663 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,509 | r | regresssions.R | library(leaps)
library(lars)
# Load the pre-processed IMDb data (genre and MPAA-rating dummy variables
# already added) and split into a training set (rows 1-1000) and a
# held-out test set (rows 1001-1494).
usa <- read.csv('usa_string_genre_dummies.csv',header=TRUE)
usa_train <- usa[1:1000,]
usa_test <- usa[1001:1494,]
# Response: IMDb score.  Three candidate design matrices of increasing size:
#   x1 - numeric movie features only
#   x2 - x1 plus genre dummies
#   x3 - x2 plus MPAA-rating dummies and rating_x
y = usa_train$imdb_score
x1 = cbind(usa_train$duration, usa_train$director_facebook_likes, usa_train$adj_gross, usa_train$cast_total_facebook_likes, usa_train$facenumber_in_poster, usa_train$adj_budg, usa_train$title_year)
x2 = cbind(usa_train$duration, usa_train$director_facebook_likes, usa_train$adj_gross, usa_train$cast_total_facebook_likes, usa_train$facenumber_in_poster, usa_train$adj_budg, usa_train$title_year, usa_train$Action, usa_train$Adventure, usa_train$Animation, usa_train$Comedy, usa_train$Crime, usa_train$Family, usa_train$Fantasy, usa_train$Thriller, usa_train$Sci_Fi, usa_train$Drama, usa_train$Mystery, usa_train$Romance, usa_train$Biography, usa_train$History, usa_train$Music, usa_train$War, usa_train$Western, usa_train$Horror, usa_train$Sport, usa_train$Documentary, usa_train$Film_Noir)
x3 = cbind(usa_train$duration, usa_train$director_facebook_likes, usa_train$adj_gross, usa_train$cast_total_facebook_likes, usa_train$facenumber_in_poster, usa_train$adj_budg, usa_train$title_year,
           usa_train$Action, usa_train$Adventure, usa_train$Animation, usa_train$Comedy, usa_train$Crime, usa_train$Family, usa_train$Fantasy, usa_train$Thriller, usa_train$Sci_Fi, usa_train$Drama, usa_train$Mystery, usa_train$Romance, usa_train$Biography, usa_train$History, usa_train$Music, usa_train$War, usa_train$Western, usa_train$Horror, usa_train$Sport, usa_train$Documentary, usa_train$Film_Noir,
           usa_train$Approved, usa_train$G, usa_train$M, usa_train$NC_17, usa_train$Not_Rated, usa_train$PG, usa_train$PG_13, usa_train$Passed, usa_train$R, usa_train$Unrated, usa_train$rating_x)
# Forward stepwise search via lars, once per design matrix.  The bare
# object names below auto-print the fitted paths when run as a script.
res1 = lars(x1, y, type="stepwise")
print(summary(res1))
res1
res2 = lars(x2, y, type="stepwise")
print(summary(res2))
res2
res3 = lars(x3, y, type="stepwise")
print(summary(res3))
res3
# Regression: refit the stepwise-selected variable subsets with lm() on the
# training data, then predict on the held-out test rows.
# NOTE(review): xvar2 carries a "History" column that is absent from the
# reg2 formula; predict() simply ignores extra columns, so this is harmless,
# but confirm it matches the intended model.
yvar <- usa_test[,"imdb_score"]
xvar1 <- usa_test[,c("duration","director_facebook_likes","adj_gross","cast_total_facebook_likes","facenumber_in_poster")]
xvar2 <- usa_test[,c("duration", "Animation", "Drama", "director_facebook_likes", "adj_gross", "cast_total_facebook_likes", "Horror", "Comedy", "title_year", "Fantasy", "History")]
xvar3 <- usa_test[,c("duration", "Animation", "Drama", "director_facebook_likes", "adj_gross", "cast_total_facebook_likes", "Horror", "G", "Not_Rated", "Comedy", "Approved", "Passed", "Fantasy", "Music", "NC_17")]
reg1 <- lm(imdb_score ~ duration+director_facebook_likes+adj_gross+cast_total_facebook_likes+facenumber_in_poster, data=usa_train)
reg2 <- lm(imdb_score ~ duration+Animation+Drama+director_facebook_likes+adj_gross+cast_total_facebook_likes+Horror+Comedy+title_year+Fantasy, data = usa_train)
reg3 <- lm(imdb_score ~ duration+Animation+Drama+director_facebook_likes+adj_gross+cast_total_facebook_likes+Horror+G+Not_Rated+Comedy+Approved+Passed+Fantasy+Music+NC_17, data = usa_train)
# Predicted IMDb scores on the test set (auto-printed below each fit).
pred1 <- predict(reg1, newdata=data.frame(xvar1),type="response")
pred1
pred2 <- predict(reg2, newdata=data.frame(xvar2),type="response")
pred2
pred3 <- predict(reg3, newdata=data.frame(xvar3),type="response")
pred3
# Mean absolute prediction error on the test set for each model
# (values from a previous run noted in the trailing comments).
Error1 <- (sum(abs(yvar-pred1)))/nrow(usa_test)
Error1 #0.749
Error2 <- (sum(abs(yvar-pred2)))/nrow(usa_test)
Error2 #0.6937
Error3 <- (sum(abs(yvar-pred3)))/nrow(usa_test)
Error3 #0.6967
# Adjusted R^2 of each training-set fit.
summary(reg1) #adj R2: 22.64%
summary(reg2) #adj R2: 29.85%
summary(reg3) #adj R2: 31.27
|
03b5fbe1f7947502c993134953a9c50a17f8e9d4 | 4288bddd9cfcda360e438c1bb5670437dc7d4e15 | /cachematrix.R | d594bfe2d4efc6ff743d33ca0f07e887ae9373d7 | [] | no_license | mgenty/ProgrammingAssignment2 | c9589aacc1014c9e30580abc8274bcc4352a6102 | 3cc6ac2d86fab0f7d0ab900458ec60cd895ab845 | refs/heads/master | 2020-04-05T18:30:38.688944 | 2014-11-10T21:26:27 | 2014-11-10T21:26:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,854 | r | cachematrix.R | ##
## ==========================================================================
## This File Is Comprised Of Two Functions That Cache The Inverse Of A
## Given Matrix. The First Function Creates A Special "Matrix" From The
## Passed In Matrix That Is Capable Of Caching Its Own Inverse. The Second
## Function Computes The Inverse Of The Special "Matrix" Created By The
## First Function. In Other Words, The Second Function Exercises The First
## Function. A Second Call To The Second Function Does Not Recalculate The
## Inverse. Instead It Just Retrieves The Cached Inverse From The Special
## "Matrix" (Assuming, Of Course, That The Special "Matrix" Is Unchanged.)
##
## NOTE: The makeCacheMatrix Function Is A Derivative Of The makeVector
## Function, And The cacheSolve Function Is A Derivative Of The
## cachemean Function, Both Of Which Were Supplied As Example
## Functions For This Programming Assignment.
##
## **************************************************************************
## Coursera: Johns Hopkins Data Science Specialization
## R-Programming: Programming Assignment 2
## Last Update: 10Nov14 By Marc Genty
## **************************************************************************
##
## ==========================================================================
##
##
## --------------------------------------------------------------------------
## Description: Function To Create A Special "Matrix" Object
## That Can Cache Its Inverse.
##
## Example Use: squareMatrix <- matrix(1:4, 2)
## cacheMatrix <- makeCacheMatrix(squareMatrix)
## Note That The Data Contents Of The Matrix Can Now Be
## Viewed With Either squareMatrix Or cacheMatrix$get()
## --------------------------------------------------------------------------
##
## Build a special "matrix" object: a list of four accessor closures that
## share an environment holding the matrix `x` and a cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
##
## --------------------------------------------------------------------------
## Description: Function To Compute The Inverse Of The Special
## "Matrix" Returned By makeCacheMatrix (above).
##
## Example Use: squareMatrix <- matrix(1:4, 2)
## cacheMatrix <- makeCacheMatrix(squareMatrix)
## inverseMatrix <- makeCacheMatrix(cacheMatrix)
## --------------------------------------------------------------------------
##
## Return the inverse of the special "matrix" `x` (as created by
## makeCacheMatrix), computing it with solve() on the first call and
## serving the cached copy on subsequent calls.  Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the underlying matrix and store the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
fddcefaa41b982341a3f87e1c9f4b53e2c092e6e | 5ada63667fdfb87eaff4a087e70c6f5a4267ea74 | /R/OutcomeImputationCOXMH.R | 076d94462a2bc08c7e5edd24c836694afc6a2e61 | [] | no_license | lbeesleyBIOSTAT/MultiCure | d3a00f23e40c87b4e984e2e315b50ee0a9226056 | f33c994ce2aa2565f0163c9a559fccad2ab277ab | refs/heads/master | 2022-02-03T21:46:32.511343 | 2019-07-08T16:10:44 | 2019-07-08T16:10:44 | 103,183,325 | 3 | 2 | null | 2017-09-11T20:34:18 | 2017-09-11T20:14:41 | null | UTF-8 | R | false | false | 7,317 | r | OutcomeImputationCOXMH.R |
#' UNEQUALCENSIMPUTECOXMH
#' @description The function UNEQUALCENSIMPUTECOXMH will perform an imputation algorithm to handle unequal follow-up for recurrence and death. This function can be applied when we assume COX baseline hazards. This function performs imputation using a Metropolis-Hastings algorithm. The proposal distribution is Uniform with bounds such that the target kernel is nonzero.
#' @param datWIDE defined as in MultiCure
#' @param beta A vector containing the most recent estimates of beta
#' @param alpha A vector containing the most recent estimates of alpha
#' @param ImputeDat This is a list with the following elements:
#' \itemize{
#' \item UnequalCens: A vector taking value 1 if the subject has unequal follow-up. Note: If subject is assumed cured in datWIDE, they are listed as UnequalCens = 0.
#' \item CovMissing: A matrix indicating which elements of Cov are missing. Not needed for this imputation.
#' \item CovImp: A list containing a single imputation of Cov
#' \item GImp: A vector with a recent single imputation of G
#' \item YRImp: A vector with a recent single imputation of Y_R
#' \item deltaRImp: A vector with a recent single imputation of delta_R
#' \item y: The integral of the target kernel over Yr0 to Yd
#' \item Basehaz13: A matrix containing the estimate of the baseline hazard function for the 1->3 transition specified intervals
#' \item Basehaz24: A matrix containing the estimate of the baseline hazard function for the 2->4 transition specified intervals
#' \item Basehaz14: A matrix containing the estimate of the baseline hazard function for the 1->4 transition specified intervals
#' \item Basehaz34: A matrix containing the estimate of the baseline hazard function for the 3->4 transition specified intervals
#' 	\item YRImpSAVE: A vector with the most recent ACCEPTED values of Y_R from the Metropolis-Hastings algorithm
#' }
#' @param TransCov defined as in MultiCure
#'
#' @return a list containing
#' \itemize{
#' \item [[1]]: deltaRImp, A single imputation of delta_R
#' \item [[2]]: YRImp, A single imputation of Y_R
#'}
#' @export
UNEQUALCENSIMPUTECOXMH = function(datWIDE, beta, alpha, ImputeDat, TransCov){
	##################
	### Initialize ###
	##################
	# Unpack ImputeDat by position (see roxygen above for the meaning of
	# each slot).  Slot 2 (CovMissing) is not needed for this imputation.
	UnequalCens = ImputeDat[[1]]
	CovImp = as.data.frame(ImputeDat[[3]])
	GImp = ImputeDat[[4]]
	YRImp = ImputeDat[[5]]
	deltaRImp = ImputeDat[[6]]
	y = ImputeDat[[7]]
	Basehaz13 = ImputeDat[[8]]
	Basehaz24 = ImputeDat[[9]]
	Basehaz14 = ImputeDat[[10]]
	Basehaz34 = ImputeDat[[11]]
	YRImpSAVE = ImputeDat[[12]]
	Nobs = length(datWIDE[,1])
	# Number of covariates per transition; TRANS labels each element of
	# beta with its transition (1: 1->3, 2: 2->4, 3: 1->4, 4: 3->4).
	A1 = length(TransCov$Trans13)
	A2 = length(TransCov$Trans24)
	A3 = length(TransCov$Trans14)
	A4 = length(TransCov$Trans34)
	TRANS = c(rep(1,A1), rep(2,A2), rep(3,A3), rep(4,A4))
	# Linear predictors X'beta for each transition, one value per subject.
	XB_beta13 = as.numeric(beta[TRANS==1] %*% t(cbind(CovImp[,TransCov$Trans13])))
	XB_beta24 = as.numeric(beta[TRANS==2] %*% t(cbind(CovImp[,TransCov$Trans24])))
	XB_beta14 = as.numeric(beta[TRANS==3] %*% t(cbind(CovImp[,TransCov$Trans14])))
	XB_beta34 = as.numeric(beta[TRANS==4] %*% t(cbind(CovImp[,TransCov$Trans34])))
	# Step functions for the baseline hazards; the hazard is set to 0 beyond
	# the last interval (hence the trailing 0 in y).
	BasehazFun_13 = stepfun(x= Basehaz13[,2], y = c(Basehaz13[,3],0), right = F)
	BasehazFun_24 = stepfun(x= Basehaz24[,2], y = c(Basehaz24[,3],0), right = F)
	BasehazFun_14 = stepfun(x= Basehaz14[,2], y = c(Basehaz14[,3],0), right = F)
	BasehazFun_34 = stepfun(x= Basehaz34[,2], y = c(Basehaz34[,3],0), right = F)
	# Probability of remaining in state 1 (no recurrence, no death) up to
	# Y_D, and the 1->4 hazard evaluated at Y_D.
	S1_D = exp(-as.numeric(sapply(datWIDE$Y_D,Baseline_Hazard, Basehaz13))*exp(XB_beta13))*
		exp(-as.numeric(sapply(datWIDE$Y_D,Baseline_Hazard, Basehaz14))*exp(XB_beta14))
	h14_D = BasehazFun_14(datWIDE$Y_D)*exp(XB_beta14)
	# Subjects with equal follow-up keep their observed values; subjects with
	# unequal follow-up (and GImp == 1) get NA, to be imputed below.
	YRImp = ifelse(GImp==0,datWIDE$Y_D, ifelse(GImp==1 & UnequalCens == 0,datWIDE$Y_R,rep(NA,Nobs) ))
	deltaRImp = ifelse(GImp==0,rep(0,Nobs), ifelse(GImp==1 & UnequalCens == 0,datWIDE$delta_R,rep(NA,Nobs) ))
	######################
	### Impute Delta R ###
	######################
	# Posterior probability of recurrence before death: y (integral of the
	# target kernel) against the no-recurrence contribution.
	num = y
	denom = (h14_D^datWIDE$delta_D)*S1_D
	ratio = ifelse(num==0,num,num/(num + denom)) [GImp==1 & UnequalCens == 1]
	deltaRImp[GImp==1 & UnequalCens == 1] = apply(matrix(ratio), 1,mSample)
	# Subjects imputed as having no recurrence are censored for Y_R at Y_D.
	YRImp[GImp==1 & UnequalCens == 1 & deltaRImp==0] = datWIDE$Y_D[GImp==1 & UnequalCens == 1 & deltaRImp==0]
	# Remaining NAs are subjects imputed to recur: draw Y_R via MH below.
	INDICES = which(is.na(YRImp))
	########################
	### Define Functions ###
	########################
	# fdCOX evaluates the (unnormalized) target density of Y_R = v for
	# subject m: f(recur at v, then survive/die at Y_D).
	if('T_R' %in% TransCov$Trans34){
		fdCOX<-function(x){
			v = x[1]
			m = x[2]
			# When T_R is a 3->4 covariate, rebuild the 3->4 linear predictor
			# with the candidate recurrence time v plugged in for T_R.
			# BUG FIX: the original indexed CovImp[[i]] with an undefined `i`
			# (leftover from a multiple-imputation loop); CovImp here is
			# already the single imputed covariate data frame.
			XB_beta34MOD = as.numeric(beta[TRANS==4][TransCov$Trans34!= 'T_R'] %*% t(cbind(CovImp[m,TransCov$Trans34[TransCov$Trans34!='T_R']])))
			XB_beta34MOD = XB_beta34MOD + as.numeric(beta[TRANS==4][TransCov$Trans34== 'T_R'] %*% t(cbind(v)))
			Cumhazard13_temp = exp(XB_beta13[m])*as.numeric(Baseline_Hazard(v, Basehaz13 ))
			Cumhazard14_temp = exp(XB_beta14[m])*as.numeric(Baseline_Hazard(v, Basehaz14 ))
			Cumhazard34_temp = exp(XB_beta34MOD)*as.numeric(Baseline_Hazard(datWIDE$Y_D[m]-v, Basehaz34) )
			Surv1_temp = exp(-Cumhazard13_temp-Cumhazard14_temp)
			Surv3_temp = exp(-Cumhazard34_temp)
			hazard13_temp = exp(XB_beta13[m])*BasehazFun_13(v)
			# Guard v == Y_D: a post-recurrence death hazard over zero time.
			hazard34_temp = ifelse(v == datWIDE$Y_D[m],0,exp(XB_beta34MOD)*BasehazFun_34(datWIDE$Y_D[m]-v))
			return(hazard13_temp*Surv1_temp* Surv3_temp*((hazard34_temp)^datWIDE$delta_D[m]))
		}
	}else{
		fdCOX<-function(x){
			v = x[1]
			m = x[2]
			Cumhazard13_temp = exp(XB_beta13[m])*as.numeric(Baseline_Hazard(v, Basehaz13 ))
			Cumhazard14_temp = exp(XB_beta14[m])*as.numeric(Baseline_Hazard(v, Basehaz14 ))
			Cumhazard34_temp = exp(XB_beta34[m])*as.numeric(Baseline_Hazard(datWIDE$Y_D[m]-v, Basehaz34) )
			Surv1_temp = exp(-Cumhazard13_temp-Cumhazard14_temp)
			Surv3_temp = exp(-Cumhazard34_temp)
			hazard13_temp = exp(XB_beta13[m])*BasehazFun_13(v)
			hazard34_temp = ifelse(v == datWIDE$Y_D[m],0,exp(XB_beta34[m])*BasehazFun_34(datWIDE$Y_D[m]-v))
			return(hazard13_temp*Surv1_temp* Surv3_temp*((hazard34_temp)^datWIDE$delta_D[m]))
		}
	}
	TAU_R = max(Basehaz13[,1])
	current = YRImpSAVE[INDICES]
	##############################
	### Propose New Imputation ###
	##############################
	### Limits of proposal distribution determined so the baseline hazard and survival functions in fdCOX are nonzero.
	MIN = datWIDE$Y_R[INDICES]
	MAX = pmin(datWIDE$Y_D[INDICES], TAU_R)
	#For subjects in INDICES, MIN <= MAX. This is a result of setting lambda13(TAU_R) = 0, which assigns subjects at risk after TAU_R to G=0
	proposal = apply(cbind(MIN, MAX),1, mHPropose)
	#########################################################
	### Metropolis-Hastings Method, Accept or Reject Draw ###
	#########################################################
	# Accept with probability min(1, [f(prop) q(cur)] / [f(cur) q(prop)])
	# on the log scale, where q is the Uniform(MIN, MAX) proposal density.
	logdens_CUR = log(as.numeric(apply(cbind(current, INDICES),1, fdCOX)))
	logdens_PRO = log(as.numeric(apply(cbind(proposal, INDICES),1, fdCOX)))
	alph<-runif(length(INDICES),0,1)
	ACCEPT = log(alph)<(logdens_PRO +log(dunif(current, min = MIN, max = MAX))-logdens_CUR-log(dunif(proposal, min = MIN, max = MAX)))
	ACCEPT[is.na(ACCEPT)] = TRUE #should not ever be used. This is to catch errors in which fdCOX is infinite
	YRImpSAVE[INDICES][!ACCEPT] = current[!ACCEPT]
	YRImpSAVE[INDICES][ACCEPT] = proposal[ACCEPT]
	YRImp[INDICES][ACCEPT] = proposal[ACCEPT]
	YRImp[INDICES][!ACCEPT] = current[!ACCEPT]
	return(list(deltaRImp, YRImp, YRImpSAVE))
}
|
fd9e82982b7e7c984fff2684954dddf1a12f62b2 | 8179582231291aa2cc71e8d17ada982439342e80 | /final_configuration_script.R | afce302b4e67a9a2f847facb039086eda4afbecb | [] | no_license | kmm0155/Final_Project_BIOL7180 | 2df9d99bb82472ae5d2c900a93c6da8b8631df5a | d9e9f55c130d257102bb7707d29e351ae459b443 | refs/heads/master | 2021-05-22T21:41:07.200870 | 2020-04-20T13:15:25 | 2020-04-20T13:15:25 | 253,108,522 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 15,765 | r | final_configuration_script.R | # Final Project- Creating a Professional 3-D Figure Using GGplot within R
# Contributors- Tori Coutts, Kaitlyn Murphy, and Megan Roberts
# This script will be used after starting up the R environment in which GGplot will be run.
# Sets current working directory to where R will be run and graphs will be saved
#setwd("/Users/Desktop")
# Installing GGplot, if needed.  select.list() is interactive: it prompts
# the user to pick from the listed choices.
print("Do you have GGplot previously installed? (Y/N)")
response1 = select.list(c("Y","N"), preselect=NULL, multiple=FALSE)
if(response1=="N") {install.packages('ggplot2')}
# Starting up GGplot
library(ggplot2)
# Reading the CSV file (chosen interactively via file.choose()) and
# printing the first rows to confirm it loaded as expected.
datum=read.csv(file.choose())
head (datum)
# Choosing the plotting and output type; response2 drives the if-blocks below.
cat("Choose which graph and file type you would like:
    1-1: Bar graph/Tiff
    1-2: Bar graph/PNG
    1-3: Bar graph/JPEG
    2-1: Point graph/Tiff
    2-2: Point graph/PNG
    2-3: Point graph/JPEG
    3-1: Line graph/Tiff
    3-2: Line graph/PNG
    3-3: Line graph/JPEG\n")
response2 = select.list(c("1-1", "1-2", "1-3", "2-1", "2-2", "2-3", "3-1", "3-2", "3-3"), preselect=NULL, multiple=FALSE)
# Bar graph (average egg mass per mom) saved as a TIFF.
if(response2 == "1-1") {
  MomVsEggMass <- ggplot(data=datum,aes(x=MOM,y=AVGEGGMASS)) +
    geom_bar(stat="identity",fill="steelblue") +
    theme_minimal() +
    labs(title="", x="Mom ID", y="Average Egg Mass (g)") +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Inside an `if` block a bare object name is not auto-printed, so call
  # print() explicitly to display the plot.
  print(MomVsEggMass)
  # Pass the plot explicitly: with no `plot=` argument ggsave() saves
  # last_plot(), which is not guaranteed to be this plot.
  ggsave(file="bar_graph.tiff", plot = MomVsEggMass)
}
# Bar graph (average egg mass per mom) saved as a PNG.
if(response2 == "1-2") {
  MomVsEggMass <- ggplot(data=datum,aes(x=MOM,y=AVGEGGMASS)) +
    geom_bar(stat="identity",fill="steelblue") +
    theme_minimal() +
    labs(title="", x="Mom ID", y="Average Egg Mass (g)") +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Inside an `if` block a bare object name is not auto-printed, so call
  # print() explicitly to display the plot.
  print(MomVsEggMass)
  # Pass the plot explicitly: with no `plot=` argument ggsave() saves
  # last_plot(), which is not guaranteed to be this plot.
  ggsave(file="bar_graph.png", plot = MomVsEggMass)
}
# Bar graph (average egg mass per mom) saved as a JPEG.
if(response2 == "1-3") {
  MomVsEggMass <- ggplot(data=datum,aes(x=MOM,y=AVGEGGMASS)) +
    geom_bar(stat="identity",fill="steelblue") +
    theme_minimal() +
    labs(title="", x="Mom ID", y="Average Egg Mass (g)") +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Inside an `if` block a bare object name is not auto-printed, so call
  # print() explicitly to display the plot.
  print(MomVsEggMass)
  # Pass the plot explicitly: with no `plot=` argument ggsave() saves
  # last_plot(), which is not guaranteed to be this plot.
  ggsave(file="bar_graph.jpg", plot = MomVsEggMass)
}
# Point graph (three stacked panels) saved as a TIFF.
if (response2 == "2-1") {
  # These panels need nlme (lme), gridExtra (arrangeGrob/grid.arrange) and
  # grid (textGrob/unit/gpar); none of them is attached at the top of the
  # script, so load them here before use.
  library(nlme)
  library(gridExtra)
  library(grid)
  # Linear mixed-effects model of log hatchling mass on log snout-vent
  # length (SVL), with mom as a random intercept.
  resultshatchmass <- lme(log(MASS)~log(SVL),data=datum,random=~1|MOM,na.action=na.omit)
  # Keep only rows with complete MASS so residuals line up with the data.
  datum4 <- datum[complete.cases(datum[,"MASS"]),]
  # Residuals of the mass~SVL model serve as a body-condition index.
  datum4$resids <- resid(resultshatchmass)
  # Quick base-graphics sanity check of the residuals.
  plot(resids~EGGMASS,datum4)
  # Panel (c): hatchling body condition vs egg mass.
  bodycondition <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=resids), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=resids), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling body condition") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Panel (a): hatchling mass vs egg mass.
  hatchlingmass <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=MASS), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=MASS), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling mass (g)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Panel (b): hatchling snout-vent length (SVL) vs egg mass.
  hatchlingsvl <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=SVL), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=SVL), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling SVL (mm)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Add the a)/b)/c) panel labels and stack the three plots in one figure.
  hatchlingmass1 <- arrangeGrob(hatchlingmass, top = textGrob("a)", x = unit(0.10, "npc")
                                               , y = unit(0, "npc"), just=c("left","top"),
                                               gp=gpar(col="black", fontsize=15)))
  hatchlingsvl1 <- arrangeGrob(hatchlingsvl, top = textGrob("b)", x = unit(0.10, "npc")
                                             , y = unit(0, "npc"), just=c("left","top"),
                                             gp=gpar(col="black", fontsize=15)))
  bodycondition1 <- arrangeGrob(bodycondition, top = textGrob("c)", x = unit(0.10, "npc")
                                               , y = unit(0, "npc"), just=c("left","top"),
                                               gp=gpar(col="black", fontsize=15)))
  # Open the TIFF device, draw the combined figure, then close the device.
  tiff("point_graph.tiff", width = 4, height = 6, units = 'in', res = 300)
  grid.arrange(hatchlingmass1, hatchlingsvl1, bodycondition1, ncol = 1)
  dev.off()
}
# Point graph (three stacked panels) saved as a PNG.
if (response2 == "2-2") {
  # These panels need nlme (lme), gridExtra (arrangeGrob/grid.arrange) and
  # grid (textGrob/unit/gpar); none of them is attached at the top of the
  # script, so load them here before use.
  library(nlme)
  library(gridExtra)
  library(grid)
  # Linear mixed-effects model of log hatchling mass on log snout-vent
  # length (SVL), with mom as a random intercept.
  resultshatchmass <- lme(log(MASS)~log(SVL),data=datum,random=~1|MOM,na.action=na.omit)
  # Keep only rows with complete MASS so residuals line up with the data.
  datum4 <- datum[complete.cases(datum[,"MASS"]),]
  # Residuals of the mass~SVL model serve as a body-condition index.
  datum4$resids <- resid(resultshatchmass)
  # Quick base-graphics sanity check of the residuals.
  plot(resids~EGGMASS,datum4)
  # Panel (c): hatchling body condition vs egg mass.
  bodycondition <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=resids), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=resids), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling body condition") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Panel (a): hatchling mass vs egg mass.
  hatchlingmass <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=MASS), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=MASS), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling mass (g)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Panel (b): hatchling snout-vent length (SVL) vs egg mass.
  hatchlingsvl <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=SVL), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=SVL), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling SVL (mm)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Add the a)/b)/c) panel labels and stack the three plots in one figure.
  hatchlingmass1 <- arrangeGrob(hatchlingmass, top = textGrob("a)", x = unit(0.10, "npc")
                                               , y = unit(0, "npc"), just=c("left","top"),
                                               gp=gpar(col="black", fontsize=15)))
  hatchlingsvl1 <- arrangeGrob(hatchlingsvl, top = textGrob("b)", x = unit(0.10, "npc")
                                             , y = unit(0, "npc"), just=c("left","top"),
                                             gp=gpar(col="black", fontsize=15)))
  bodycondition1 <- arrangeGrob(bodycondition, top = textGrob("c)", x = unit(0.10, "npc")
                                               , y = unit(0, "npc"), just=c("left","top"),
                                               gp=gpar(col="black", fontsize=15)))
  # Open the PNG device, draw the combined figure, then close the device.
  png("point_graph.png", width = 4, height = 6, units = 'in', res = 300)
  grid.arrange(hatchlingmass1, hatchlingsvl1, bodycondition1, ncol = 1)
  dev.off()
}
# Point graph (three stacked panels) saved as a JPEG.
if (response2 == "2-3") {
  # These panels need nlme (lme), gridExtra (arrangeGrob/grid.arrange) and
  # grid (textGrob/unit/gpar); none of them is attached at the top of the
  # script, so load them here before use.
  library(nlme)
  library(gridExtra)
  library(grid)
  # Linear mixed-effects model of log hatchling mass on log snout-vent
  # length (SVL), with mom as a random intercept.
  resultshatchmass <- lme(log(MASS)~log(SVL),data=datum,random=~1|MOM,na.action=na.omit)
  # Keep only rows with complete MASS so residuals line up with the data.
  datum4 <- datum[complete.cases(datum[,"MASS"]),]
  # Residuals of the mass~SVL model serve as a body-condition index.
  datum4$resids <- resid(resultshatchmass)
  # Quick base-graphics sanity check of the residuals.
  plot(resids~EGGMASS,datum4)
  # Panel (c): hatchling body condition vs egg mass.
  bodycondition <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=resids), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=resids), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling body condition") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Panel (a): hatchling mass vs egg mass.
  hatchlingmass <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=MASS), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=MASS), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling mass (g)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Panel (b): hatchling snout-vent length (SVL) vs egg mass.
  hatchlingsvl <- ggplot() +
    geom_point(data=datum4, aes(x=EGGMASS, y=SVL), shape=1) +
    geom_smooth(data=datum4, aes(x=EGGMASS, y=SVL), method=lm,se=FALSE, color="black") +
    theme_classic() +
    labs(title="", x="Egg mass (g) at oviposition", y="Hatchling SVL (mm)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.25)) +
    theme(axis.title.x = element_text(size=10,vjust=0),
          axis.text.x = element_text(size=8,color="black"),
          axis.title.y = element_text(size=10,vjust=3),
          axis.text.y = element_text(size=8,color="black"))
  # Add the a)/b)/c) panel labels and stack the three plots in one figure.
  hatchlingmass1 <- arrangeGrob(hatchlingmass, top = textGrob("a)", x = unit(0.10, "npc")
                                               , y = unit(0, "npc"), just=c("left","top"),
                                               gp=gpar(col="black", fontsize=15)))
  hatchlingsvl1 <- arrangeGrob(hatchlingsvl, top = textGrob("b)", x = unit(0.10, "npc")
                                             , y = unit(0, "npc"), just=c("left","top"),
                                             gp=gpar(col="black", fontsize=15)))
  bodycondition1 <- arrangeGrob(bodycondition, top = textGrob("c)", x = unit(0.10, "npc")
                                               , y = unit(0, "npc"), just=c("left","top"),
                                               gp=gpar(col="black", fontsize=15)))
  # Open the JPEG device, draw the combined figure, then close the device.
  jpeg("point_graph.jpeg", width = 4, height = 6, units = 'in', res = 300)
  grid.arrange(hatchlingmass1, hatchlingsvl1, bodycondition1, ncol = 1)
  dev.off()
}
# Line graph saved as a TIFF.
if (response2 == "3-1") {
  Line_Graph <- ggplot() +
    geom_line(data=datum, aes(x=EGGMASS,y=SVL)) +
    theme_minimal() +
    labs(title="",x="Egg mass (g) at oviposition", y="Hatchling SVL (mm)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.0))
  # ggsave() must not be chained onto the plot with `+`: it is not a ggplot
  # layer, and when chained it runs before this plot has ever been drawn,
  # so it would save whatever last_plot() happens to be. Call it separately
  # and pass the plot explicitly.
  ggsave(file="line_graph.tiff", plot = Line_Graph)
}
# Line graph saved as a PNG.
if (response2 == "3-2") {
  Line_Graph <- ggplot() +
    geom_line(data=datum, aes(x=EGGMASS,y=SVL)) +
    theme_minimal() +
    labs(title="",x="Egg mass (g) at oviposition", y="Hatchling SVL (mm)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.0))
  # ggsave() must not be chained onto the plot with `+`: it is not a ggplot
  # layer, and when chained it runs before this plot has ever been drawn,
  # so it would save whatever last_plot() happens to be. Call it separately
  # and pass the plot explicitly.
  ggsave(file="line_graph.png", plot = Line_Graph)
}
# Line graph saved as a JPEG.
if (response2 == "3-3") {
  Line_Graph <- ggplot() +
    geom_line(data=datum, aes(x=EGGMASS,y=SVL)) +
    theme_minimal() +
    labs(title="",x="Egg mass (g) at oviposition", y="Hatchling SVL (mm)") +
    scale_x_continuous(breaks=seq(0.85,1.25,0.05), limits=c(0.85,1.0))
  # ggsave() must not be chained onto the plot with `+`: it is not a ggplot
  # layer, and when chained it runs before this plot has ever been drawn,
  # so it would save whatever last_plot() happens to be. Call it separately
  # and pass the plot explicitly.
  ggsave(file="line_graph.jpg", plot = Line_Graph)
}
|
f25a0dd5621fd38c2f6f2d052348564ecb8c81cc | e5f9aec08da8ac7eaf3a1fdddd7888e17b6caa84 | /trace_RNASeq.R | 686fb31d16df3d9984e7aa81b6765d27e9770f40 | [] | no_license | aarthitalla10/Makamdop_Talla_Sharma_etal_UgandaInflammation | a9e226eabb6784157f30437e4f473b1794fab484 | 1e70db490048e2dc41cdc5004cfc8e3060cada48 | refs/heads/master | 2022-06-20T13:12:03.645401 | 2020-05-04T23:44:56 | 2020-05-04T23:44:56 | 261,319,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50,315 | r | trace_RNASeq.R | ######################################
# Load required libraries
######################################
suppressPackageStartupMessages(library(package = "gdata"))
suppressPackageStartupMessages(library(package = "EDASeq"))
suppressPackageStartupMessages(library(package = "edgeR"))
suppressPackageStartupMessages(library(package = "ggplot2"))
suppressPackageStartupMessages(library(package = "pheatmap"))
suppressPackageStartupMessages(library(package = "grid"))
suppressPackageStartupMessages(library(package = "WriteXLS"))
suppressPackageStartupMessages(library(package = "dplyr"))
suppressPackageStartupMessages(library(package = "tidyr"))
suppressPackageStartupMessages(library(package = "tibble"))
suppressPackageStartupMessages(library(package = "readxl"))
suppressPackageStartupMessages(library(package = "sva"))
suppressPackageStartupMessages(library(package = "igraph"))
suppressPackageStartupMessages(library(package = "EpiDISH"))
suppressPackageStartupMessages(library(package = "Biobase"))
suppressPackageStartupMessages(library(package = "GEOquery"))
suppressPackageStartupMessages(library(package = "biomaRt"))
suppressPackageStartupMessages(library(package = "mixOmics"))
######################################
# Define the global options of the script
######################################
# Keep character columns as characters (pre-R-4.0 default was factors)
# and plain (non-directional) quotes in sQuote()/dQuote() output.
options(stringsAsFactors = FALSE)
options(useFancyQuotes = FALSE)
######################################
# Initializing directories
######################################
# Input/output directory names used throughout the script (relative to
# the working directory).
rawDir <- "raw"
dataDir <- "data"
diagnosticDir <- "diagnostic_plot"
exploDir <- "exploratory_plot"
degDir <- "deg_plot"
gseaDir <- "gsea"
regDir <- "reg"
figDir <- "Manuscript/Figures_20190223"
########################################################################
# Create Count Matrix / Sample annotation / Feature annotation
########################################################################
# Read counts files: each *_genecounts file is a two-column table of
# gene id and raw count, one file per sequencing library.
fileLS <- list.files(path = rawDir,
                     pattern = "genecounts$",
                     full.names = TRUE,
                     recursive = TRUE)
countMat <- lapply(fileLS, FUN = function(file){
  return(value = read.table(file = file,
                            sep = "\t",
                            col.names = c("id", "value")))
})
# Verify that every count file reports the same feature ids, in the same
# order, as the first file; the column-wise merge below assumes this.
id <- countMat[[1]][, "id"]
# vapply() (rather than sapply()) guarantees a logical vector even for
# degenerate input.
flag <- vapply(countMat, FUN = function(file) {
  all(id == file[, "id"])
}, FUN.VALUE = logical(1))
if (any(!flag)) {
  # Surface the problem as a real warning instead of a print() message.
  warning("some tag id missing in some of the count files")
}
# Merge all the count files into a genes x samples matrix; column names
# are derived from the file names by stripping the "_genecounts" suffix.
countMat <- sapply(countMat, FUN = function(file) {
  file[, "value"] })
rownames(countMat) <- id
colnames(countMat) <- gsub(pattern = "_genecounts",
                           replacement = "",
                           basename(fileLS))
## Read samplesheet and add in Batch information.
## Batch assignment: sheet 2 lists batch 3 samples; M12 samples from
## sheet 1 are batch 2; everything else is batch 1 (B3 takes precedence).
sampleSheet <- read_excel("data/Samplesheet.xlsx", sheet = 1) %>% as.data.frame()
batch2Samples <- read_excel("data/Samplesheet.xlsx", sheet = 1) %>%
  as.data.frame() %>%
  filter(timePoint %in% "M12") %>%
  .$SampleID
batch3Samples <- read_excel("data/Samplesheet.xlsx", sheet = 2) %>%
  as.data.frame() %>%
  .$SampleID
sampleSheet <- sampleSheet %>%
  mutate(Batch = ifelse(SampleID %in% batch3Samples, "B3",
                        ifelse(SampleID %in% batch2Samples, "B2", "B1")))
rownames(sampleSheet) <- sampleSheet$SampleID
## Subset count matrix on samples present in the samplesheet and reorder
## its columns to match the samplesheet rows.
countMat <- countMat[, rownames(sampleSheet)]
# Sanity check: auto-prints TRUE counts when sample order matches.
table(rownames(sampleSheet) == colnames(countMat))
countDF <- as.data.frame(countMat)
## Feature Annotation
# Standard GTF column names for the features annotation file.
# NOTE(review): read.table is used with default quoting/comment handling;
# attribute strings containing quote characters could break parsing --
# consider quote = "" if the GTF ever fails to load. TODO confirm.
cNames <- c("seqname",
            "source",
            "feature",
            "start",
            "end",
            "score",
            "strand",
            "frame",
            "attributes")
featuresAnnotationFile <- "data/genes.gtf"
featuresAnnotation <- read.table(file = featuresAnnotationFile,
                                 sep = "\t",
                                 col.names = cNames)
# Extract gene_id and gene_name from the semicolon-delimited attributes
# field via regex capture.
featuresAnnotation$"gene_id" <- gsub(pattern = ".*gene_id ([^;]*);.*",
                                     replacement = "\\1",
                                     featuresAnnotation$"attributes")
featuresAnnotation$"gene_name" <- gsub(pattern = ".*gene_name ([^;]*);.*",
                                       replacement = "\\1",
                                       featuresAnnotation$"attributes")
# Collapse to one row per gene and align the rows with the count matrix.
featuresAnnotation <- unique(featuresAnnotation[, c("seqname",
                                                    "strand",
                                                    "gene_id",
                                                    "gene_name")])
rownames(featuresAnnotation) <- featuresAnnotation$"gene_id"
featuresAnnotation <- featuresAnnotation[rownames(countDF), ]
######################################
# Build Expression Set
######################################
# Raw Counts: bundle counts + feature annotation + sample annotation
# into an EDASeq SeqExpressionSet.
esetRaw <- newSeqExpressionSet(counts = as.matrix(countDF),
                               featureData = featuresAnnotation,
                               phenoData = sampleSheet)
## Average technical replicates: samples sharing subject/timepoint/
## cell-subset/batch are collapsed to their mean counts, keeping the
## first sample of each replicate group.
pData(esetRaw)$repID <- interaction(esetRaw$subjectID,
                                    esetRaw$timePoint,
                                    esetRaw$cellSubset,
                                    esetRaw$Batch) %>% as.character()
replicates <- names(which(table(pData(esetRaw)[["repID"]]) > 1))
# NOTE(review): the last replicate group is deliberately excluded here;
# presumably it corresponds to a control/NA group -- confirm intent.
replicates <- replicates[-length(replicates)]
for(RID in replicates) {
  dupSample <- sampleNames(esetRaw)[pData(esetRaw)[["repID"]] == RID]
  # Overwrite the first replicate's counts with the per-gene mean ...
  counts(esetRaw)[, sampleNames(esetRaw) %in% dupSample[1]] <-
    apply(counts(esetRaw[, sampleNames(esetRaw) %in% dupSample]),
          MARGIN = 1,
          FUN = mean)
  # ... then drop the remaining replicates from the eset.
  esetRaw <- esetRaw[, !(sampleNames(esetRaw) %in% dupSample[2:length(dupSample)])]
}
# Normalized counts: TMM normalization factors via edgeR, then CPM.
dge <- DGEList(counts = counts(esetRaw), remove.zeros = TRUE)
# note : Removed 842 out of 29881 rows with all zero counts
dge <- calcNormFactors(object = dge, method = "TMM")
normCounts <- cpm(dge, normalized.lib.sizes = TRUE)
# Build SeqExpressionSet with normalized counts (features subset to the
# rows that survived zero-count removal).
eset <- newSeqExpressionSet(counts = as.matrix(normCounts),
                             featureData = featuresAnnotation[rownames(normCounts), ],
                             phenoData = pData(esetRaw))
############################################################
# FIG S5A and B
############################################################
# PCA (all samples, including water controls) on log2 normalized counts
esetTemp <- eset
mat <- log2(counts(esetTemp) + 0.25)
# NOTE: PCA is run on the genes x samples matrix and matpca[[2]] is prcomp()'s
# rotation matrix, whose rows correspond to the matrix columns (samples);
# this yields per-sample coordinates (the more common form is prcomp(t(mat))$x)
matpca <- prcomp(mat, center=TRUE, scale. = TRUE)
pcaDF <- as.data.frame(matpca[[2]]) %>% dplyr::select(PC1, PC2) %>%
rownames_to_column() %>%
dplyr::rename(SampleID = rowname) %>%
mutate(cellSubset = esetTemp$type[match(SampleID,
table = esetTemp$SampleID)],
timePoint = esetTemp$timePoint[match(SampleID,
table = esetTemp$SampleID)],
Batch = esetTemp$Batch[match(SampleID,table = esetTemp$SampleID)])
# fixed subset -> color mapping used throughout the figure set
Scols <- c("Tcells" = "royalblue3",
"mDC" = "springgreen",
"Monocytes" = "orchid",
"NK" = "gold",
"WATERCONTROL" = "black")
plotScat <- ggplot(data = pcaDF,
mapping = aes(x = PC1,
y = PC2,
color = cellSubset)) +
scale_color_manual(values=Scols) +
geom_point(size = 2) +
theme_bw() +
theme(axis.text.x = element_text(size=8, color = "black"),
axis.text.y = element_text(size=8, color = "black"),
axis.line = element_line(color = "grey"),
panel.background = element_blank(),
panel.grid = element_blank())
pdf(file = file.path(figDir, "PCA_withWATERCONTROL_V2.pdf"), width = 4, height = 3)
print(plotScat)
dev.off()
# PCA without water control: drop those samples and re-normalize from raw counts
sampleSheet2 <- pData(eset) %>% filter(!cellSubset %in% "WATERCONTROL")
rownames(sampleSheet2) <- sampleSheet2$SampleID
countDF2 <- countDF[, rownames(sampleSheet2)]
# sanity check printed to console: every entry should be TRUE
table(colnames(countDF2) == rownames(sampleSheet2))
# re-normalize counts (post removal of water control samples)
dge <- DGEList(counts = countDF2, remove.zeros = TRUE)
dge <- calcNormFactors(object = dge, method = "TMM")
normCounts <- cpm(dge, normalized.lib.sizes = TRUE)
# PCA (same rotation-matrix convention as above)
mat <- log2(normCounts + 0.25)
matpca <- prcomp(mat, center=TRUE, scale. = TRUE)
pcaDF <- as.data.frame(matpca[[2]]) %>% dplyr::select(PC1, PC2) %>%
rownames_to_column() %>%
dplyr::rename(SampleID = rowname) %>%
mutate(cellSubset = sampleSheet2$type[match(SampleID,
table = sampleSheet2$SampleID)],
timePoint = sampleSheet2$timePoint[match(SampleID,
table = sampleSheet2$SampleID)],
Batch = sampleSheet2$Batch[match(SampleID,table = sampleSheet2$SampleID)])
Scols2 <- Scols[!names(Scols) %in% "WATERCONTROL"]
# labeled points (geom_text) so outliers can be identified by SampleID
plotScat <- ggplot(data = pcaDF,
mapping = aes(x = PC1,
y = PC2,
label = SampleID,
color = cellSubset)) +
geom_text(size = 3) +
scale_color_manual(values=Scols2) +
geom_point(size = 4) +
theme_bw() +
theme(axis.text.x = element_text(size=8, color = "black"),
axis.text.y = element_text(size=8, color = "black"),
axis.line = element_line(color = "grey"),
panel.background = element_blank(),
panel.grid = element_blank())
pdf(file = file.path(figDir, "PCA_withoutWATERCONTROL.pdf"), width = 6, height = 6)
print(plotScat)
dev.off()
## remove outlier samples based on visual inspection and replot PCA
# samples with PC1 > -0.05 plus five hand-picked outliers identified on the
# labeled PCA above
removeSamples <- pcaDF %>% filter(PC1 > -0.05) %>% .$SampleID
removeSamples <- c(removeSamples, c("Monocytes_86","Monocytes_78","Monocytes_94",
"CD3T_RAL07_Mo3", "C306L4I16"))
sampleSheet3 <- sampleSheet2 %>% filter(!SampleID %in% removeSamples)
rownames(sampleSheet3) <- sampleSheet3$SampleID
countDF3 <- countDF2[, rownames(sampleSheet3)]
# sanity check printed to console: every entry should be TRUE
table(colnames(countDF3) == rownames(sampleSheet3))
# Normalize counts (TMM + CPM) on the cleaned sample set
dge <- DGEList(counts = countDF3, remove.zeros = TRUE)
dge <- calcNormFactors(object = dge, method = "TMM")
normCounts <- cpm(dge, normalized.lib.sizes = TRUE)
# PCA (rotation-matrix rows = samples, as in the sections above)
mat <- log2(normCounts + 0.25)
matpca <- prcomp(mat, center=TRUE, scale. = TRUE)
pcaDF <- as.data.frame(matpca[[2]]) %>% dplyr::select(PC1, PC2) %>%
rownames_to_column() %>%
dplyr::rename(SampleID = rowname) %>%
mutate(cellSubset = sampleSheet3$type[match(SampleID,
table = sampleSheet3$SampleID)],
timePoint = sampleSheet3$timePoint[match(SampleID,
table = sampleSheet3$SampleID)],
Batch = sampleSheet3$Batch[match(SampleID,table = sampleSheet3$SampleID)])
# summary PCA: variance-explained used for axis labels
summaryPCA <- summary(matpca)$importance
plotScat <- ggplot(data = pcaDF,
mapping = aes(x = PC1,
y = PC2,
color = cellSubset)) +
scale_color_manual(values = Scols2) +
geom_point(size = 3) +
labs(x = paste("PC1(",
round(summaryPCA["Proportion of Variance", "PC1"] * 100),
"%)", sep = ""),
y = paste("PC2(",
round(summaryPCA["Proportion of Variance", "PC2"] * 100),
"%)", sep = "")) +
theme_bw() +
theme(axis.text.x = element_text(size = 10, color = "black"),
axis.text.y = element_text(size = 10, color = "black"),
axis.line = element_line(color = "grey"),
panel.background = element_blank(),
panel.grid = element_blank())
pdf(file = file.path(figDir, "Fig_S5b.pdf"),
width = 4, height = 4, useDingbats = F)
print(plotScat)
dev.off()
# save raw/cpm count matrix and eset after all outliers removed
write.table(countDF3, file = file.path(dataDir, "rawCounts.txt"), sep = "\t")
write.table(normCounts, file = file.path(dataDir, "cpmCounts.txt"), sep = "\t")
# save as esetRaw and eset -- NOTE: these rebind the globals used by every
# downstream section of this script
esetRaw <- newSeqExpressionSet(counts = as.matrix(countDF3),
featureData = featuresAnnotation,
phenoData = sampleSheet3)
save(esetRaw, file = file.path(dataDir, "esetRaw_V2.RData"))
eset <- newSeqExpressionSet(counts = as.matrix(normCounts),
featureData = featuresAnnotation[rownames(normCounts), ],
phenoData = sampleSheet3)
save(eset, file = file.path(dataDir, "eset.RData"))
############################################################
# FIG S5C
############################################################
### PCA by cell subset (pre-batch correction): one PDF per subset, points
### colored by timepoint and shaped by batch to visualize the batch effect
esetTemp <- eset
# fixed timepoint -> color mapping reused in Fig S5D
tCols <- c("D0" = "black",
"M3" = "red",
"M12" = "coral",
"M24" = "royalblue3")
for(CS in unique(esetTemp$cellSubset)) {
esetTemp2 <- esetTemp[, esetTemp$cellSubset %in% CS]
mat <- log2(counts(esetTemp2) + 0.25)
# rotation-matrix rows = samples (same convention as Fig S5A/B)
matpca <- prcomp(mat, center = TRUE, scale. = TRUE)
pcaDF <- as.data.frame(matpca[[2]]) %>% dplyr::select(PC1, PC2) %>%
rownames_to_column() %>%
dplyr::rename(SampleID = rowname) %>%
mutate(timePoint = esetTemp2$timePoint[match(SampleID,
table = esetTemp2$SampleID)],
Batch = esetTemp2$Batch[match(SampleID,table = esetTemp2$SampleID)])
# summary PCA: variance explained for axis labels
summaryPCA <- summary(matpca)$importance
outFile <- paste("PCA_", CS, "_preCombat.pdf", sep = "")
plotScat <- ggplot(data = pcaDF,
mapping = aes(x = PC1,
y = PC2,
color = timePoint,
shape = Batch)) +
scale_color_manual(values = tCols) +
geom_point(size = 3) +
labs(x = paste("PC1(",
round(summaryPCA["Proportion of Variance", "PC1"] * 100),
"%)", sep = ""),
y = paste("PC2(",
round(summaryPCA["Proportion of Variance", "PC2"] * 100),
"%)", sep = "")) +
theme_bw() +
theme(axis.text.x = element_text(size = 8, color = "black"),
axis.text.y = element_text(size = 8, color = "black"),
axis.line = element_line(color = "grey"),
panel.background = element_blank(),
panel.grid = element_blank())
pdf(file = file.path(figDir, outFile), width = 4, height = 3, useDingbats = FALSE)
print(plotScat)
dev.off()
}
############################################################
# FIG S5D
############################################################
### PCA post comBat batch correction
############ combat for batch correction (per subset) ############
esetTemp <- eset
for(CS in unique(esetTemp$cellSubset)) {
esetTemp2 <- esetTemp[, esetTemp$cellSubset %in% CS]
# drop one hand-picked sample prior to correction
esetTemp2 <- esetTemp2[, !esetTemp2$SampleID %in% "H5GYL2I21"]
# build design matrix (intercept + timepoint) used only by voom below
group1 <- factor((esetTemp2$"timePoint"))
designMat <- model.matrix(~group1)
rownames(designMat) <- sampleNames(esetTemp2)
colnames(designMat) <- gsub(pattern = "group1",
replacement = "",
colnames(designMat))
# transform count data into a normal distribution by Voom, since comBat expects normally distributed data
v <- voom(counts(esetTemp2), design = designMat, plot = FALSE)
# combat
batch <- esetTemp2$Batch
mod <- model.matrix(~ 0 + timePoint, data = pData(esetTemp2))
colnames(mod) <- gsub("timePoint", "", colnames(mod))
# remove batch effect using combat
# NOTE(review): `batch` and `mod` are constructed above but not passed to
# ComBat (no mod= argument), so timepoint is NOT protected as a covariate
# during batch correction -- confirm this is intended
rmBatch <- ComBat(dat = v$E, batch = esetTemp2$Batch)
mat <- rmBatch
# PCA on the corrected matrix (rotation rows = samples, as elsewhere)
matpca <- prcomp(mat, center = TRUE, scale. = TRUE)
pcaDF <- as.data.frame(matpca[[2]]) %>% dplyr::select(PC1, PC2) %>%
rownames_to_column() %>%
dplyr::rename(SampleID = rowname) %>%
mutate(timePoint = esetTemp2$timePoint[match(SampleID,
table = esetTemp2$SampleID)],
Batch = esetTemp2$Batch[match(SampleID, table = esetTemp2$SampleID)])
# summary PCA: variance explained for axis labels
summaryPCA <- summary(matpca)$importance
outFile <- paste("PCA_", CS, "_postBatchCorrection.pdf", sep = "")
plotScat <- ggplot(data = pcaDF,
mapping = aes(x = PC1,
y = PC2,
color = timePoint,
shape = Batch)) +
scale_color_manual(values = tCols) +
geom_point(size = 3) +
labs(x = paste("PC1(",
round(summaryPCA["Proportion of Variance", "PC1"] * 100),
"%)", sep = ""),
y = paste("PC2(",
round(summaryPCA["Proportion of Variance", "PC2"] * 100),
"%)", sep = "")) +
theme_bw() +
theme(axis.text.x = element_text(size = 8, color = "black"),
axis.text.y = element_text(size = 8, color = "black"),
axis.line = element_line(color = "grey"),
panel.background = element_blank(),
panel.grid = element_blank())
pdf(file = file.path(figDir, outFile), width = 4, height = 3)
print(plotScat)
dev.off()
}
############################################################
# FIG S5E
############################################################
### Which subsets are making these cytokines at baseline
# `clusters` is a list of cytokine clusters defined earlier in the file
markers <- c(clusters[2], clusters[4], clusters[3]) %>% unlist() %>% unname()
markers <- c(markers, "TGFB1", "CXCL10")
esetTemp <- eset
# restrict to baseline (D0) samples, excluding batch-3 D0 samples
pData(esetTemp)$BatchTimePoint <- interaction(esetTemp$Batch,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$BatchTimePoint %in% "B3_D0"]
esetTemp <- esetTemp[, esetTemp$timePoint %in% "D0"]
# replace cytokines with HGNC symbols
markers[markers == "IL1b"] <- "IL1B"
markers[markers == "MIP1b"] <- "CCL4"
markers[markers == "GCSF"] <- "CSF3"
markers[markers == "MCP1"] <- "CCL2"
markers[markers == "TNFa"] <- "TNF"
markers[markers == "IFNg"] <- "IFNG"
markers[markers == "IL12"] <- "IL12A"
## for markers
# NOTE(review): this line REPLACES the curated cytokine list built above with
# all CCL*/CXCL* genes in the matrix, discarding the renamed markers -- confirm
# which marker set the final figure is meant to use
markers <- rownames(counts(esetTemp))[grep("^CCL|^CXCL", rownames(counts(esetTemp)))]
# plot heatmap of markers genes
# drop genes with zero mean expression (the name `filter` shadows
# dplyr::filter as a variable; function calls still resolve correctly)
filter <- apply(counts(esetTemp), 1, function(x) mean(x)>0)
esetTemp <- esetTemp[filter, ]
mat <- counts(esetTemp)
comm <- intersect(markers, rownames(mat))
mat <- mat[comm, ]
# rename HGNC symbols back to the original cytokine names for display
rownames(mat)[rownames(mat) %in% "CCL4"] <- "MIP1B"
rownames(mat)[rownames(mat) %in% "CSF3"] <- "GCSF"
rownames(mat)[rownames(mat) %in% "CCL2"] <- "MCP1"
rownames(mat)[rownames(mat) %in% "CXCL10"] <- "IP10"
# row-scale (per-gene z-score) and compute symmetric color limits
mat <- t(scale(t(mat)))
limit <- range(mat)
limit <- c(ceiling(limit[1]), floor(limit[2]))
limit <- min(abs(limit))
# phenotype annotation, column-ordered by cell subset
matAnnot <- pData(esetTemp) %>%
dplyr::select(cellSubset, timePoint) %>%
as.data.frame()
matAnnot <- matAnnot[order(factor(matAnnot$cellSubset, levels = c("Tcells",
"mDC",
"Monocytes",
"NK"))), ]
ann_colors = list(cellSubset = c(Tcells = "royalblue3",
mDC = "springgreen",
Monocytes = "orchid",
NK = "gold"),
timePoint = c(D0 = "black"))
# plot
outFile <- "Markers_GeneExpression_Heatmap_D0_V2.pdf"
print(outFile)
pdf(file = file.path(figDir, outFile), width = 8, height = 5)
colorPalette <- c("blue", "white", "red")
colorPalette <- colorRampPalette(colors = colorPalette)(100)
pheatmap(mat = mat[, rownames(matAnnot)],
color = colorPalette,
breaks = c(min(mat),
seq(from = -1 * limit,
to = limit,
length.out = 99),
max(mat)),
cellwidth = 6,
cellheight = 6,
cluster_cols = FALSE,
cluster_rows = TRUE,
treeheight_row = 0,
annotation = matAnnot,
annotation_colors = ann_colors,
show_rownames = TRUE,
show_colnames = FALSE,
border_color = NA,
fontsize_row = 6,
fontsize = 7)
dev.off()
############################################################
# FIG S5E
############################################################
######## Pathway expression at baseline across all subsets
# do SLEA (Sample Level Enrichment Analysis) function(z-score per sample)
# doSLEA: Sample Level Enrichment Analysis.
#
# For each sample (column), computes a z-score comparing the mean row-scaled
# expression of `geneSet` against a null distribution built from `nperm`
# random gene sets of the same size drawn from the whole matrix.
#
# expressionSet : numeric matrix (genes x samples) with gene rownames
# geneSet       : character vector of gene identifiers; silently intersected
#                 with rownames(expressionSet)
# nperm         : number of random gene sets for the null (default 1000,
#                 matching the previously hard-coded value)
#
# Returns a one-column data.frame of per-sample z-scores (rownames = sample
# names). NOTE: calls set.seed() once per permutation for reproducibility,
# which mutates the global RNG state.
doSLEA <- function(expressionSet, geneSet, nperm = 1000) {
# scale expression per gene (row z-score across samples)
exprsMat <- expressionSet
exprsMat <- t(scale(t(exprsMat)))
# restrict the gene set to genes present in the matrix;
# drop = FALSE keeps a matrix even for a single-gene set
comm <- intersect(geneSet, rownames(expressionSet))
gsDF <- exprsMat[comm, , drop = FALSE]
# observed statistic: mean scaled expression per sample
gsM <- colMeans(gsDF)
# null: mean of length(comm) randomly drawn genes, repeated nperm times
nperm <- lapply(seq_len(nperm), function(j) {
# fixed seed per permutation => deterministic output
set.seed(j)
rGSDF <- exprsMat[sample.int(nrow(exprsMat), length(comm)), , drop = FALSE]
rGSM <- colMeans(rGSDF)
return(value = rGSM)
})
permDF <- do.call(rbind, nperm)
# per-sample z-score of the observed mean against the permutation null
zscore <- (gsM - colMeans(permDF)) / apply(permDF, 2, sd)
sleaDF <- as.data.frame(zscore)
return(value = sleaDF)
}
# define geneSet background: parse the MSigDB Hallmark .gmt file into a named
# list of gene vectors (one element per gene set)
gmx <- "h.all.v6.1.symbols.gmt"
dirName <- "gsea"
gmxFile <- file.path(dirName, gmx)
# .gmt rows are ragged, so pad read.table with the maximum field count
colNames <- max(count.fields(file = gmxFile, sep = "\t"))
colNames <- seq(from = 1, to = colNames)
colNames <- as.character(colNames)
gmx <- read.table(file = gmxFile,
sep = "\t",
quote = "\"",
fill = TRUE,
col.names = colNames,
row.names = 1)
# drop the description column, then strip the "" padding from each row
gmx <- gmx[, -1]
gmx <- apply(gmx, MARGIN = 1, FUN = function(x) {
return(value = setdiff(unname(x), ""))
})
names(gmx) <- toupper(names(gmx))
## expression baseline: SLEA z-scores per Hallmark pathway across all subsets
## at D0, visualized as a heatmap (second "FIG S5E" panel)
esetTemp <- eset
pData(esetTemp)$BatchTimePoint <- interaction(esetTemp$Batch,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$BatchTimePoint %in% "B3_D0"]
pData(esetTemp)$DonorID_TimePoint <- interaction(esetTemp$subjectID,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, esetTemp$timePoint %in% "D0"]
# drop genes with zero mean expression (variable name shadows dplyr::filter)
filter <- apply(counts(esetTemp), 1, function(x) mean(x)>0)
esetTemp <- esetTemp[filter, ]
# call SLEA once per Hallmark gene set
# (style note: 1:length(gmx) would misbehave for an empty list; gmx is known
# non-empty here)
sleaLS <- lapply(1:length(gmx), function(l) {
expressionSet = counts(esetTemp)
geneSet <- gmx[[l]] %>% strsplit(",") %>% unlist(.)
sDF <- doSLEA(expressionSet = expressionSet, geneSet = geneSet)
names(sDF) <- names(gmx[l])
return(value = sDF)
})
# samples x pathways -> transpose to pathways x samples for the heatmap
sleaDF <- do.call(cbind, sleaLS)
colnames(sleaDF) <- gsub("HALLMARK_", "", colnames(sleaDF))
sleaDF <- sleaDF %>% t() %>% as.data.frame()
# annotation
matAnnot <- data.frame(SampleID = colnames(sleaDF)) %>%
mutate(cellSubset = esetTemp$cellSubset[match(SampleID,
table = esetTemp$SampleID)],
TimePoint = "D0") %>%
column_to_rownames(var = "SampleID")
# annotation colors; columns ordered by cell subset
matAnnot <- matAnnot[order(factor(matAnnot$cellSubset, levels = c("Tcells",
"mDC",
"Monocytes",
"NK"))), ]
ann_colors = list(cellSubset = c(Tcells = "royalblue3",
mDC = "springgreen",
Monocytes = "orchid",
NK = "gold"),
TimePoint = c(D0 = "black"))
# row-scale z-scores and compute symmetric color limits
mat <- t(scale(t(sleaDF)))
limit <- range(mat)
limit <- c(ceiling(limit[1]), floor(limit[2]))
limit <- min(abs(limit))
mat <- mat[, rownames(matAnnot)]
colorPalette <- c("blue", "white", "red")
colorPalette <- colorRampPalette(colors = colorPalette)(100)
outFile <- "Pathways at baseline across subsets.pdf"
pdf(file = file.path(figDir,outFile), width = 12, height = 12)
pheatmap(mat = mat,
color = colorPalette,
breaks = c(min(mat),
seq(from = -1 * limit,
to = limit,
length.out = 99),
max(mat)),
cellwidth = 6,
cellheight = 6,
cluster_cols = FALSE,
cluster_rows = TRUE,
treeheight_row = 0,
annotation = matAnnot,
annotation_colors = ann_colors,
show_rownames = TRUE,
show_colnames = FALSE,
border_color = NA,
fontsize_row = 6,
fontsize = 7)
dev.off()
############################################################
# Identify Differential expressed genes between timepoints
############################################################
# Per cell subset: voom + limma F-test across all pairwise timepoint
# contrasts; writes a topTable per subset and a heatmap of the top 50 DEGs.
fits <- list()
SUBSETS <- c("mDC", "Monocytes", "NK", "Tcells")
for(C in SUBSETS) {
esetTemp <- eset
esetTemp <- esetTemp[, esetTemp$cellSubset %in% C]
# exclude batch-3 D0 samples (consistent with the PCA/SLEA sections)
pData(esetTemp)$BatchTimePoint <- interaction(esetTemp$Batch,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$BatchTimePoint %in% "B3_D0"]
pData(esetTemp)$DonorID_TimePoint <- interaction(esetTemp$subjectID,
esetTemp$timePoint, sep = "_")
# remove rows with mean expression 0
filter <- apply(counts(esetTemp), 1, function(x) mean(x)>0)
esetTemp <- esetTemp[filter, ]
# build design matrix: one column per timepoint, no intercept
group <- factor(esetTemp$"timePoint")
designMat <- model.matrix(~ 0 + group)
rownames(designMat) <- sampleNames(esetTemp)
colnames(designMat) <- gsub(pattern = "group",
replacement = "",
colnames(designMat))
attr(designMat, "assign") <- attr(designMat, "contrasts") <- NULL
# transform count data into log2 CPM - a normal distribution by Voom
v <- voom(counts(esetTemp), design = designMat, plot = FALSE)
# lmFit
fit <- lmFit(v, design = designMat)
# define contrast: every timepoint vs D0 plus the between-followup contrasts
contrastLS <- c(paste(setdiff(group, "D0"), "-D0", sep = ""),
"M12-M3", "M24-M12", "M24-M3")
contrastMat <- makeContrasts(contrasts = contrastLS, levels = designMat)
fit2 <- contrasts.fit(fit, contrastMat)
fit2 <- eBayes(fit2)
# attach gene annotation (fit and fit2 share coefficient rownames)
fit2$genes <- fData(esetTemp)[rownames(fit$coef), ]
modelName <- paste(C,"_FTest",sep="")
fits[[modelName]] <- list(fit = fit, fit2 = fit2)
# print number of genes differently expressed and make heatmaps
fitsTemp <- fits
fit2 <- fitsTemp[[modelName]][["fit2"]]
coefName <- colnames(fit2)
# save sigTags: topTable over ALL contrasts => moderated F-test p-values
sigTags <- topTable(fit = fit2, coef = coefName, n = Inf)
sigTags <- sigTags[order(sigTags$adj.P.Val, decreasing = FALSE), ]
# write results to degDir
outputFile <- paste(modelName, ".FTest.topTable.txt", sep="")
write.table(sigTags,
file = file.path(degDir, outputFile),
quote = FALSE,
sep = "\t",
row.names = FALSE)
# print # DEGs at nominal and FDR-adjusted p < 0.05
print(paste("NomPval = ", dim(sigTags %>% filter(`P.Value` < 0.05))[1], sep = ""))
print(paste("FDR = ", dim(sigTags %>% filter(`adj.P.Val` < 0.05))[1], sep = ""))
# make heatmaps of top 50 DEGs
sigTags <- sigTags[1:50, ]
mat <- v$E %>% as.data.frame()
mat <- mat[rownames(sigTags), ]
# scale expression and define limits for color gradient on heatmaps
mat <- t(scale(t(mat)))
limit <- range(mat)
limit <- c(ceiling(limit[1]), floor(limit[2]))
limit <- min(abs(limit))
# phenotype annotation; columns ordered by timepoint
matAnnot <- pData(esetTemp)[, c("cellSubset","timePoint","subjectID")]
ordercol <- matAnnot[order(match(matAnnot$timePoint, table = c("D0", "M3", "M12", "M24"))), ]
ordercol <- rownames(ordercol)
# colors
ann_colors = list(cellSubset = c(mDC = "springgreen", Monocytes = "orchid",
NK = "gold", Tcells = "royalblue3"),
timePoint = c(D0 = "black",
M3 = "red",
M12 = "coral",
M24 = "royalblue3"))
# plot heatmap
fileName <- paste(C,"_FTest_heatmap.pdf",sep = "")
pdf(file = file.path(degDir, fileName), width = 10, height = 10)
colorPalette <- c("blue", "white", "red")
colorPalette <- colorRampPalette(colors = colorPalette)(100)
pheatmap(mat = mat[, ordercol],
color = colorPalette,
breaks = c(min(mat),
seq(from = -1 * limit,
to = limit,
length.out = 99),
max(mat)),
cellwidth = 5,
cellheight = 5,
cluster_cols = FALSE,
clustering_distance_cols = "euclidean",
treeheight_row = 0,
annotation = matAnnot,
annotation_colors = ann_colors,
show_rownames = TRUE,
border_color = NA,
fontsize = 5)
dev.off()
}
############################################################
###### Perform Fisher pathway enrichment among FTest genes
############################################################
# Load TH17 gene sets from a .gmt (same ragged-file parsing as the Hallmark
# section above)
gmx <- "TH17.gmt"
dirName <- "gsea"
gmxFile <- file.path(dirName, gmx)
colNames <- max(count.fields(file = gmxFile, sep = "\t"))
colNames <- seq(from = 1, to = colNames)
colNames <- as.character(colNames)
gmx <- read.table(file = gmxFile,
sep = "\t",
quote = "\"",
fill = TRUE,
col.names = colNames,
row.names = 1)
gmx <- gmx[, -1]
gmx <- apply(gmx, MARGIN = 1, FUN = function(x) {
return(value = setdiff(unname(x), ""))
})
names(gmx) <- toupper(names(gmx))
### making a GMT of the monocyte subset signatures
### Monocyte subsets signatures
#gmx <- lapply(c(2:4), function(i) {
# signatures <- read_excel("data/TableS3_gse25913.xlsx", sheet = i) %>%
# as.data.frame()
# colnames(signatures) <- signatures[1,]
# signatures <- signatures[-1,] %>% .$SYMBOL %>% unique(.)
# return(value = signatures)
# })
#names(gmx) <- c("Classical", "Intermediate", "NonClassical")
# read FTest genes
## 1. Genes : M3 and M12 > D0 and M24 < M3 and M12 (Monocytes and Tcells)
fileDF <- read.delim("deg_plot/Tcells_FTest.FTest.topTable.txt") # output from identifying DEGs
gs <- fileDF %>%
filter(`adj.P.Val` < 0.05) %>%
filter(`M3.D0` > 0 & M12.D0 > 0 & M24.M3 < 0, M24.M12 < 0) %>%
.$gene_name %>% unique(.)
## 2. Genes : M3 and M12 < D0 and M24 > M3 and M12 (Monocytes and Tcells)
# NOTE(review): this second assignment OVERWRITES the `gs` from option 1 when
# the script is run top-to-bottom; the two definitions appear to be manual
# alternatives -- confirm which gene set the saved output is meant to use
gs <- fileDF %>%
filter(`adj.P.Val` < 0.05) %>%
filter(`M3.D0` < 0 & M12.D0 < 0 & M24.M3 > 0, M24.M12 > 0) %>%
.$gene_name %>% unique(.)
### do Fisher
# obtain background: union of all genes across the loaded gene sets
bg <- unique(unlist(gmx))
fisherIndex <- NULL
# one-sided Fisher's exact test per gene set (enrichment of gs within the set)
output <- mclapply(gmx, function(x) {
tab <- table(factor(bg %in% gs, levels = c(TRUE, FALSE)),
factor(bg %in% x, levels = c(TRUE, FALSE)))
fit <- fisher.test(tab, alternative = "greater")
interSection <- intersect(gs, x)
interSection <- paste(interSection, collapse = ",")
return(value = c(RATIO = as.numeric((diag(tab) / colSums(tab))[1]),
NOM_pval = fit$p.value,
INTERSECT = interSection))
})
output <- do.call(what = rbind, args = output)
output <- cbind(output, NAME = names(gmx))
# BH adjustment across gene sets, keep nominally significant rows
pAdj <- p.adjust(as.numeric(output[, "NOM_pval"]), method = "BH")
output <- cbind(output, ADJ_pval = pAdj)
output <- output[, c("NAME",
"RATIO",
"NOM_pval",
"ADJ_pval",
"INTERSECT")]
output2 <- output[order(as.numeric(output[, "NOM_pval"])), ] %>% as.data.frame() %>%
mutate(NOM_pval = as.numeric(NOM_pval),
ADJ_pval = as.numeric(ADJ_pval)) %>%
filter(NOM_pval < 0.05)
fileName= "Tcells_FTest_Pathways_TH17_M3_M12_higher.txt"
write.table(output2,
file = file.path("deg_plot", fileName),
row.names=FALSE,
sep="\t",
quote = FALSE)
#### MAKE HEATMAPS of the significant fisher enrichment pathways
###### Th17 signatures heatmap: genes driving the significant TH17 enrichments
fileDF <- read.delim("deg_plot/Tcells_FTest_Pathways_TH17_M3_M12_higher.txt")
genes <- fileDF %>%
filter(ADJ_pval < 0.05) %>%
.$INTERSECT %>%
strsplit(",") %>%
unlist() %>%
unique()
# subset on eset: T cells only, batch-3 D0 samples excluded
esetTemp <- eset
esetTemp <- esetTemp[, esetTemp$cellSubset %in% "Tcells"]
pData(esetTemp)$DonorID_TimePoint <- interaction(esetTemp$subjectID,
esetTemp$timePoint, sep = "_")
pData(esetTemp)$BatchTimePoint <- interaction(esetTemp$Batch,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$BatchTimePoint %in% "B3_D0"]
# remove rows with mean expression 0
filter <- apply(counts(esetTemp), 1, function(x) mean(x)>0)
esetTemp <- esetTemp[filter, ]
mat <- counts(esetTemp)[genes, , drop = F]
mat <- log2(mat + 0.25)
# row-scale and compute symmetric color limits
mat <- t(scale(t(mat)))
limit <- range(mat)
limit <- c(ceiling(limit[1]), floor(limit[2]))
limit <- min(abs(limit))
#### mat annotation
matAnnot <- pData(esetTemp)[, c("cellSubset", "timePoint", "DonorID_TimePoint"), drop = FALSE]
## add serratia ratio annotation
## (pathogenratioDF is defined earlier in the file -- not visible here)
matAnnot$Serratia <- pathogenratioDF$serratia_otherbacteria[match(matAnnot$DonorID_TimePoint,
table = pathogenratioDF$DonorID_TimePoint)]
# column order: D0, M3, M12, M24
cOrder <- matAnnot[order(match(matAnnot$timePoint, table=c("D0", "M3", "M12", "M24"))),] %>%
rownames_to_column() %>%
.$rowname
# annotation colors
ann_colors = list(cellSubset = c(Tcells = "royalblue3"),
timePoint = c(D0 = "black",
M3 = "red",
M12 = "coral",
M24 = "royalblue3"),
Serratia = c("blue", "white", "red"))
# plot heatmap
fileName <- "TH17signatures.pdf"
pdf(file = file.path(figDir, fileName), width = 10, height = 20)
colorPalette <- c("blue", "white", "red")
colorPalette <- colorRampPalette(colors = colorPalette)(100)
pheatmap(mat = mat[, cOrder],
color = colorPalette,
scale = "none",
breaks = c(min(mat),
seq(from = -1 * limit,
to = limit,
length.out = 99),
max(mat)),
cellwidth = 12,
cellheight = 12,
cluster_cols = FALSE,
cluster_rows = FALSE,
annotation = matAnnot,
annotation_colors = ann_colors,
show_rownames = TRUE,
show_colnames = FALSE,
treeheight_row = 0,
border_color = NA,
fontsize_row = 3)
dev.off()
## cytokine cluster annotation over heatmap: per-donor mean log2 fold-change
## of each cytokine cluster across timepoints, drawn as a bar annotation
## aligned to the heatmap's column order (cOrder)
datDF <- read_excel("PlasmaBiomarkers.xlsx", sheet = 1) %>% as.data.frame()
markers <- clusters[c(2,4,3)] %>% unlist(.) %>% unname()
# fold-changes are sequential: M3/d0, M12/M3, M24/M12 (D0 vs itself = 0);
# note: dplyr::select_ is deprecated -- kept for compatibility with the
# string-based column selection
cytoDF1 <- datDF %>%
dplyr::filter(TimePoint %in% c("d0", "M3", "M12", "M24")) %>%
dplyr::select_(.dots = c("SampleID", "TimePoint", unname(markers))) %>%
gather(Cytokine, value, -SampleID, -TimePoint) %>%
mutate(Cluster = ifelse(Cytokine %in% clusters[[2]], "C1",
ifelse(Cytokine %in% clusters[[3]], "C3", "C2"))) %>%
spread(TimePoint, value) %>%
mutate(D0_FC = log2(d0/d0),
M3_FC = log2(M3/d0),
M12_FC = log2(M12/M3),
M24_FC = log2(M24/M12)) %>%
dplyr::select(-d0, -M3, -M12, -M24) %>%
gather(TimePoint, value, -SampleID, -Cytokine, -Cluster) %>%
dplyr::group_by(SampleID, Cluster, TimePoint) %>%
dplyr::summarize(mFC = mean(value, na.rm = TRUE)) %>%
as.data.frame() %>%
mutate(TimePoint = gsub("_FC", "", TimePoint),
DonorID_TimePoint = interaction(SampleID, TimePoint, sep = "_"))
# wide format: one column per cytokine cluster
cytokineClusterDF <- cytoDF1 %>%
dplyr::select(-SampleID, -TimePoint) %>%
spread(Cluster, mFC)
# cytokine bar plot for annotation above heatmap (cluster C3 only)
plotDF <- cytokineClusterDF %>%
mutate(SampleID = esetTemp$SampleID[match(DonorID_TimePoint,
table = esetTemp$DonorID_TimePoint)],
TimePoint = esetTemp$timePoint[match(DonorID_TimePoint,
table = esetTemp$DonorID_TimePoint)]) %>%
filter(SampleID %in% cOrder) %>%
dplyr::select(SampleID, DonorID_TimePoint, C3, TimePoint) %>%
column_to_rownames(var = "SampleID")
plotDF$SampleID = rownames(plotDF)
plotDF <- plotDF[cOrder, ]
# sanity check printed to console: every entry should be TRUE
table(rownames(plotDF) == cOrder)
cols <- c("D0" = "black", "M3" = "red", M12 = "coral", M24 = "royalblue3")
outFile <- "Tcells_Heatmap_barplotannotations_C3.pdf"
pdf(file = file.path(figDir, outFile), width = 7, height = 8)
barpl <- ggplot(data = plotDF,
mapping = aes(x = SampleID, y = C3, color = TimePoint)) +
geom_bar(stat = "identity", width = 0.1) +
scale_color_manual(values = cols) +
scale_x_discrete(limits = rownames(plotDF)) +
labs(x = NULL, y = "Cluster 3: Mean - log2(Fold Change)") +
theme_bw() +
theme(panel.grid = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_text(color = "black", size = 10),
axis.ticks.x = element_blank())
print(barpl)
dev.off()
#### Same procedure is followed for identifying DEGs and pathways enriched in other cell subsets and plotting figures
###########################################################################
# FIG. S6
###########################################################################
############### In-silico deconvolution of Monocytes ###############
#### Get monocyte subset frequencies (classical, non-c, int) from Monocyte RNA-Seq subset data using CiberSort
# create reference expression levels of each monocyte subset
# NOTE(review): load() rebinds the global `eset` to the GSE25913 microarray
# ExpressionSet; the `esetTemp <- eset` at the cibersort step below reads
# RNA-seq phenotype columns (Batch, timePoint, cellSubset), which suggests it
# expects the RNA-seq eset instead -- confirm the intended object flow (e.g.
# whether data/eset.RData is re-loaded between sections when run interactively)
load("data/eset.gse25913.RData")
pData(eset) <- pData(eset) %>%
rownames_to_column() %>%
mutate(Subset2 = ifelse(Subset %in% "CD14++CD16-", "Classical",
ifelse(Subset %in% "CD14++CD16+", "Intermediate",
ifelse(Subset %in% "CD14+CD16+", "NonClassical", NA)))) %>%
column_to_rownames(var = "rowname")
# fetch monocyte subset signatures (supplementary table sheets 2-4)
monoSigs <- lapply(c(2:4), function(i) {
signatures <- read_excel("data/TableS3_gse25913.xlsx", sheet = i) %>%
as.data.frame()
colnames(signatures) <- signatures[1,]
signatures <- signatures[-1,] %>% .$SYMBOL %>% unique(.)
return(value = signatures)
})
names(monoSigs) <- c("Classical", "Intermediate", "NonClassical")
# print signature sizes to console
sapply(monoSigs, function(x) length(x))
genes <- unique(unlist(monoSigs) %>% as.character())
# get mean gene expression from each subset using the 'GSE25913' gene expression data
mat <- exprs(eset)
rownames(mat) <- make.unique(fData(eset)$Symbol[match(rownames(mat), table = fData(eset)$ID)])
comm <- intersect(rownames(mat), genes)
# reference matrix: signature genes x 3 monocyte subsets (mean per subset)
ref.m <- mat[comm, , drop = FALSE] %>%
as.data.frame() %>%
rownames_to_column(var = "SYMBOL") %>%
gather(SampleID, value, -SYMBOL) %>%
mutate(cellSubset = eset$Subset2[match(SampleID, table = rownames(pData(eset)))]) %>%
dplyr::group_by(cellSubset,SYMBOL) %>%
dplyr::summarize(mValue = mean(value)) %>%
as.data.frame() %>%
spread(cellSubset, mValue) %>%
column_to_rownames(var = "SYMBOL")
##### Get monocyte subset frequencies across time (cibersort)
cellsubset = "Monocytes"
esetTemp <- eset
pData(esetTemp)$BatchTimePoint <- interaction(esetTemp$Batch,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$BatchTimePoint %in% "B3_D0"]
pData(esetTemp)$typeTimePoint <- interaction(esetTemp$type,
esetTemp$timePoint, sep = "_")
pData(esetTemp)$DonorID_TimePoint <- interaction(esetTemp$subjectID,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$typeTimePoint %in% "mDC_Mo4"]
esetTemp <- esetTemp[, esetTemp$cellSubset %in% cellsubset]
mat <- counts(esetTemp)
# cibersort (EpiDISH implementation, method = "CBS")
out.l <- epidish(mat, as.matrix(ref.m), method = "CBS")
# estimated fractions -> long format with donor/timepoint, as percentages
freqDF <- as.data.frame(out.l$estF) %>%
rownames_to_column() %>%
mutate(DonorID = esetTemp$subjectID[match(rowname,table=esetTemp$SampleID)],
TimePoint = esetTemp$timePoint[match(rowname,table=esetTemp$SampleID)]) %>%
dplyr::select(-rowname) %>%
gather(Monocyte, freq, -DonorID, -TimePoint) %>%
mutate(freq = freq *100,
DonorID_TimePoint = interaction(DonorID, TimePoint, sep = "_"))
# median of each type
xorder <- c("D0", "M3", "M12", "M24")
# plot
cols <- c("Intermediate" = "darkgreen", "Classical" = "red", "NonClassical" = "blue")
pdf(file = file.path(figDir, "Monocyte subset frequencies across time.pdf"), height = 4, width = 5)
plotJit <- ggplot(data = freqDF,
mapping = aes(x = TimePoint, y = freq)) +
geom_boxplot(aes(fill = Monocyte), outlier.colour = NA) +
scale_fill_manual(values = cols) +
scale_x_discrete(limits = xorder) +
labs(x = NULL, y = "Monocyte subset frequency") +
theme_bw() +
theme(panel.grid = element_blank(),
axis.text.x = element_text(color = "black", size = 10),
axis.text.y = element_text(color = "black", size = 10))
print(plotJit)
dev.off()
######## Correlating gene expression signatures with pathogen ratio ########
## gene expression set: exclude batch-3 D0 samples, drop zero-mean genes
esetTemp <- eset
pData(esetTemp)$BatchTimePoint <- interaction(esetTemp$Batch,
esetTemp$timePoint, sep = "_")
esetTemp <- esetTemp[, !esetTemp$BatchTimePoint %in% "B3_D0"]
pData(esetTemp)$DonorID_TimePoint <- interaction(esetTemp$subjectID,
esetTemp$timePoint, sep = "_")
#esetTemp <- esetTemp[, !esetTemp$timePoint %in% c("D0")]
filter <- apply(counts(esetTemp), 1, function(x) mean(x)>0)
esetTemp <- esetTemp[filter, ]
### Th17 signatures: SLEA score per T-cell sample for the TH17 Fisher genes
# extract fisher genes
esetTemp2 <- esetTemp[, esetTemp$cellSubset %in% "Tcells"]
filter <- apply(counts(esetTemp2), 1, function(x) mean(x)>0)
esetTemp2 <- esetTemp2[filter, ]
sigs <- read.delim("deg_plot/Tcells_FTest_Pathways_TH17_M3_M12_higher.txt") %>%
.$INTERSECT %>%
strsplit(",") %>%
unlist() %>% unique()
# call SLEA; re-key rows as <DonorID>_<timePoint> for joining downstream
expressionSet = counts(esetTemp2)
geneSet <- sigs
sDF1 <- doSLEA(expressionSet = expressionSet, geneSet = geneSet)
names(sDF1) <- "TH17"
sDF1 <- sDF1 %>%
rownames_to_column() %>%
mutate(DonorID = esetTemp2$subjectID[match(rowname, table = esetTemp2$SampleID)],
timePoint = esetTemp2$timePoint[match(rowname, table = esetTemp2$SampleID)],
SampleID = interaction(DonorID, timePoint, sep = "_")) %>%
dplyr::select(-rowname, -DonorID, -timePoint)
#Th1 + Th2: combined signature from two gene lists
fileDF1 <- read.delim("deg_plot/g_th1.txt") # refer to supplementary table for Th1 signatures
fileDF2 <- read.delim("deg_plot/g_th2.txt") # refer to supplementary table for Th2 signatures
sigs <- c(fileDF1$gene_name, fileDF2$gene_name) %>% unique()
# call SLEA
expressionSet = counts(esetTemp2)
geneSet <- sigs
sDF2 <- doSLEA(expressionSet = expressionSet, geneSet = geneSet)
names(sDF2) <- "Th1_Th2"
sDF2 <- sDF2 %>%
rownames_to_column() %>%
mutate(DonorID = esetTemp2$subjectID[match(rowname, table = esetTemp2$SampleID)],
timePoint = esetTemp2$timePoint[match(rowname, table = esetTemp2$SampleID)],
SampleID = interaction(DonorID, timePoint, sep = "_")) %>%
dplyr::select(-rowname, -DonorID, -timePoint)
### GATA3 + TCF7 taregts signatures (CHEA enrichment output)
# extract fisher genes
sigs <- read.delim("deg_plot/Tcells_FTest_Pathways_CHEA_M24_higher.txt") %>%
filter(NAME %in% c("GATA3", "TCF7")) %>%
.$INTERSECT %>%
strsplit(",") %>%
unlist() %>% unique()
# call SLEA
expressionSet = counts(esetTemp2)
geneSet <- sigs
sDF3 <- doSLEA(expressionSet = expressionSet, geneSet = geneSet)
names(sDF3) <- "GATA3_TCF7"
sDF3 <- sDF3 %>%
rownames_to_column() %>%
mutate(DonorID = esetTemp2$subjectID[match(rowname, table = esetTemp2$SampleID)],
timePoint = esetTemp2$timePoint[match(rowname, table = esetTemp2$SampleID)],
SampleID = interaction(DonorID, timePoint, sep = "_")) %>%
dplyr::select(-rowname, -DonorID, -timePoint)
## monocyte inflamatory sigs: same SLEA scoring on the Monocyte samples
# extract fisher genes
esetTemp2 <- esetTemp[, esetTemp$cellSubset %in% "Monocytes"]
filter <- apply(counts(esetTemp2), 1, function(x) mean(x)>0)
esetTemp2 <- esetTemp2[filter, ]
sigs <- read.delim("deg_plot/Monocyte_Pathways/Monocytes_FTest_Pathways_Hallmark_M3_M12_higher.txt") %>%
.$INTERSECT %>%
strsplit(",") %>%
unlist() %>% unique()
# call SLEA
expressionSet = counts(esetTemp2)
geneSet <- sigs
sDF4 <- doSLEA(expressionSet = expressionSet, geneSet = geneSet)
names(sDF4) <- "Monocytes_Inf"
sDF4 <- sDF4 %>%
rownames_to_column() %>%
mutate(DonorID = esetTemp2$subjectID[match(rowname, table = esetTemp2$SampleID)],
timePoint = esetTemp2$timePoint[match(rowname, table = esetTemp2$SampleID)],
SampleID = interaction(DonorID, timePoint, sep = "_")) %>%
dplyr::select(-rowname, -DonorID, -timePoint)
## Non-Classical monocytes
sigs <- read.delim("deg_plot/Monocyte_Pathways/Monocytes_FTest_Monocyte Subsets_M3 and M12_higher.txt") %>%
filter(NAME %in% "NonClassical") %>%
.$INTERSECT %>%
strsplit(",") %>%
unlist() %>% unique()
# call SLEA
expressionSet = counts(esetTemp2)
geneSet <- sigs
sDF5 <- doSLEA(expressionSet = expressionSet, geneSet = geneSet)
names(sDF5) <- "Monocytes_NC"
sDF5 <- sDF5 %>%
rownames_to_column() %>%
mutate(DonorID = esetTemp2$subjectID[match(rowname, table = esetTemp2$SampleID)],
timePoint = esetTemp2$timePoint[match(rowname, table = esetTemp2$SampleID)],
SampleID = interaction(DonorID, timePoint, sep = "_")) %>%
dplyr::select(-rowname, -DonorID, -timePoint)
## serratia ratio: per-sample Serratia / other-bacteria ratio keyed by SampleID
serratia <- pathogenratioDF[, c("DonorID_TimePoint", "serratia_otherbacteria")] %>%
  dplyr::rename(SampleID = DonorID_TimePoint)
# merge all tables
colnames(cytokineClusterDF)[1] <- "SampleID"
# Inner-join the five SLEA score tables and the serratia ratio on SampleID.
# (The previous version dropped the outermost merge() call -- a syntax error --
# so `serratia` was never actually joined.)
corDF <- Reduce(function(x, y) merge(x, y, by = "SampleID"),
                list(sDF1, sDF2, sDF3, sDF4, sDF5, serratia)) %>%
  mutate(timepoint = gsub(".+_(.+)", "\\1", SampleID))  # SampleID is Donor_TimePoint
corDF <- merge(corDF, cytokineClusterDF, by = "SampleID")
## CD4/CD8 data frame
# Read CD4:CD8 ratios from Excel and key them by SampleID (Donor_TimePoint).
# The spread()/gather() round-trip fills missing donor/time combinations
# with NA so every donor has a row per time point.
CD4 <- read_excel("RALTcellcounts.xlsx", sheet = 1) %>%
  as.data.frame() %>%
  dplyr::rename(CD4CD8Ratio = `CD4:CD8 Ratio`) %>%
  dplyr::select(DonorID, TimePoint, CD4CD8Ratio) %>%
  mutate(CD4CD8Ratio = as.numeric(CD4CD8Ratio)) %>%  # non-numeric entries become NA
  spread(TimePoint, CD4CD8Ratio) %>%
  gather(TimePoint, CD4CD8, -DonorID) %>%
  mutate(TimePoint = gsub("d", "D", TimePoint),  # normalize "d0" -> "D0"
         SampleID = interaction(DonorID, TimePoint, sep = "_")) %>%
  dplyr::select(-DonorID, -TimePoint)
# correlation between intervals
# Spearman correlation of CD4/CD8 ratio with cytokine cluster C2 at D0 + M12
corDF2 <- merge(corDF, CD4, by = "SampleID", all.x = TRUE) %>%
  mutate(TimePoint = gsub(".+_(.+)", "\\1", SampleID)) %>%
  filter(TimePoint %in% c("D0", "M12"))
cor.test(corDF2$CD4CD8, corDF2$C2, method = "spearman")
|
d32293145a9a1c6c99f81a8ab5412a74ac6d4151 | 4c2a1cd436b7b490ada40eecd50daa97d0fa7874 | /man/permtest.Rd | 8d7a614143618d8862c2e8eeaefcb52761274360 | [] | no_license | cran/skills | 69332c93124af975f4f85f9387d3c3c2d1066f1b | 0e7719442f9e181cb83e3fffb6a3f607a29735b7 | refs/heads/master | 2021-04-26T16:49:32.033515 | 2011-02-18T00:00:00 | 2011-02-18T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,314 | rd | permtest.Rd | \name{permtest}
\Rdversion{1.1}
\alias{permtest}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Permutation test of random assignment of items to skill sets
}
\description{
The function \code{permtest} computes the p-value of a permutation test.
}
\usage{
permtest(eKS, tKS, model)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{eKS}{
Empirical knowledge structure (cf. \link[skills]{skills-package})
in "\link[sets]{gset} of sets" - representation with
memberships equal to observed frequencies.
}
\item{tKS}{
Theoretical knowledge structure (cf. \link[skills]{skills-package})
in "\link[sets]{set} of sets" - representation
}
\item{model}{
Model for Skill Assignment (either "disjunctive" or "conjunctive")
}
}
\details{
The function \code{permtest} computes the p-value of a permutation test with the associated null hypothesis:
\code{w_cind(eKS,tKS)} is the result of a random assignment of problems to skill sets.
}
\value{
p-value
}
\references{
Duentsch, I., Gediga, G. (2002), \emph{Skill Set Analysis in Knowledge Structures}. British Journal of Mathematical and
Statistical Psychology, 55(2), 361 - 384.
}
\author{
Angela Haidinger \email{angela.ulrike.haidinger@student.uni-augsburg.de},\cr
Ali Uenlue \email{uenlue@statistik.tu-dortmund.de}
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
The single weighted consistency indices are computed by \code{\link[skills]{w_cind}}.
}
\examples{
tKS1 = set(set(), set(2), set(1,3,4), set(1,2,3,4), set(1,2,4,5),
set(1,2,3,4,5))
eKS1 = gset(set(set(), set(2,3), set(1,3,4), set(1,2,3,4), set(1,2,4,5),
set(1,2,3,4,5)), memberships = c(1,2,3,4,5,6))
permtest(eKS1, tKS1, "disjunctive")
tKS2 = set(set(), set(3), set(5), set(2,5), set(1,3,4,5), set(1,2,3,4,5))
eKS2 = gset(set(set(), set(3), set(3,5), set(2,4,5), set(1,3,5),
set(1,3,4,5), set(1,2,3,4,5)),
memberships = c(80,96,4,7,14,74,55))
permtest(eKS2, tKS2, "conjunctive")
eKS3 = as.KS(as.gset(pisa))
tKS3 = as.KS(as.relation(eKS3, empirical = TRUE, v = 1))
permtest(eKS3, tKS3, "disjunctive")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{permutation}
\keyword{empirical}
\keyword{knowledge structure}
|
1e877c527dda8cb43336586e556fd02ebb0bd31a | 5ffbd32cadcb617d432bf2b6944a8ee29ba94956 | /plot4.R | 7b36e1fccefdfd82d6b05ce0501ebca27a830d69 | [] | no_license | masroorrizvi/ExData_Plotting1 | 78de0a57fd8b680dff211356341e2c93b8ac9bcb | 0ea646b1e23df820941264de9b3edeb4b6eb18a7 | refs/heads/master | 2021-01-17T21:31:14.584951 | 2015-02-06T10:09:13 | 2015-02-06T10:09:13 | 30,302,136 | 0 | 0 | null | 2015-02-04T14:18:36 | 2015-02-04T14:18:35 | null | UTF-8 | R | false | false | 1,464 | r | plot4.R | library(datasets)
library(data.table)
#1 read and subset the data
df.project <- fread("./household_power_consumption.txt", sep = ";", header = T, colClasses = 'character')
df.project <- subset(df.project, df.project$Date =="1/2/2007" | df.project$Date =="2/2/2007")
#4 Create a png file with required dimentions
png<-png(file = "plot4.png",480,480)
#3 Give the par value for the plot
par(mfcol = c(2,2),mar = c(4,4,2,2))
#5 Let us create the Global Active power line plot
dateTime <- as.POSIXlt(paste(as.Date(df.project$Date,format="%d/%m/%Y"), df.project$Time, sep=" "))
plot(dateTime,df.project$Global_active_power,type = "l", xlab = " ", ylab = "Global Active Power (Kilowatts)")
#6 Let us now create the energy submetering plot
plot(dateTime,df.project$Sub_metering_1,type="l",xlab=" ",ylab="Energy sub metering",ylim=c(0,40))
lines(dateTime,y=as.numeric(df.project$Sub_metering_2),ylim=c(0,40),col="red")
lines(dateTime,y=as.numeric(df.project$Sub_metering_3),ylim=c(0,40),col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch=c(NA,NA,NA),col=c("black","red","blue"),lty=c(1,1,1),xjust=1)
#7 Time to create the 3rd plot for voltage
plot(dateTime,y=as.numeric(df.project$Voltage),type="l",ylab="Voltage")
#8 Lastly, create the plot for Global Reactive Power
plot(dateTime,y=as.numeric(df.project$Global_reactive_power),type="l",ylab="Global_reactive_power")
# Dont forget to close the connection
dev.off() |
38db2827ac152d629e687d24cd412a71a442b99d | 4964e91d693fb65cb576e7a804629e4e5fa09c4d | /Project_Jongpil.R | 9b7816abf6b5afc6ca4bde3371c0f574fff77ad8 | [] | no_license | Oscar-Rydh/R_Project | 9d9dafe82c3cde1ee6580e3081c4277358737f6b | 90833520b0cc1416d4c774c5567c0ba888d3f714 | refs/heads/master | 2020-03-18T09:30:54.399133 | 2018-06-08T09:46:29 | 2018-06-08T09:46:29 | 134,567,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,668 | r | Project_Jongpil.R | # read libraries and possible variables
library(tidyr)
library(reshape)
library(lfe)
# ------------------------- HDR indicator loading ----------------------------- #
# Read one UNDP Human Development Report indicator CSV and reshape it into a
# tidy (Country, Year, value) table. Replaces eight copy-pasted blocks that
# each repeated the same clean/reshape steps.
# - `path`: CSV path; files carry a one-line preamble (hence skip = 1).
# - `value_name`: name to give the value column in the output.
# Returns a long-format data frame with columns Country, Year, <value_name>.
read_hdr <- function(path, value_name) {
  df <- read.csv(path, header = TRUE, skip = 1)
  # drop columns that are entirely NA (empty spacer columns in the export)
  df <- df[, apply(df, 2, function(x) sum(!is.na(x)) > 0)]
  # drop export artifacts that are not year columns
  df <- df[, !(colnames(df) %in% c("X.5", "HDI.Rank..2015."))]
  # wide (one column per year) -> long (Country, Year, value)
  df <- melt(df, id.vars = 1)
  colnames(df) <- c("Country", "Year", value_name)
  df
}
meanschooling       <- read_hdr("Data_Jongpil/Mean years of schooling (years).csv", "meanschooling")
inequalityincome    <- read_hdr("Data_Jongpil/Inequality in income (%).csv", "inequalityincome")
inequalityeducation <- read_hdr("Data_Jongpil/Inequality in education (%).csv", "inequalityeducation")
internetusers       <- read_hdr("Data_Jongpil/Internet users (% of population).csv", "internetusers")
mobilephone         <- read_hdr("Data_Jongpil/Mobile phone subscriptions (per 100 people).csv", "mobilephone")
unemploymentrate    <- read_hdr("Data_Jongpil/Total unemployment rate (% of labour force).csv", "unemploymentrate")
expenditureonedu    <- read_hdr("Data_Jongpil/Government expenditure on education (% of GDP).csv", "expenditureonedu")
gdptotal            <- read_hdr("Data_Jongpil/Gross domestic product (GDP), total (2011 PPP $ billions).csv", "gdptotal")
# -------------------------data merge----------------------------- #
meanschooling_inequalityincome = merge(x=meanschooling, y=inequalityincome, by=c('Year','Country'), all=FALSE)
meanschooling_inequalityincome <- meanschooling_inequalityincome[complete.cases(meanschooling_inequalityincome),]
meanschooling_inequalityincome$meanschooling <- as.numeric(meanschooling_inequalityincome$meanschooling)
meanschooling_inequalityincome$inequalityincome <- as.numeric(meanschooling_inequalityincome$inequalityincome)
meanschooling_inequalityeducation = merge(x=meanschooling, y=inequalityeducation, by=c('Year','Country'), all=FALSE)
meanschooling_inequalityeducation <- meanschooling_inequalityeducation[complete.cases(meanschooling_inequalityeducation),]
meanschooling_inequalityeducation$meanschooling <- as.numeric(meanschooling_inequalityeducation$meanschooling)
meanschooling_inequalityeducation$inequalityeducation <- as.numeric(meanschooling_inequalityeducation$inequalityeducation)
meanschooling_internetusers = merge(x=meanschooling, y=internetusers, by=c('Year','Country'), all=FALSE)
meanschooling_internetusers <- meanschooling_internetusers[complete.cases(meanschooling_internetusers),]
meanschooling_internetusers$meanschooling <- as.numeric(meanschooling_internetusers$meanschooling)
meanschooling_internetusers$internetusers <- as.numeric(meanschooling_internetusers$internetusers)
meanschooling_mobilephone = merge(x=meanschooling, y=mobilephone, by=c('Year','Country'), all=FALSE)
meanschooling_mobilephone <- meanschooling_mobilephone[complete.cases(meanschooling_mobilephone),]
meanschooling_mobilephone$meanschooling <- as.numeric(meanschooling_mobilephone$meanschooling)
meanschooling_mobilephone$mobilephone <- as.numeric(meanschooling_mobilephone$mobilephone)
meanschooling_unemploymentrate = merge(x=meanschooling, y=unemploymentrate, by=c('Year','Country'), all=FALSE)
meanschooling_unemploymentrate <- meanschooling_unemploymentrate[complete.cases(meanschooling_unemploymentrate),]
meanschooling_unemploymentrate$meanschooling <- as.numeric(meanschooling_unemploymentrate$meanschooling)
meanschooling_unemploymentrate$unemploymentrate <- as.numeric(meanschooling_unemploymentrate$unemploymentrate)
meanschooling_expenditureonedu = merge(x=meanschooling, y=expenditureonedu, by=c('Year','Country'), all=FALSE)
meanschooling_expenditureonedu <- meanschooling_expenditureonedu[complete.cases(meanschooling_expenditureonedu),]
meanschooling_expenditureonedu$meanschooling <- as.numeric(meanschooling_expenditureonedu$meanschooling)
meanschooling_expenditureonedu$expenditureonedu <- as.numeric(meanschooling_expenditureonedu$expenditureonedu)
meanschooling_gdptotal = merge(x=meanschooling, y=gdptotal, by=c('Year','Country'), all=FALSE)
meanschooling_gdptotal <- meanschooling_gdptotal[complete.cases(meanschooling_gdptotal),]
meanschooling_gdptotal$meanschooling <- as.numeric(meanschooling_gdptotal$meanschooling)
meanschooling_gdptotal$gdptotal <- as.numeric(meanschooling_gdptotal$gdptotal)
# -------------------------time series regression----------------------------- #
# Two-way fixed-effects regressions (lfe::felm): mean years of schooling on
# each indicator, with year and country fixed effects via G().
# NOTE(review): G() is the older felm factor syntax; newer lfe versions use
# `y ~ x | year + country` -- confirm against the installed lfe version.
model = felm(meanschooling_inequalityincome$meanschooling ~ meanschooling_inequalityincome$inequalityincome + G(meanschooling_inequalityincome$Year) + G(meanschooling_inequalityincome$Country))
summary(model)
model = felm(meanschooling_inequalityeducation$meanschooling ~ meanschooling_inequalityeducation$inequalityeducation + G(meanschooling_inequalityeducation$Year) + G(meanschooling_inequalityeducation$Country))
summary(model)
model = felm(meanschooling_internetusers$meanschooling ~ meanschooling_internetusers$internetusers + G(meanschooling_internetusers$Year) + G(meanschooling_internetusers$Country))
summary(model)
model = felm(meanschooling_mobilephone$meanschooling ~ meanschooling_mobilephone$mobilephone + G(meanschooling_mobilephone$Year) + G(meanschooling_mobilephone$Country))
summary(model)
model = felm(meanschooling_unemploymentrate$meanschooling ~ meanschooling_unemploymentrate$unemploymentrate + G(meanschooling_unemploymentrate$Year) + G(meanschooling_unemploymentrate$Country))
summary(model)
model = felm(meanschooling_expenditureonedu$meanschooling ~ meanschooling_expenditureonedu$expenditureonedu + G(meanschooling_expenditureonedu$Year) + G(meanschooling_expenditureonedu$Country))
summary(model)
model = felm(meanschooling_gdptotal$meanschooling ~ meanschooling_gdptotal$gdptotal + G(meanschooling_gdptotal$Year) + G(meanschooling_gdptotal$Country))
summary(model)
# plain OLS comparison, kept for reference:
#model = lm(meanschooling_gdptotal$meanschooling ~ meanschooling_gdptotal$gdptotal)
#summary(model)
|
8e4b834d8ab2ccd3de0021f909a203f16c728994 | 7fab620b05791ba4c3cdbd55a1ebba30cc3aedce | /plot1.R | 721734014748d09cf3cbcebd7d078f16be467a8d | [] | no_license | datasciencecg/ExData_Plotting1 | edac36df8e33b28251bbb331565ae0aeb3ce2629 | db61be483c556e82255b931d7e431c33b0ff5c01 | refs/heads/master | 2020-04-01T21:52:44.248742 | 2015-01-11T19:53:21 | 2015-01-11T19:53:21 | 29,093,799 | 0 | 0 | null | 2015-01-11T14:12:49 | 2015-01-11T14:12:49 | null | UTF-8 | R | false | false | 570 | r | plot1.R | temp <- tempfile()
fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, temp, mode="wb")
unzip(temp)
data <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors=FALSE, na.strings = "?")
data <- subset(data,Date=="1/2/2007" | Date=="2/2/2007")
data[,1] <- as.Date(data[,1], "%d/%m/%Y")
png(filename = "plot1.png", width = 480, height = 480)
hist(data$Global_active_power, col = "red", main = "Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
68d71442ed2b075ac2d00489c80c71ed9dca70d9 | 6f9bf76b4b15278c8852d57309c9aa9ebcb0849e | /R/Backtrajectory.r | 7ab8a8f06dbf59ae7f1ca22a018e0addfc28742c | [] | no_license | songnku/COVID-19-AQ | 7d82aad2d43c0671513635b841515c098579641b | ebfbc16db532e65d7762d65b86ce302344bd6bfa | refs/heads/master | 2023-06-23T09:01:04.975371 | 2021-07-15T11:21:40 | 2021-07-15T11:21:40 | 277,182,517 | 8 | 4 | null | null | null | null | UTF-8 | R | false | false | 6,460 | r | Backtrajectory.r | library (openair)
library(lubridate)
library(latticeExtra)
library(ggplot2)
# require(devtools)
# install_github('davidcarslaw/worldmet')
library(worldmet) ## download_met_data
library(mapdata)
# Root folder holding the HYSPLIT met data and outputs (Windows path)
dataDir="D:\\Hysplit"
setwd(dataDir) ### Set the working directory
# NOTE(review): `<<-` at top level is equivalent to `<-` here; plain
# assignment would be clearer
workingDirectory<<-dataDir ### Shortcut for the working directory
getMet <- function (year = 2013:2020, month = 1:12, path_met = "D:\\Hysplit\\TrajData\\") {
  # Download NOAA reanalysis met archives (RPyyyymm.gbl) for every requested
  # year/month combination into `path_met`. Files are fetched in binary mode.
  for (yr in year) {
    for (mo in month) {
      fname <- paste0("RP", yr, sprintf("%02d", mo), ".gbl")
      download.file(
        url = paste0("ftp://arlftp.arlhq.noaa.gov/archives/reanalysis/", fname),
        destfile = paste0(path_met, fname),
        mode = "wb"
      )
    }
  }
}
getMet(year = 2020:2020, month = 1:10) ### GET met data for a specific period
# NOTE(review): the block below duplicates the library/setwd preamble at the
# top of this file (with a different dataDir) -- likely two sessions pasted
# together; consider removing one copy.
library (openair)
library(lubridate)
library(latticeExtra)
library(ggplot2)
# require(devtools)
# install_github('davidcarslaw/worldmet')
library(worldmet) ## download_met_data
#library(mapdata)
dataDir="D:\\OneDrive - mail.nankai.edu.cn\\Hysplit"
setwd(dataDir) ### Set the working directory
workingDirectory<<-dataDir ### Shortcut for the working directory
hy.path<-"c:\\hysplit4\\" ### Path to the local HYSPLIT installation
# Concatenate all HYSPLIT "tdump*" trajectory output files in the current
# working directory into one data frame.
# - `hours`: currently unused -- NOTE(review): drop it or use it.
# - `hy.path`: HYSPLIT install root; working\Rcombined.txt is written there.
# Returns a data frame with receptor/date/lat/lon/height/pressure columns,
# where `date` is the trajectory arrival time and `date2` the point time.
read.files <- function(hours = 96, hy.path) {
  ## find tdump files
  files <- Sys.glob("tdump*")
  output <- file('Rcombined.txt', 'w')
  ## read through them all, ignoring 1st 7 lines
  for (i in files){
    input <- readLines(i)
    input <- input[-c(1:7)] # delete header
    writeLines(input, output)
  }
  close(output)
  traj <- read.table(paste0(hy.path, "working\\Rcombined.txt"), header = FALSE)
  traj <- subset(traj, select = -c(V2, V7, V8))
  # plyr-style rename of the positional HYSPLIT columns
  traj <- rename(traj, c(V1 = "receptor", V3 = "year", V4 = "month", V5 = "day",
                         V6 = "hour", V9 = "hour.inc", V10 = "lat", V11 = "lon",
                         V12 = "height", V13 = "pressure"))
  ## hysplit uses 2-digit years ...
  # pivot at 50: years < 50 are 20xx, otherwise 19xx (checked on first row only)
  year <- traj$year[1]
  if (year < 50) traj$year <- traj$year + 2000 else traj$year <- traj$year + 1900
  traj$date2 <- with(traj, ISOdatetime(year, month, day, hour, min = 0, sec = 0,
                                       tz = "GMT"))
  ## arrival time
  # hour.inc is the (negative) offset from arrival for back trajectories
  traj$date <- traj$date2 - 3600 * traj$hour.inc
  traj
}
add.met <- function(month, Year, met, bat.file) {
  # Append two "echo ... >>CONTROL" lines to the HYSPLIT batch file:
  # first the met data directory, then the reanalysis file name for the
  # requested month. month == 0 means December of the previous year.
  if (month == 0) {
    Year <- as.numeric(Year) - 1
    month <- 12
  }
  # zero-pad single-digit months so file names match RPyyyymm.gbl
  if (month < 10) month <- paste0("0", month)
  append_line <- function(txt) {
    write.table(txt, bat.file, col.names = FALSE, row.names = FALSE,
                quote = FALSE, append = TRUE)
  }
  append_line(paste("echo", met, " >>CONTROL"))
  append_line(paste0("echo RP", Year, month, ".gbl >>CONTROL"))
}
# Run hourly HYSPLIT back-trajectories for one site and one year.
# For every hour of `year`, writes a CONTROL file via a Windows batch script,
# runs hyts_std, then combines all tdump outputs (read.files) and saves them
# as <out><name><Year>.RData.
# - lat/lon: receptor location; height: start height (m AGL)
# - hours: back-trajectory length in hours
# - met: folder holding RPyyyymm.gbl met files; out: output folder
# - hy.path: HYSPLIT install root (must contain working\ and exec\hyts_std)
procTraj <- function(lat = 48.9, lon = 2.2, year = 2019, name = "paris",
                     met = "D:\\COVID-19\\Hysplit\\TrajData\\", out = "D:\\COVID-19\\Hysplit\\TrajProc\\",
                     hours = 24, height = 100, hy.path = "C:\\hysplit4\\") {
  ## hours is the back trajectory time e.g. 96 = 4-day back trajectory
  ## height is start height (m)
  lapply(c("openair", "plyr", "reshape2"), require, character.only = TRUE)
  ## function to run 12 months of trajectories
  ## assumes 96 hour back trajectories, 1 receptor
  setwd(paste0(hy.path, "working\\"))
  ## remove existing "tdump" files
  path.files <- paste0(hy.path, "working\\")
  bat.file <- paste0(hy.path, "working\\test.bat") ## name of BAT file to add to/run
  files <- list.files(path = path.files, pattern = "tdump")
  lapply(files, function(x) file.remove(x))
  # every hour of the requested year (GMT)
  start <- paste(year, "-01-01", sep = "")
  end <- paste(year, "-12-31 23:00", sep = "")
  dates <- seq(as.POSIXct(start, "GMT"), as.POSIXct(end, "GMT"), by = "1 hour")
  for (i in 1:length(dates)) {
    year <- format(dates[i], "%y")
    Year <- format(dates[i], "%Y") # long format
    month <- format(dates[i], "%m")
    day <- format(dates[i], "%d")
    hour <- format(dates[i], "%H")
    # first write (no append): start a fresh CONTROL for this hour
    x <- paste("echo", year, month, day, hour, " >CONTROL")
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE)
    x <- "echo 1 >>CONTROL"
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
    x <- paste("echo", lat, lon, height, " >>CONTROL")
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
    x <- paste("echo ", "-", hours, " >>CONTROL", sep = "")
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
x <- "echo 0 >>CONTROL
echo 10000.0 >>CONTROL
echo 3 >>CONTROL"
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
    ## processing always assumes 3 months of met for consistent tdump files
    months <- as.numeric(unique(format(dates[i], "%m")))
    months <- c(months, months + 1:2)
    months <- months - 1 ## to make sure we get the start of the previous year
    months <- months[months <= 12]
    if (length(months) == 2) months <- c(min(months) - 1, months)
    # NOTE(review): this inner loop reuses `i`, clobbering the outer loop
    # variable; harmless in R (the outer `for` reassigns i each iteration)
    # but a different index name would be clearer
    for (i in 1:3)
      add.met(months[i], year, met, bat.file)
    x <- "echo ./ >>CONTROL"
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
    x <- paste("echo tdump", year, month, day, hour, " >>CONTROL", sep = "")
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
    x <- "C:\\hysplit4\\exec\\hyts_std"
    write.table(x, bat.file, col.names = FALSE,
                row.names = FALSE, quote = FALSE, append = TRUE)
    ## run the file
    system(paste0(hy.path, 'working\\test.bat'))
  }
  ## combine files and make data frame
  traj <- read.files(hours, hy.path)
  ## write R object to file
  file.name <- paste(out, name, Year, ".RData", sep = "")
  save(traj, file = file.name)
}
# Run 72 h (3-day) back-trajectories for London for each year 2009-2020;
# change the year range here to process a different span.
for (i in 2009:2020) {
  procTraj(lat = 51.5, lon = -0.2, year = i,
           name = "London", hours = 72,height = 100,
           met = "D:\\Hysplit\\TrajData\\", out = "D:\\Hysplit\\TrajProc\\",
           hy.path = "C:\\hysplit4\\") }
|
f744b4dd163e2692ec6fa385ab73ea44468bef9c | 4b5035e6a1c7f31c66e0641c8ff9ce3ad8e8421d | /R/plotting.R | 154dab2a566fbf91d8dd8c55d6b9ce3bf76566ed | [] | no_license | alanrupp/rnaseq | 6a398eb474fdf5da2c825b781542191b31af3626 | ed440183dd2c582d3c49b01434cc370b0a897dbf | refs/heads/master | 2021-06-26T08:59:10.595555 | 2021-02-24T20:38:09 | 2021-02-24T20:38:09 | 210,405,308 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,463 | r | plotting.R | library(tidyverse)
library(ggrepel)
library(wesanderson)
library(ggdendro)
# - Read depth ----------------------------------------------------------------
# ENCODE suggests read depth > 30M
read_depth <- function(counts) {
  # Total mapped reads per sample: sum of each numeric (count) column.
  # `counts` is a data frame with a gene_id column plus one numeric column
  # per sample; non-numeric columns are ignored.
  # Returns a named numeric vector with one total per sample column.
  # Base-R implementation so this helper does not depend on dplyr.
  numeric_cols <- vapply(counts, is.numeric, logical(1))
  colSums(counts[, numeric_cols, drop = FALSE])
}
# - Plot count data -----------------------------------------------------------
counts_plot <- function(counts, gene_names, info, samples = NULL,
                        group = NULL, color = NULL, shape = NULL, dodge = NULL,
                        pair = FALSE, cpm = TRUE, n_col = NULL) {
  # Boxplot with jittered points of expression for one or more genes.
  # - counts: counts table with a gene_id column; gene_names: genes to show
  #   (matched against the global `genes` lookup table)
  # - info: sample metadata joined on Sample_ID
  # - samples: optional subset of sample columns; dodge: metadata column for
  #   the x axis; color: metadata column mapped to point color
  # - cpm: convert counts to CPM via make_cpm(); n_col: facet columns
  # NOTE(review): group, shape and pair are accepted for interface
  # compatibility but are not implemented.
  # Returns a ggplot object (log2 y scale).
  # subset samples
  if (!is.null(samples)) { counts <- select(counts, gene_id, samples) }
  # make CPM
  if (cpm) { df <- make_cpm(counts) } else { df <- counts }
  # add gene names and sample metadata, long format
  df <- left_join(df, genes, by = "gene_id") %>%
    filter(gene_name %in% gene_names) %>%
    select(-gene_id, -gene_biotype) %>%
    gather(-gene_name, key = "Sample_ID", value = "expr") %>%
    left_join(., info, by = "Sample_ID")
  # x aesthetic: the dodge variable when supplied, otherwise one dummy category
  if (!is.null(dodge)) {
    p <- ggplot(df, aes(x = !!sym(dodge), y = expr))
  } else {
    p <- ggplot(df, aes(x = '', y = expr))
  }
  p <- p +
    geom_boxplot() +
    scale_y_continuous(trans = "log2") +
    theme_classic() +
    ylab("Expression (CPM)") +
    xlab(NULL)
  # draw each point exactly once, colored when requested (previously a second
  # point layer with no x aesthetic was stacked on top when `color` was set)
  if (is.null(color)) {
    p <- p + geom_jitter(height = 0, width = 0.2)
  } else {
    p <- p + geom_jitter(aes(color = !!sym(color)), height = 0, width = 0.2)
  }
  # facet for multiple genes
  if (length(gene_names) > 1) {
    p <- p + facet_wrap(~gene_name, ncol = n_col, scales = "free_y")
  }
  return(p)
}
# - Volcano plot --------------------------------------------------------------
volcano_plot <- function(results, label = NULL, xmax = NULL, ymax = NULL) {
  # Volcano plot of differential-expression results: log2 fold change vs
  # -log10 adjusted p value. `results` needs columns gene_id, log2FoldChange
  # and padj. `label` lists gene names to annotate (looked up in the global
  # `genes` table); xmax/ymax override the data-derived axis limits.
  # Returns a ggplot object.
  if (is.null(xmax)) {
    # symmetric x limit: widest significant fold change, or widest overall
    # when nothing passes padj < 0.05
    if (sum(results$padj < 0.05, na.rm = TRUE) > 0) {
      fold_changes <- filter(results, padj < 0.05)$log2FoldChange
    } else {
      fold_changes <- results$log2FoldChange
    }
    xmax <- max(abs(fold_changes), na.rm = TRUE)
  }
  if (is.null(ymax)) {
    # largest finite -log10(padj); padj == 0 would give Inf
    neg_log_p <- -log10(results$padj)
    ymax <- max(neg_log_p[is.finite(neg_log_p)])
  }
  # genes without an adjusted p value cannot be placed on the plot
  results <- filter(results, !is.na(padj))
  plt <- ggplot(results,
                aes(x = log2FoldChange, y = -log10(padj), color = padj < 0.05)) +
    geom_hline(aes(yintercept = -log10(0.05)), linetype = "dashed") +
    geom_vline(aes(xintercept = 0)) +
    geom_vline(aes(xintercept = log2(1.5)), linetype = "dashed") +
    geom_vline(aes(xintercept = -log2(1.5)), linetype = "dashed") +
    geom_point(show.legend = FALSE, stroke = 0, alpha = 0.4) +
    scale_color_manual(values = c("gray", "firebrick3")) +
    theme_bw() +
    theme(panel.grid = element_blank()) +
    labs(x = expression("Fold change (log"[2]*")"),
         y = expression(italic("P")*" value (-log"[10]*")")) +
    scale_x_continuous(expand = c(0, 0), limits = c(-xmax, xmax)) +
    scale_y_continuous(expand = c(0, 0), limits = c(0, ymax))
  if (!is.null(label)) {
    # annotate the requested genes, falling back to gene_id when unnamed
    to_label <- results %>%
      left_join(genes, by = "gene_id") %>%
      mutate(gene_name = ifelse(is.na(gene_name), gene_id, gene_name)) %>%
      filter(gene_name %in% label)
    plt <- plt + geom_text_repel(data = to_label, aes(label = gene_name),
                                 show.legend = FALSE)
  }
  return(plt)
}
# - Spectral color palette ----------------------------------------------------
# from colorlover python package
make_spectral <- function(n = 100) {
colors <- c(rgb(158/255, 1/255, 66/255, 1),
rgb(213/255, 62/255, 79/255, 1),
rgb(244/255, 109/255, 67/255, 1),
rgb(253/255, 174/255, 97/255, 1),
rgb(254/255, 224/255, 139/255, 1),
rgb(255/255, 255/255, 191/255, 1),
rgb(230/255, 245/255, 152/255, 1),
rgb(171/255, 221/255, 164/255, 1),
rgb(102/255, 194/255, 165/255, 1),
rgb(50/255, 136/255, 189/255, 1),
rgb(94/255, 79/255, 162/255, 1))
colorRampPalette(colors)(n)
}
# - Heatmap plot --------------------------------------------------------------
# Expression heatmap with optional hierarchical clustering, per-sample
# annotation bars and a sample dendrogram.
# - counts: counts table with a gene_id column; gene_ids: optional subset
# - info / annotation: sample metadata and metadata column names to draw as
#   colored bars above the heatmap
# - max_cpm: values above this are clipped; make_cpm: convert to log2 CPM
# - label_genes / cluster_genes / cluster_samples / draw_tree / tree_scaling:
#   display options; color_palette: fill gradient colors
# Returns a ggplot object.
# NOTE(review): the `make_cpm` argument shadows the make_cpm() function by
# name; R still resolves the call to the function (non-function bindings are
# skipped in call position), but renaming the argument would be clearer.
heatmap_plot <- function(counts, gene_ids = NULL,
                         info = NULL, annotation = NULL,
                         max_cpm = 10, make_cpm = TRUE,
                         label_genes = FALSE,
                         cluster_genes = TRUE, cluster_samples = TRUE,
                         draw_tree = FALSE,
                         color_palette = make_spectral(),
                         tree_scaling = 1) {
  if (make_cpm) { counts <- make_cpm(counts, log2 = TRUE) }
  if (!is.null(gene_ids)) {
    counts <- filter(counts, gene_id %in% gene_ids)
  } else {
    gene_ids <- unique(counts$gene_id)
  }
  # make tidy
  df <- gather(counts, -gene_id, key = "Sample_ID", value = "counts")
  # cluster genes and samples (Euclidean distance, complete linkage)
  if (cluster_genes) {
    gene_clust <- counts %>% as.data.frame() %>%
      column_to_rownames("gene_id") %>% dist() %>% hclust()
    gene_levels <- gene_clust$labels[gene_clust$order]
  } else {
    gene_levels <- gene_ids
  }
  if (cluster_samples) {
    sample_clust <- hclust(dist(t(counts[, 2:ncol(counts)])))
    sample_levels <- sample_clust$labels[sample_clust$order]
  } else {
    sample_levels <- colnames(counts)[colnames(counts) != "gene_id"]
  }
  # fix row/column order via factor levels
  df <- df %>%
    mutate(gene_id = factor(gene_id, levels = gene_levels),
           Sample_ID = factor(Sample_ID, levels = sample_levels))
  # clip max at given max value
  df <- mutate(df, counts = ifelse(counts > max_cpm, max_cpm, counts))
  # plot (numeric y so annotation bars can be drawn above the last gene row)
  plt <- ggplot(df, aes(x = Sample_ID, y = as.numeric(gene_id), fill = counts)) +
    geom_tile() +
    xlab(NULL) + ylab(NULL) +
    scale_y_continuous(expand = c(0, 0)) +
    theme_void() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5,
                                     color = "black"),
          axis.text.y = element_blank())
  if (label_genes) {
    plt <- plt + theme(axis.text.y = element_text(color = "black"))
  }
  if (!is.null(color_palette)) {
    plt <- plt +
      scale_fill_gradientn(colors = color_palette,
                           name = expression(underline("CPM\n(log"[2]*")")))
  }
  # add annotation
  if (!is.null(info)) {
    anno <- left_join(df, info, by = "Sample_ID")
    anno <- anno %>% mutate(
      Sample_ID = factor(Sample_ID, levels = sample_levels)
    ) %>% as.data.frame()
  }
  # add annotation (optional)
  # Helpers close over anno / samples / max_yval / block_size / colors, which
  # are assigned in the annotation branch below before the helpers are called.
  # one colored rectangle for annotation i above sample j
  add_annotation_rect <- function(i, j) {
    annotate("rect",
             xmin = which(levels(anno$Sample_ID) == samples[j]) - 0.5,
             xmax = which(levels(anno$Sample_ID) == samples[j]) + 0.5,
             ymin = max_yval + block_size * (i - 1),
             ymax = max_yval + block_size * i,
             fill = colors[anno[anno$Sample_ID == samples[j], annotation[i]]]
    )
  }
  # centered label naming annotation row i
  add_annotation_text <- function(i) {
    annotate("label", x = length(samples)/2 + 0.5,
             y = max_yval + block_size * (i - 0.5),
             label = annotation[i], hjust = 0.5, color = "gray90",
             fill = "gray10", alpha = 0.5, label.size = NA)
  }
  if (!is.null(info) & !is.null(annotation)) {
    max_yval <- length(levels(df$gene_id))
    block_size <- max_yval * 0.05  # each annotation bar is 5% of heatmap height
    samples <- unique(df$Sample_ID)
    # NOTE(review): palettes are sampled without a fixed seed, so annotation
    # colors change between calls
    brewer_palettes <- c("Accent", "Dark2", "Paired", "Pastel1", "Pastel2",
                         "Set1", "Set2", "Set3")
    palettes <- base::sample(brewer_palettes, length(annotation))
    for (i in 1:length(annotation)) {
      classes <- unique(anno[, annotation[i]])
      colors <- RColorBrewer::brewer.pal(n = length(classes),
                                         name = palettes[i])
      names(colors) <- classes
      for (j in 1:length(samples)) {
        plt <- plt + add_annotation_rect(i, j)
      }
      plt <- plt + add_annotation_text(i)
    }
  } else if (!is.null(annotation) & is.null(info)) {
    # NOTE(review): this branch leaves block_size undefined, so draw_tree =
    # TRUE combined with annotation-but-no-info would error below
    message("Cannot add annotation to a plot without an info object")
  } else {
    block_size <- 0
  }
  # return plot
  if (draw_tree) {
    # draw tree
    # NOTE(review): requires cluster_samples = TRUE (sample_clust is only
    # defined in that branch); `scaling` below is computed but never used
    sticks <- dendro_data(sample_clust)$segments
    scaling <- length(gene_ids) * 0.1
    for (i in 1:nrow(sticks)) {
      plt <- plt +
        annotate("segment",
                 x = sticks[i, "x"], xend = sticks[i, "xend"],
                 y = (sticks[i, "y"] * tree_scaling) +
                   (block_size * length(annotation)) +
                   length(gene_ids) + 0.5,
                 yend = (sticks[i, "yend"] * tree_scaling) +
                   (block_size * length(annotation)) +
                   length(gene_ids) + 0.5)
    }
  }
  return(plt)
}
# - TSNE ----------------------------------------------------------------------
tsne_plot <- function(pca, pcs, info = NULL, sample_name = "Sample_ID",
                      color = NULL, shape = NULL, text = NULL) {
  # Run t-SNE on the selected principal components. The seed is fixed so the
  # embedding is reproducible; perplexity is scaled to the sample count
  # (Rtsne requires perplexity < (n - 1) / 3).
  set.seed(32)
  tsne <- Rtsne::Rtsne(pca$x[, pcs], perplexity = (nrow(pca$x) - 1) / 3)
  coord <- tsne$Y %>%
    as.data.frame() %>%
    mutate(Sample_ID = rownames(pca$x)) %>%
    set_names("TSNE1", "TSNE2", sample_name)
  # Attach sample metadata so color/shape/text can reference its columns.
  if (!is.null(info)) coord <- left_join(coord, info, by = sample_name)
  # Build point aesthetics from the optional column names. Previously the
  # `shape` and `text` arguments were accepted but ignored, and a colored
  # second geom_point() layer was stacked over the first via `%+%`.
  mapping <- aes(x = TSNE1, y = TSNE2)
  if (!is.null(color)) mapping <- utils::modifyList(mapping, aes(color = !!sym(color)))
  if (!is.null(shape)) mapping <- utils::modifyList(mapping, aes(shape = !!sym(shape)))
  plt <- ggplot(coord, mapping) +
    geom_point() +
    theme_bw() +
    theme(panel.grid = element_blank())
  # Optional per-point labels drawn just above each point.
  if (!is.null(text)) {
    plt <- plt + geom_text(aes(label = !!sym(text)), vjust = -1, show.legend = FALSE)
  }
  return(plt)
}
# - Correlation ---------------------------------------------------------------
# ENCODE suggests correlation of replicates should have Spearman > 0.9
# Plot a sample-by-sample correlation heatmap, optionally thresholded at a
# fixed correlation, annotated with sample metadata, and topped with a
# clustering dendrogram.
# NOTE(review): `sample_clust` is only created when cluster_samples = TRUE,
# but it is referenced below whenever `info` is supplied or draw_tree = TRUE;
# confirm callers never combine cluster_samples = FALSE with those options.
correlation_plot <- function(counts, genes = NULL, info = NULL,
                             annotation = NULL, threshold = NULL,
                             cluster_samples = TRUE,
                             draw_tree = FALSE) {
  # grab pairwise correlations (long format with Sample_A / Sample_B / corr)
  corr <- correlation(counts, genes)
  # cluster genes and samples
  if (cluster_samples) {
    # Hierarchical clustering on the wide correlation matrix; the leaf order
    # becomes the factor-level (axis) order for both sample dimensions.
    sample_clust <- corr %>%
      spread(key = "Sample_B", value = "corr") %>%
      as.data.frame() %>%
      column_to_rownames("Sample_A") %>%
      dist() %>%
      hclust()
    corr <- corr %>% mutate(
      Sample_A = factor(Sample_A, levels = sample_clust$labels[sample_clust$order]),
      Sample_B = factor(Sample_B, levels = sample_clust$labels[sample_clust$order])
    )
  }
  # plot
  plt <- ggplot(corr, aes(x = Sample_A, y = Sample_B)) +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 1),
          axis.title = element_blank(),
          plot.background = element_blank(),
          panel.background = element_blank())
  if (is.null(threshold)) {
    # Continuous fill, centered at 0.9 (the ENCODE replicate guideline).
    plt <- plt + geom_tile(aes(fill = corr)) +
      scale_fill_gradient2(low = "#ca562c", mid = "#f6edbd", high = "#008080",
                           midpoint = 0.9,
                           name = expression(underline("Correlation")))
  } else {
    # Binary fill: above/below the user-supplied threshold.
    label <- paste("Correlation >", threshold)
    plt <- plt + geom_tile(aes(fill = corr > threshold)) +
      scale_fill_manual(values = c("#273046", "#FAD510"),
                        name = substitute(underline(label))
      )
  }
  # add annotation
  if (!is.null(info)) {
    # Join metadata onto the correlation table, keyed by sample identifier.
    anno <- left_join(corr, info, by = c("Sample_A" = "Sample_ID"))
    anno <- anno %>% mutate(
      Sample_A = factor(Sample_A, levels = sample_clust$labels[sample_clust$order])
    )
  }
  # add annotation (optional)
  # These helpers rely on lexical scoping: `anno`, `samples`, `max_yval`,
  # `colors`, and `annotation` are read from the enclosing frame when called
  # inside the loop below.
  add_annotation_rect <- function(i, j) {
    annotate("rect",
             xmin = which(levels(anno$Sample_A) == samples[j]) - 0.5,
             xmax = which(levels(anno$Sample_A) == samples[j]) + 0.5,
             ymin = max_yval + i - 0.5,
             ymax = max_yval + i + 0.5,
             fill = colors[anno[anno$Sample_A == samples[j], annotation[i]]]
    )
  }
  add_annotation_text <- function(i) {
    annotate("label", x = length(samples)/2 + 0.5, y = max_yval + i,
             label = annotation[i], hjust = 0.5, color = "gray90",
             fill = "gray10", alpha = 0.5, label.size = NA)
  }
  if (!is.null(info) & !is.null(annotation)) {
    # One annotation row per metadata column, stacked above the heatmap;
    # each row gets a randomly chosen qualitative brewer palette.
    max_yval <- length(unique(corr$Sample_B))
    samples <- unique(corr$Sample_A)
    brewer_palettes <- c("Accent", "Dark2", "Paired", "Pastel1", "Pastel2",
                         "Set1", "Set2", "Set3")
    palettes <- base::sample(brewer_palettes, length(annotation))
    for (i in 1:length(annotation)) {
      classes <- unique(anno[,annotation[i]])
      colors <- RColorBrewer::brewer.pal(n = length(classes),
                                         name = palettes[i])
      names(colors) <- classes
      for (j in 1:length(samples)) {
        plt <- plt + add_annotation_rect(i, j)
      }
      plt <- plt + add_annotation_text(i)
    }
  } else if (!is.null(annotation) & is.null(info)) {
    message("Cannot add annotation to a plot without an info object")
  }
  # return plot
  if (draw_tree) {
    # Stack the clustering dendrogram above the heatmap; margins shrink with
    # sample count so tree leaves line up with heatmap columns.
    n_samples <- length(sample_clust$labels)
    tree_plot <- ggdendrogram(dendro_data(sample_clust)) +
      theme_void() +
      theme(plot.margin = unit(c(0, 1.1-(0.008*n_samples),
                                 0, 0.2-(0.008*n_samples)), "in"))
    return(cowplot::plot_grid(plotlist = list(tree_plot, plt), ncol = 1,
                              rel_heights = c(0.2, 0.8))
    )
  } else {
    return(plt)
  }
}
# - Expression vs. enrichment plot --------------------------------------------
# MA-style plot: per-gene mean expression (x, log2 scale) against fold change
# (y), with significant genes (padj < 0.05) highlighted in red.
ma_plot <- function(results, cpm, label = NULL,
                    ymax = NULL, ylim = NULL,
                    cpm_subset = NULL,
                    plot_genes = NULL) {
  # Optionally restrict the CPM table to a subset of sample columns.
  if (!is.null(cpm_subset)) cpm <- cpm[, c("gene_id", cpm_subset)]
  # Optionally restrict to named genes, translated to ids via the global
  # `genes` lookup table (assumed to carry gene_id / gene_name columns).
  if (!is.null(plot_genes)) {
    wanted_ids <- filter(genes, gene_name %in% plot_genes)$gene_id
    cpm <- filter(cpm, gene_id %in% wanted_ids)
  }
  # Collapse expression to one mean value per gene, then join onto the
  # differential results, dropping genes without an adjusted p-value.
  cpm <- data.frame("gene_id" = cpm$gene_id,
                    "expr" = rowMeans(select(cpm, -gene_id))
  )
  plot_dat <- left_join(results, cpm, by = "gene_id") %>% filter(!is.na(padj))
  # Default y-limit: largest significant |fold change|, falling back to the
  # overall maximum when nothing is significant.
  if (is.null(ymax)) {
    sig <- filter(plot_dat, padj < 0.05)
    ymax <- if (nrow(sig) > 0) {
      max(abs(sig$log2FoldChange))
    } else {
      max(abs(plot_dat$log2FoldChange))
    }
  }
  # Default expression-axis limits; note `ylim` is applied to the x axis.
  if (is.null(ylim)) ylim <- c(1, 6000)
  plt <- ggplot(plot_dat, aes(x = expr, y = log2FoldChange, color = padj < 0.05)) +
    geom_hline(aes(yintercept = -log2(1.5)), linetype = "dashed") +
    geom_hline(aes(yintercept = log2(1.5)), linetype = "dashed") +
    geom_point(alpha = 0.4, stroke = 0, show.legend = FALSE) +
    scale_x_continuous(breaks = c(1, 16, 256, 4096), limits = ylim,
                       expand = c(0, 0), trans = "log2") +
    scale_y_continuous(limits = c(-ymax, ymax), expand = c(0, 0)) +
    scale_color_manual(values = c("gray50", "firebrick3")) +
    geom_hline(aes(yintercept = 0)) +
    theme_bw() +
    labs(y = expression("Fold change (log"[2]*")"),
         x = "Expression (FPM)") +
    theme(panel.grid = element_blank())
  # Optionally annotate selected genes with repelled text labels; ids without
  # a gene name fall back to the id itself.
  if (!is.null(label)) {
    labelled <- plot_dat %>%
      left_join(genes, by = "gene_id") %>%
      mutate(gene_name = ifelse(is.na(gene_name), gene_id, gene_name)) %>%
      filter(gene_name %in% label)
    plt <- plt + geom_text_repel(data = labelled, aes(label = gene_name),
                                 show.legend = FALSE)
  }
  plt
}
|
c1345a4d5208dfdfb2955cf278c26e6e9dfae655 | c51ca43d4e5be4cce45281acd89f2bb3c0294ea2 | /binomialConfidenceIntervalStrip.R | 4c2b4e40046c536bde2cd71f8e2c7f3902aa2e47 | [] | no_license | professorbeautiful/bioinf2118 | e9d6adf2c8b596a1fab239301e511999e0d9f401 | aa70f61333119372680fb66d34be350aa9947c39 | refs/heads/master | 2022-07-19T02:22:20.667851 | 2022-06-27T20:35:11 | 2022-06-27T20:35:11 | 116,890,225 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,021 | r | binomialConfidenceIntervalStrip.R | nFlips = 10
# Visualise exact equal-tailed binomial acceptance regions over a grid of
# theta values, then overlay the resulting confidence-interval strip.
# `nFlips` is defined just above in this script.
thVec = seq(0,1,length=1000)
sampleSpace = 0:nFlips
# One point per possible outcome, plotted at its observed proportion of heads.
plot(sampleSpace, sampleSpace/nFlips, xlab="# heads", ylab="theta")
alpha = 0.05
title(expression( paste("Set product: ", bolditalic(X) ~~ X ~~ Phi ) ))
# For each theta, mark outcomes rejected by a two-sided level-alpha test and
# draw the remaining acceptance region as a yellow horizontal segment.
for(theta in thVec) {
  # P(X >= k) for each outcome k; small upper-tail prob => k is "too big".
  upperTailProb = 1-pbinom(q = (0:nFlips) - 1, size = nFlips, prob = theta)
  valuesTooBig = (upperTailProb < alpha/2)
  points(sampleSpace, rep(theta, nFlips+1), pch=valuesTooBig+1, cex=valuesTooBig)
  # P(X <= k); small lower-tail prob => k is "too small".
  lowerTailProb = pbinom(q = (0:nFlips), size = nFlips, prob = theta)
  valuesTooSmall = (lowerTailProb < alpha/2)
  points(sampleSpace, rep(theta, nFlips+1), pch=valuesTooSmall+1, cex=valuesTooSmall)
  acceptanceRegion = c(max(0, sampleSpace[valuesTooSmall]),
                       min(nFlips,sampleSpace[valuesTooBig]))
  lines(acceptanceRegion, rep(theta, 2), col='yellow')
}
# Overlay a confidence interval for every possible outcome.
# NOTE(review): binom.confint.new() is not defined in this file - presumably
# sourced elsewhere in the project; confirm before running standalone.
for(k in 0:nFlips)
  lines(x = c(k,k), y=binom.confint.new(k, nFlips), col="lightgreen", lwd=2)
# Highlight the interval for the observed outcome of 8 heads.
nHeads = 8
lines(x = c(nHeads,nHeads), y=binom.confint.new(nHeads, nFlips), col="green", lwd=4)
|
63ccf1cca23df7fce2609753f10d42ab628f180a | f32dbf645fa99d7348210951818da2275f9c3602 | /man/GETARAIC.Rd | 8527e01e945414f27ab37b4fa694c711a8b02c84 | [] | no_license | cran/RSEIS | 68f9b760cde47cb5dc40f52c71f302cf43c56286 | 877a512c8d450ab381de51bbb405da4507e19227 | refs/heads/master | 2023-08-25T02:13:28.165769 | 2023-08-19T12:32:32 | 2023-08-19T14:30:39 | 17,713,884 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,068 | rd | GETARAIC.Rd |
\name{GETARAIC}
\alias{GETARAIC}
\title{Auto-Regressive AIC estimate of arrival time}
\description{
Auto-regressive AIC estimate of a seismic phase arrival time, used for automatic signal detection.
}
\usage{
GETARAIC(z4, DT = 0.008, Mar = 8, O1 = 2, O2 = 0.2, WW = 2, T1 = 1, PLOT = FALSE)
}
\arguments{
\item{z4}{signal time series}
\item{DT}{sample rate,s}
\item{Mar}{AR Model Order}
\item{O1}{window before, s}
\item{O2}{window after, s}
\item{WW}{window length, s}
\item{T1}{initial guess, number of samples from beginning of trace}
\item{PLOT}{logical, TRUE =plot}
}
\details{
Implements the auto-regressive AIC method of Sleeman and van Eck for automatic phase (onset) determination.
}
\value{
\item{Taic}{Arrival time of wave}
}
\references{Sleeman, R. and van Eck, T. (1999), Robust automatic P-phase picking: an on-line implementation in the analysis of broadband seismogram recordings, Phys. Earth Planet. Inter., 113, 265--275.}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\seealso{PSTLTcurve}
\examples{
data(CE1)
plot(CE1$x, CE1$y, type='l')
Xamp = CE1$y[CE1$x>4.443754 & CE1$x<6.615951]
Mar=8
z4 = Xamp
DT = CE1$dt
T1 = 50
O1 = 10*DT
O2 = 10*DT
WW = 10*DT
Nz4 = length(z4)
araict = GETARAIC(Xamp, DT=CE1$dt, Mar=8, T1=T1, O1=O1, O2=O2, WW=WW, PLOT=TRUE)
}
\keyword{misc}
\keyword{hplot}
|
f37ce4a0cf93d2fb8a048ecc64bd970d595ea5a9 | 787a1e3d204941edbb9b7f1e8aab0646acb4c810 | /vignettes/titan2-intro.R | e444c6d4694493b6792053e64ca37a6214cf3bf5 | [] | no_license | dkahle/TITAN2 | 34088052b5e96efa8994883eeaf3290dc00acac6 | 142f4fdade5b295cd4687edc907a651b6aa55568 | refs/heads/master | 2021-03-27T20:29:59.582472 | 2020-12-07T15:31:01 | 2020-12-07T15:31:01 | 54,793,609 | 11 | 4 | null | 2019-05-23T17:13:51 | 2016-03-26T18:36:11 | R | UTF-8 | R | false | false | 5,246 | r | titan2-intro.R | ## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo=TRUE, collapse=TRUE, error=TRUE, comment = "#")
## -----------------------------------------------------------------------------
library("TITAN2")
## -----------------------------------------------------------------------------
data(glades.taxa)
str(glades.taxa, list.len = 5)
## -----------------------------------------------------------------------------
data(glades.env)
str(glades.env)
## ---- eval = FALSE------------------------------------------------------------
# glades.titan <- titan(glades.env, glades.taxa)
## ---- eval = FALSE------------------------------------------------------------
# glades.titan <- titan(glades.env, glades.taxa,
# minSplt = 5, numPerm = 250, boot = TRUE, nBoot = 500, imax = FALSE,
# ivTot = FALSE, pur.cut = 0.95, rel.cut = 0.95, ncpus = 1, memory = FALSE
# )
## -----------------------------------------------------------------------------
data(glades.titan)
str(glades.titan, 1)
## ---- echo = FALSE------------------------------------------------------------
message("100% occurrence detected 1 times (0.8% of taxa), use of TITAN less than ideal for this data type")
message("Taxa frequency screen complete")
## ---- echo = FALSE------------------------------------------------------------
message("Determining partitions along gradient")
message("Calculating observed IndVal maxima and class values")
message("Calculating IndVals using mean relative abundance")
message("Permuting IndVal scores")
message("IndVal $z$ score calculation complete")
message("Summarizing Observed Results")
message("Estimating taxa change points using z-score maxima")
## ---- echo = FALSE------------------------------------------------------------
message("Bootstrap resampling in sequence...")
message(1*1)
message(2*1)
message(3*1)
## ---- echo = FALSE------------------------------------------------------------
message("Bootstrap resampling in parallel using 2 CPUs...no index will be printed to screen")
## -----------------------------------------------------------------------------
glades.titan$sumz.cp
## -----------------------------------------------------------------------------
head(glades.titan$sppmax)
## -----------------------------------------------------------------------------
str(glades.titan, max.level = 1, give.attr = FALSE)
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_sumz_density(glades.titan)
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_sumz_density(glades.titan, ribbon = FALSE, points = TRUE)
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_sumz_density(glades.titan,
ribbon = TRUE, points = FALSE, sumz1 = FALSE, change_points = FALSE,
xlabel = expression(paste("Surface Water Total Phosphorus ("*mu*"g/l)"))
)
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_sumz(glades.titan, filter = TRUE)
## ---- fig.height = 10,fig.width = 8-------------------------------------------
plot_taxa_ridges(glades.titan, axis.text.y = 8)
## ---- fig.height = 10,fig.width = 8-------------------------------------------
plot_taxa_ridges(glades.titan,
xlabel = expression(paste("Surface water total phosphorus ("*mu*"g/l)")),
n_ytaxa = 50
)
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_taxa_ridges(glades.titan,
xlabel = expression(paste("Surface water total phosphorus ("*mu*"g/l)")),
z2 = FALSE
)
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_taxa_ridges(glades.titan,
xlabel = expression(paste("Surface water total phosphorus ("*mu*"g/l)")),
z2 = FALSE, grid = FALSE
)
## ---- fig.height = 8,fig.width = 8--------------------------------------------
plot_taxa(glades.titan, xlabel = "Surface Water TP (ug/l)")
## ---- fig.height = 8,fig.width = 8--------------------------------------------
plot_taxa(glades.titan, xlabel = "Surface Water TP (ug/l)", z.med = TRUE)
## ---- fig.height = 8,fig.width = 8--------------------------------------------
plot_taxa(glades.titan, xlabel = "Surface Water TP (ug/l)", z.med = FALSE, prob95 = TRUE)
## ---- fig.height = 10, fig.width = 10-----------------------------------------
plot_cps(glades.titan)
## ---- fig.height = 5,fig.width = 8--------------------------------------------
plot_cps(glades.titan, taxaID = "ENALCIVI", xlabel = "Surface Water TP (ug/l)")
## ---- fig.height = 5,fig.width = 8--------------------------------------------
plot_cps(glades.titan, taxaID = "ENALCIVI", cp.trace = TRUE, xlabel = "Surface Water TP (ug/l)")
## ---- fig.height = 5,fig.width = 8--------------------------------------------
plot_cps(glades.titan, taxaID = "OSTRASP5", cp.trace = TRUE, xlabel = "Surface Water TP (ug/l)")
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_cps(glades.titan, taxa.dist = FALSE, xlabel = "Surface Water TP (ug/l)")
## ---- fig.height = 6,fig.width = 8--------------------------------------------
plot_cps(glades.titan, taxa.dist = FALSE, xlabel = "Surface Water TP (ug/l)", stacked = TRUE)
|
ca14e13118e9fda328aa70d758f9d29e6308b2ce | 277dbb992966a549176e2b7f526715574b421440 | /R_training/실습제출/신부근/chartExam2.R | b1fd024b5c2b191660345a3205f8e8d186cda453 | [] | no_license | BaeYS-marketing/R | 58bc7f448d7486510218035a3e09d1dd562bca4b | 03b500cb428eded36d7c65bd8b2ee3437a7f5ef1 | refs/heads/master | 2020-12-11T04:30:28.034460 | 2020-01-17T08:47:38 | 2020-01-17T08:47:38 | 227,819,378 | 0 | 0 | null | 2019-12-13T12:06:33 | 2019-12-13T10:56:18 | C++ | UTF-8 | R | false | false | 302 | r | chartExam2.R | mpg <- as.data.frame(ggplot2::mpg)
#1 Scatter plot of city vs. highway fuel economy (axis labels in Korean).
# NOTE(review): `%>%` and select() require dplyr (or magrittr) to be
# attached - confirm the session loads it before this script runs.
plot(mpg %>% select(cty,hwy),pch=3,xlab="도시연비", ylab="고속도로연비")
#2 Bar chart of drive-train counts, one fixed color per bar.
barplot(table(mpg$drv),col=c("red","green","blue"))
#3 Per-manufacturer highway-mpg box plots with rotated labels and heat palette.
boxplot(hwy~manufacturer,data=mpg,las=2,col=heat.colors(5),main="*제조사별 고속도로 연비*",col.main="hotpink")
|
87b4ca7c4a296c93606560055cf489cb74680688 | a2b6ffe232365dee6652e8c705ff44bdc8afd95f | /R/ridge_regression.R | 0e3b8036b19e4b8f238caab363cff9af1e4f0f94 | [
"MIT"
] | permissive | maggiegreco/regress431 | b8bb7cbea9739d55805e197aff2a3aa41ed4a0d9 | cf4006515f7b0af232cdedf491b156c46baf0488 | refs/heads/master | 2022-07-26T21:15:08.058075 | 2020-05-14T01:37:22 | 2020-05-14T01:37:22 | 264,350,489 | 0 | 0 | MIT | 2020-05-16T03:25:42 | 2020-05-16T03:25:42 | null | UTF-8 | R | false | false | 1,626 | r | ridge_regression.R | #' Implements ridge regression with many predictors
#'
#' This function computes ridge regression coefficients for a vector of
#' penalty values. All columns of the provided data frame are used as
#' predictors, except the one specified as a response.
#'
#' No interaction terms are included. The intercept is not penalized.
#'
#'
#' @param dat A data frame
#' @param response The name of a response variable in the data frame (unquoted)
#' @param lambda A vector of penalty terms to try
#'
#' @return A data frame with one row per penalty value, containing an
#'   "Intercept" column, one column per predictor, and a "lambda" column.
#'
#' @import dplyr
#'
#' @export
ridge_regression <- function(dat, response, lambda) {
  # Capture the unquoted response column name.
  resp_name <- deparse(substitute(response))
  y <- dat[[resp_name]]
  # Design matrix: explicit intercept column plus every non-response column.
  preds <- setdiff(names(dat), resp_name)
  X <- cbind(Intercept = 1, as.matrix(dat[, preds, drop = FALSE]))
  # Solve the penalized normal equations (X'X + lambda * P) beta = X'y for
  # each penalty; P is the identity with a zero in the intercept position.
  rows <- lapply(lambda, function(lam) {
    penalty <- diag(lam, ncol(X))
    penalty[1, 1] <- 0
    beta <- solve(crossprod(X) + penalty, crossprod(X, y))
    coefs <- as.data.frame(t(beta))
    coefs$lambda <- lam
    coefs
  })
  results <- do.call(rbind, rows)
  rownames(results) <- NULL
  results
}

#' Determines the best penalty term from a set of options
#'
#' Fits a ridge regression on the training data for each candidate penalty
#' and scores it by the sum of squared prediction error on the test data.
#'
#' No interaction terms are included.
#'
#'
#' @param train_dat A data frame to construct the model from
#' @param test_dat A data frame to test the model on
#' @param response The name of a response variable in the data frame (unquoted)
#' @param lambdas A vector of penalty terms to try
#'
#' @return A data frame with columns "lambda" and "error" (test-set SSE)
#'
#' @import dplyr
#'
#' @export
find_best_lambda <- function(train_dat, test_dat, response, lambdas) {
  resp_name <- deparse(substitute(response))
  preds <- setdiff(names(train_dat), resp_name)
  # Train/test design matrices share the same predictor layout.
  X_train <- cbind(Intercept = 1, as.matrix(train_dat[, preds, drop = FALSE]))
  y_train <- train_dat[[resp_name]]
  X_test <- cbind(Intercept = 1, as.matrix(test_dat[, preds, drop = FALSE]))
  y_test <- test_dat[[resp_name]]
  # For each penalty, fit on the training set and record the test-set SSE.
  errors <- vapply(lambdas, function(lam) {
    penalty <- diag(lam, ncol(X_train))
    penalty[1, 1] <- 0  # the intercept is not penalized
    beta <- solve(crossprod(X_train) + penalty, crossprod(X_train, y_train))
    sum((y_test - X_test %*% beta)^2)
  }, numeric(1))
  lambda_errors <- data.frame(lambda = lambdas, error = errors)
  return(lambda_errors)
}
|
420555be89759bcf39684f4f394c146b14f46078 | 5a492644406411750fde2f78ff9aa3a2030c6297 | /YKL_Upregulated_Radar_Plot.R | a252fd90287239b9a4c9ac93898e17cb39bbd9dc | [] | no_license | linlabcode/olson_YKL5124 | e8155fe3f974cbbbf3bfcbb83af4888d9421a01d | 6f69fa77cff5421001186a5e3b9f27c33d7d74f2 | refs/heads/master | 2020-04-11T09:17:03.248361 | 2018-12-20T17:38:20 | 2018-12-20T17:38:20 | 161,671,952 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,821 | r | YKL_Upregulated_Radar_Plot.R | ### YKL UPREGULATED RADAR PLOT ###
### INSTALLING PACKAGES FOR RADAR PLOTS ###
# Install fmsb only when it is missing instead of re-installing on every run.
if (!requireNamespace("fmsb", quietly = TRUE)) install.packages("fmsb")
library(fmsb)
### IMPORTING CUFFNORM DATA ###
# Log2 fold-change / p-value matrices from cuffnorm. Comparisons exported
# with the treatment listed second are sign-flipped so positive values always
# mean "up in treatment relative to DMSO".
setwd('/Users/DanPark1116/Desktop/BCM_Materials/Year_Two/CDK7_Analysis/CDK7_GRAY_cuffnorm/log2fc_pvals/')
ykl_dmso_wt_low = read.delim('CDK7_GRAY_HAP1_WT_DMSO_R_vs_HAP1_WT_YKL_LO_R_exprs_matrix.txt', stringsAsFactors = FALSE)
thz531_dmso_wt = read.delim('CDK7_GRAY_HAP1_WT_531_R_vs_HAP1_WT_DMSO_R_exprs_matrix.txt', stringsAsFactors = FALSE)
thz531_dmso_wt$LOG2_FOLD_CHANGE = thz531_dmso_wt$LOG2_FOLD_CHANGE * -1
thz1_dmso_wt = read.delim('CDK7_GRAY_HAP1_WT_DMSO_R_vs_HAP1_WT_THZ1_R_exprs_matrix.txt', stringsAsFactors = FALSE)
combo_dmso_wt_low = read.delim('CDK7_GRAY_HAP1_WT_COMBO_LO_R_vs_HAP1_WT_DMSO_R_exprs_matrix.txt', stringsAsFactors = FALSE)
combo_dmso_wt_low$LOG2_FOLD_CHANGE = combo_dmso_wt_low$LOG2_FOLD_CHANGE * -1
ykl_dmso_mut_low = read.delim('CDK7_GRAY_HAP1_MUT_DMSO_R_vs_HAP1_MUT_YKL_LO_R_exprs_matrix.txt', stringsAsFactors = FALSE)
thz531_dmso_mut = read.delim('CDK7_GRAY_HAP1_MUT_531_R_vs_HAP1_MUT_DMSO_R_exprs_matrix.txt', stringsAsFactors = FALSE)
thz531_dmso_mut$LOG2_FOLD_CHANGE = thz531_dmso_mut$LOG2_FOLD_CHANGE * -1
thz1_dmso_mut = read.delim('CDK7_GRAY_HAP1_MUT_DMSO_R_vs_HAP1_MUT_THZ1_R_exprs_matrix.txt', stringsAsFactors = FALSE)
combo_dmso_mut_low = read.delim('CDK7_GRAY_HAP1_MUT_COMBO_LO_R_vs_HAP1_MUT_DMSO_R_exprs_matrix.txt', stringsAsFactors = FALSE)
combo_dmso_mut_low$LOG2_FOLD_CHANGE = combo_dmso_mut_low$LOG2_FOLD_CHANGE * -1
### ISOLATING GENES THAT ARE UPREGULATED IN YKL VS DMSO ###
# Up = log2FC > 1 and p < 0.05 in WT cells treated with low-dose YKL.
ykl_dmso_wt_upreg = ykl_dmso_wt_low[ykl_dmso_wt_low$LOG2_FOLD_CHANGE > 1,]
ykl_dmso_wt_upreg = ykl_dmso_wt_upreg[ykl_dmso_wt_upreg$P_VALUE < 0.05,]
ykl_dmso_wt_upreg_background = ykl_dmso_wt_low[!rownames(ykl_dmso_wt_low) %in% rownames(ykl_dmso_wt_upreg), ]
### GENERATING RADAR PLOTS FOR UPREGULATED GENES
ykl_dmso_wt_upreg_genes = rownames(ykl_dmso_wt_upreg)
thz531_dmso_wt_upreg = thz531_dmso_wt[rownames(thz531_dmso_wt) %in% ykl_dmso_wt_upreg_genes,]
thz1_dmso_wt_upreg = thz1_dmso_wt[rownames(thz1_dmso_wt) %in% ykl_dmso_wt_upreg_genes,]
combo_dmso_wt_upreg = combo_dmso_wt_low[rownames(combo_dmso_wt_low) %in% ykl_dmso_wt_upreg_genes,]
# Mean log2FC of the YKL-upregulated gene set under each WT treatment.
mean(ykl_dmso_wt_upreg$LOG2_FOLD_CHANGE) #1.54
mean(thz531_dmso_wt_upreg$LOG2_FOLD_CHANGE) #-0.02
mean(thz1_dmso_wt_upreg$LOG2_FOLD_CHANGE) #0.74
mean(combo_dmso_wt_upreg$LOG2_FOLD_CHANGE) #1.27
# fmsb::radarchart() expects the first two data rows to hold the axis max and
# min, followed by the values row; rows are reordered to (max, min, values).
ykl_upreg_log2fc_values = data.frame(c(1.5, -0.02, 0.74, 1.3))
ykl_upreg_log2fc_values = t(ykl_upreg_log2fc_values)
colnames(ykl_upreg_log2fc_values) = c('YKL', 'THZ531', 'THZ1', 'COMBO')
ykl_upreg_log2fc_values = as.data.frame(ykl_upreg_log2fc_values)
rownames(ykl_upreg_log2fc_values) = 'Log2FC'
ykl_upreg_log2fc_values[2,] = -0.2
ykl_upreg_log2fc_values[3,] = 1.6
ykl_upreg_log2fc_values = ykl_upreg_log2fc_values[c(3,2,1),]
### CREATING RADAR PLOT FOR GENES UPREGULATED WITH YKL ###
par(mar=c(0.5, 0.5, 0.5, 0.5))
radarchart( ykl_upreg_log2fc_values , axistype=1 ,
            #custom polygon
            pcol=rgb(1, 0.2, 0.2, 0.8) , pfcol=rgb(1, 0.2, 0.2, 0.5) , plwd=4 ,
            #custom the grid
            cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,1.6,0.4), cglwd=0.8
            #custom labels
            #vlcex=0.8
)
# Overlay the mutant panel on the same axes.
par(new = TRUE)
# BUG FIX: the mutant subsets previously referenced undefined objects
# `ykl_dmso_mut` and `combo_dmso_mut`; the loaded tables are the *_low ones.
ykl_dmso_mut_upreg = ykl_dmso_mut_low[rownames(ykl_dmso_mut_low) %in% ykl_dmso_wt_upreg_genes,]
thz531_dmso_mut_upreg = thz531_dmso_mut[rownames(thz531_dmso_mut) %in% ykl_dmso_wt_upreg_genes,]
thz1_dmso_mut_upreg = thz1_dmso_mut[rownames(thz1_dmso_mut) %in% ykl_dmso_wt_upreg_genes,]
combo_dmso_mut_upreg = combo_dmso_mut_low[rownames(combo_dmso_mut_low) %in% ykl_dmso_wt_upreg_genes,]
# Mean log2FC of the same gene set under each mutant treatment.
mean(ykl_dmso_mut_upreg$LOG2_FOLD_CHANGE) #0.12
mean(thz531_dmso_mut_upreg$LOG2_FOLD_CHANGE) #-0.14
mean(thz1_dmso_mut_upreg$LOG2_FOLD_CHANGE) #-0.10
mean(combo_dmso_mut_upreg$LOG2_FOLD_CHANGE) #-0.04
ykl_upreg_mut_log2fc_values = data.frame(c(0.12, -0.14, -0.10, -0.04))
ykl_upreg_mut_log2fc_values = t(ykl_upreg_mut_log2fc_values)
colnames(ykl_upreg_mut_log2fc_values) = c('YKL', 'THZ531', 'THZ1', 'COMBO')
ykl_upreg_mut_log2fc_values = as.data.frame(ykl_upreg_mut_log2fc_values)
rownames(ykl_upreg_mut_log2fc_values) = 'Log2FC'
ykl_upreg_mut_log2fc_values[2,] = -0.2
ykl_upreg_mut_log2fc_values[3,] = 1.6
ykl_upreg_mut_log2fc_values = ykl_upreg_mut_log2fc_values[c(3,2,1),]
radarchart( ykl_upreg_mut_log2fc_values , axistype=1 ,
            #custom polygon
            pcol=rgb(0, 0, 0, 0.3) , pfcol=rgb(0,0,0,0.5) , plwd=4 ,
            #custom the grid
            cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,1.6,0.4), cglwd=0.8
            #custom labels
            #vlcex=0.8
)
legend(1.0,-0.5,
       legend=c("WT","Mut"),
       pch=c(15,15),
       col=c("red","grey"))
|
82fba0fe96b8e93a0caf5a2a666ba1ab5ce1b8a1 | cabba0f2805396625c8a900422fbd1ada79c368b | /datascience-master/functions/generate_logit_probability.R | d5cdf99a2d075382b5652e5c6df350b81e42480d | [] | no_license | goughgorski/clusteryourself | b7184e79c1da8b905441298d34a87e8455db4e0e | ba207e05d3478ebf01dc21c258e15d49064a4061 | refs/heads/master | 2020-09-07T06:53:24.829770 | 2019-11-22T20:30:56 | 2019-11-22T20:30:56 | 220,578,163 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 263 | r | generate_logit_probability.R | generate_logit_probability <- function(cluster, coef_table){
tmp <- coef_table[, c('model_name', as.character(cluster))]
tmp$score <- 1/(1 + exp(-coef_table[, as.character(cluster)]))
tmp$cluster <- cluster
return(tmp[, c('model_name', 'cluster', 'score')])
} |
6124f2df62ea9d7f8d73a6d9f503d3ccbc0a01f4 | ef567cee0d5cdce6b6e6a0b4d517acb97c1974af | /R/list_unindexed_files.R | 6f0cb54793d46b74fecd4d2ef177a92117551ca0 | [] | no_license | znerp/labBook | 4086b5779374d2d9726466f1665e87d96f0a89e6 | 455c89d025bb51e59b5efddf222bec83ae5a5b29 | refs/heads/master | 2021-05-11T06:37:59.619526 | 2018-01-18T14:56:07 | 2018-01-18T14:56:07 | 117,993,672 | 0 | 0 | null | 2018-01-18T14:30:40 | 2018-01-18T14:30:40 | null | UTF-8 | R | false | false | 3,663 | r | list_unindexed_files.R |
labBook_listProjects <- function(index_content){
  # Look only at the HTML that follows the projects marker.
  project_section <- labBook_isolateProjectSection(index_content)
  # One chunk per project <div>; the leading pre-project chunk is dropped,
  # then each project's <h3> title is extracted from its chunk.
  chunks <- strsplit(x = project_section, split = '<div class="project">')[[1]]
  vapply(chunks[-1], function(chunk) gsub(".*<h3>(.*?)</h3>.*$", "\\1", chunk),
         character(1), USE.NAMES = FALSE)
}
labBook_listLinkedPages <- function(index_content){
  # Only links inside the project section count as page links.
  project_section <- labBook_isolateProjectSection(index_content)
  # Split at every href attribute (either quote style); each remaining chunk
  # then starts with the link target, which runs up to the closing quote.
  link_chunks <- strsplit(x = project_section, split = 'href=("|\')')[[1]][-1]
  gsub('("|\').*$', '', link_chunks)
}
labBook_listCodePageFiles <- function(labBook_dir){
  # List every file under the "code" and "pages" subdirectories of each
  # directory in the lab book tree (list.dirs searches recursively).
  labBook_dirs <- list.dirs(labBook_dir, full.names = TRUE)
  # Collect files from each existing <dir>/<subdir>, preserving directory
  # order; avoids growing a vector with c() inside a loop.
  list_subdir_files <- function(subdir) {
    target_dirs <- file.path(labBook_dirs, subdir)
    target_dirs <- target_dirs[file.exists(target_dirs)]
    unlist(lapply(target_dirs, list.files, full.names = TRUE))
  }
  code_files <- list_subdir_files("code")
  page_files <- list_subdir_files("pages")
  # Return file list
  output <- c()
  output$code_files <- code_files
  output$page_files <- page_files
  output
}
labBook_isolateProjectSection <- function(index_content){
  # Drop everything up to and including the PROJECTS marker comment.
  project_marker <- "^.*<!-- PROJECTS \\/\\/-->"
  gsub(project_marker, "", index_content)
}
labBook_listUnidexedFiles <- function(labBook_dir){
  # Default to the parent directory: the usual working directory is a
  # project folder directly inside the lab book.
  if (missing(labBook_dir)) {
    labBook_dir <- "../"
  }
  labBook_dir <- normalizePath(labBook_dir)
  # Links recorded on the index page, as absolute paths; each page link
  # implies a matching code link (pages/<name>.html -> code/<name>.R).
  index_content <- labBook_getFileContents(file.path(labBook_dir, "index.html"))
  page_links <- file.path(labBook_dir, labBook_listLinkedPages(index_content))
  code_links <- gsub("/pages/(.*?)\\.html", "/code/\\1.R", page_links)
  # Files actually present on disk.
  existing_files <- labBook_listCodePageFiles(labBook_dir)
  on_disk_pages <- existing_files$page_files
  on_disk_code <- existing_files$code_files
  # Compare disk contents against the index in both directions.
  output <- c()
  output$unlinked_pages <- on_disk_pages[!on_disk_pages %in% page_links]
  output$unlinked_code <- on_disk_code[!on_disk_code %in% code_links]
  output$broken_page_links <- page_links[!page_links %in% on_disk_pages]
  output
}
# unindexed_files <- labBook_listUnidexedFiles()
# unlinked_pages <- unindexed_files$unlinked_pages
#
# index_page <- labBook_getFileContents("~/Desktop/LabBook/index.html")
#
# for(file_num in 16){
# subtitle <- "Visualising data"
# page_name <- gsub(".*/", "", unlinked_pages[file_num])
# page_name <- gsub("\\.html$", "", page_name)
# page_name <- gsub("_", " ", page_name)
# substr(page_name, 1, 1) <- toupper(substr(page_name, 1, 1))
# project_name <- gsub(".*/LabBook/", "", unlinked_pages[file_num])
# project_name <- gsub("/.*$", "", project_name)
# project_name <- gsub("_", " ", project_name)
#
# # Add page link to index page.
# index_page <- labBook_appendLink(index_page = index_page,
# project_name = project_name,
# page_name = page_name,
# subtitle = subtitle)
#
# # Tidy index page
# index_page <- gsub("</html>\n*","</html>",index_page)
#
# }
#
#
# # Write index page
# write(index_page, file = "~/Desktop/LabBook/index.html")
|
abebac2af778b75bf239595a66919475568958bc | e127968280caa872ea7479f88a5b9b1c29849cf1 | /R/examples/get_subsamp.R | 0141ff3150311d008acf20380d30aec5155b07ca | [] | no_license | gilesjohnr/hmob | 7a03882f8487027e5bf775aa3729f45ab0acc3b2 | a999c1f83d55b96b30cb8d48e4a90ac0036268df | refs/heads/master | 2021-07-07T23:58:00.771489 | 2020-08-06T18:49:40 | 2020-08-06T18:49:40 | 168,587,776 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 225 | r | get_subsamp.R | load('./data/duration_data_arrays_3day_full.Rdata') # y.month, y.route, y.pop
subsamp <- get.subsamp(y.route, min.locations=30, min.samp=10)
district.subset <- dimnames(subsamp)$origin
trip.map(as.numeric(district.subset)) |
024e8b1a306cdaa15cad7dea5781ed6b222e6bc8 | 59cf96a46be440960fb6eb384bdcf110630272f3 | /R/getMetaData.r | 281d558c75afa9b0e76675df357857c2ca0ba032 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | Kevin-M-Smith/dataRetrieval | bd750ac230f1012cec75de65dbc0d41047c062fe | 7e939ef1e2bd471cb2d7578f43e3b423ce758a69 | refs/heads/master | 2021-01-24T21:18:47.464196 | 2014-11-04T00:29:09 | 2014-11-04T00:29:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,612 | r | getMetaData.r | #' Import Metadata for USGS Data
#'
#' Populates INFO data frame for EGRET study. If either station number or parameter code supplied, imports data about a particular USGS site from NWIS web service.
#' This function gets the data from here: \url{http://waterservices.usgs.gov/}
#' A list of parameter codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/pmcodes/}
#' If either station number or parameter code is not supplied, the user will be asked to input data.
#' Additionally, the user will be asked for:
#' staAbbrev - station abbreviation, will be used in naming output files and for structuring batch jobs
#' constitAbbrev - constituent abbreviation
#'
#' @param siteNumber string USGS site number. This is usually an 8 digit number
#' @param parameterCd string USGS parameter code. This is usually a 5 digit number.
#' @param interactive logical Option for interactive mode. If true, there is user interaction for error handling and data checks.
#' @keywords data import USGS web service
#' @export
#' @return INFO dataframe with at least param.nm, param.units, parameShortName, paramNumber
#' @examples
#' # These examples require an internet connection to run
#' # Automatically gets information about site 05114000 and temperature, no interaction with user
#' INFO <- getNWISInfo('05114000','00010')
getNWISInfo <- function(siteNumber, parameterCd,interactive=TRUE){
  # Seed INFO from the NWIS site service when a site number was supplied;
  # otherwise start from an empty one-row placeholder for interactive fill-in.
  if (nzchar(siteNumber)){
    INFO <- getNWISSiteInfo(siteNumber)
  } else {
    INFO <- as.data.frame(matrix(ncol = 2, nrow = 1))
    names(INFO) <- c('site.no', 'shortName')
  }
  # Fill in / confirm station-level fields (prompts only in interactive mode).
  INFO <- populateSiteINFO(INFO, siteNumber, interactive=interactive)
  # With a parameter code, attach its NWIS metadata: full name, units,
  # short (SRS) name, and the code itself.
  if (nzchar(parameterCd)){
    parameterData <- getNWISPcodeInfo(parameterCd,interactive=interactive)
    INFO$param.nm <- parameterData$parameter_nm
    INFO$param.units <- parameterData$parameter_units
    INFO$paramShortName <- parameterData$srsname
    INFO$paramNumber <- parameterData$parameter_cd
  }
  INFO <- populateParameterINFO(parameterCd, INFO, interactive=interactive)
  # Default period of analysis: paStart = 10, paLong = 12 months
  # (i.e. the water year starting in October).
  INFO$paStart <- 10
  INFO$paLong <- 12
  return(INFO)
}
#' Import Metadata for Water Quality Portal Data
#'
#' Populates INFO data frame for EGRET study. If siteNumber or parameter code (for USGS) or characteristic name
#' (for non-USGS) is provided, the function will make a call to the Water Quality Portal to get metadata information.
#' staAbbrev - station abbreviation, will be used in naming output files and for structuring batch jobs
#' constitAbbrev - constituent abbreviation
#'
#' @param siteNumber string site number.
#' @param parameterCd string USGS parameter code or characteristic name.
#' @param interactive logical Option for interactive mode. If true, there is user interaction for error handling and data checks.
#' @keywords data import USGS web service WRTDS
#' @export
#' @return INFO dataframe with agency, site, dateTime, value, and code columns
#' @examples
#' # These examples require an internet connection to run
#' # Automatically gets information about site 01594440 and temperature, no interaction with user
#' nameToUse <- 'Specific conductance'
#' pcodeToUse <- '00095'
#' \dontrun{
#' INFO <- getWQPInfo('USGS-04024315',pcodeToUse,interactive=TRUE)
#' INFO2 <- getWQPInfo('WIDNR_WQX-10032762',nameToUse)
#' # To adjust the label names:
#' INFO$shortName <- "Little"
#' INFO$paramShortName <- "SC"
#' }
getWQPInfo <- function(siteNumber, parameterCd, interactive=FALSE){
  # Decide whether parameterCd is a 5-digit USGS parameter code
  # (5 characters, all numeric) or a WQP characteristic name.
  pCodeLogic <- (all(nchar(parameterCd) == 5) & suppressWarnings(all(!is.na(as.numeric(parameterCd)))))
  if (pCodeLogic){
    # USGS parameter code: pull site info from WQP and parameter
    # metadata (name, units, short name) from the NWIS pcode service.
    siteInfo <- getWQPSites(siteid=siteNumber, pCode=parameterCd)
    parameterData <- getNWISPcodeInfo(parameterCd = parameterCd)
    siteInfo$param.nm <- parameterData$parameter_nm
    siteInfo$param.units <- parameterData$parameter_units
    siteInfo$paramShortName <- parameterData$srsname
    siteInfo$paramNumber <- parameterData$parameter_cd
    siteInfo$constitAbbrev <- parameterData$parameter_cd
  } else {
    # Characteristic name: no parameter metadata service available,
    # so the name itself doubles as the labels and units are unknown.
    siteInfo <- getWQPSites(siteid=siteNumber, characteristicName=parameterCd)
    siteInfo$param.nm <- parameterCd
    siteInfo$param.units <- ""
    siteInfo$paramShortName <- parameterCd
    siteInfo$paramNumber <- ""
    siteInfo$constitAbbrev <- parameterCd
  }
  # Mirror WQP column names into the EGRET-conventional names.
  siteInfo$station.nm <- siteInfo$MonitoringLocationName
  siteInfo$shortName <- siteInfo$station.nm
  siteInfo$site.no <- siteInfo$MonitoringLocationIdentifier
  if(interactive){
    # Walk the user through confirming/overriding the labels used in
    # graphs and tables (short names, abbreviations).
    cat("Your site for data is", as.character(siteInfo$site.no),".\n")
    if (!nzchar(siteInfo$station.nm)){
      cat("No station name was listed for site: ", siteInfo$site.no, ". Please enter a station name here(no quotes): \n")
      siteInfo$station.nm <- readline()
    }
    cat("Your site name is", siteInfo$station.nm,",")
    cat("but you can modify this to a short name in a style you prefer. \nThis name will be used to label graphs and tables. \n")
    cat("If you want the program to use the name given above, just do a carriage return, otherwise enter the preferred short name(no quotes):\n")
    siteInfo$shortName <- readline()
    if (!nzchar(siteInfo$shortName)) siteInfo$shortName <- siteInfo$station.nm
    cat("Your water quality data are for parameter number", siteInfo$paramNumber, "which has the name:'", siteInfo$param.nm, "'.\n")
    cat("Typically you will want a shorter name to be used in graphs and tables. The suggested short name is:'", siteInfo$paramShortName, "'.\n")
    cat("If you would like to change the short name, enter it here, otherwise just hit enter (no quotes):")
    shortNameTemp <- readline()
    if (nchar(shortNameTemp)>0) siteInfo$paramShortName <- shortNameTemp
    cat("The units for the water quality data are: ", siteInfo$param.units, ".\n")
    # Fixed typo: "constiuent" -> "constituent".
    cat("It is helpful to set up a constituent abbreviation when doing multi-constituent studies, enter a unique id (three or four characters should work something like tn or tp or NO3).\nIt is case sensitive. Even if you don't feel you need an abbreviation you need to enter something (no quotes):\n")
    siteInfo$constitAbbrev <- readline()
    cat("It is helpful to set up a station abbreviation when doing multi-site studies, enter a unique id (three or four characters should work).\nIt is case sensitive. Even if you don't feel you need an abbreviation for your site you need to enter something(no quotes):\n")
    siteInfo$staAbbrev <- readline()
  } else {
    siteInfo$staAbbrev <- NA
  }
  # Convert drainage area to square kilometers (1 sq mi = 2.5899881 sq km).
  # isTRUE() guards against a missing (NA) unit code, which would
  # otherwise make the if() condition error out.
  if(isTRUE(siteInfo$DrainageAreaMeasure.MeasureUnitCode == "sq mi")){
    siteInfo$drainSqKm <- as.numeric(siteInfo$DrainageAreaMeasure.MeasureValue) * 2.5899881
  } else {
    warning("Please check the units for drainage area. The value for INFO$drainSqKm needs to be in square kilometers.")
    siteInfo$drainSqKm <- as.numeric(siteInfo$DrainageAreaMeasure.MeasureValue)
  }
  if(interactive){
    # Fall back to a manual drainage-area entry when WQP had none.
    if(is.na(siteInfo$drainSqKm)){
      cat("No drainage area was listed in the WQP site file for this site.\n")
      cat("Please enter the drainage area, you can enter it in the units of your choice.\nEnter the area, then enter drainage area code, \n1 is square miles, \n2 is square kilometers, \n3 is acres, \n4 is hectares.\n")
      cat("Area(no quotes):\n")
      siteInfo$drain.area.va <- readline()
      siteInfo$drain.area.va <- as.numeric(siteInfo$drain.area.va)
      cat("Unit Code (1-4, no quotes):")
      qUnit <- readline()
      qUnit <- as.numeric(qUnit)
      # Factors to sq km for: sq mi, sq km, acres, hectares.
      conversionVector <- c(2.5899881, 1.0, 0.0040468564, 0.01)
      siteInfo$drainSqKm <- siteInfo$drain.area.va * conversionVector[qUnit]
    }
  }
  siteInfo$queryTime <- Sys.time()
  # Default period of analysis: the water year (starts in October, 12 months).
  siteInfo$paStart <- 10
  siteInfo$paLong <- 12
  return(siteInfo)
}
#' Import Metadata from User-Generated File
#'
#' Populates INFO data frame for EGRET study. Accepts a user generated file with any metadata that might
#' be important for the analysis.
#' Additionally, EGRET analysis requires:"drainSqKm", "staAbbrev", "constitAbbrev",
#' "param.units", "paramShortName","shortName". If interactive=TRUE, the function will ask for these
#' fields if they aren't supplied in the file.
#'
#' @param filePath string specifying the path to the file
#' @param fileName string name of file to open
#' @param hasHeader logical true if the first row of data is the column headers
#' @param separator string character that separates data cells
#' @param interactive logical Option for interactive mode. If true, there is user interaction for error handling and data checks.
#' @keywords data import USGS web service WRTDS
#' @export
#' @return INFO dataframe with agency, site, dateTime, value, and code columns
#' @examples
#' filePath <- system.file("extdata", package="dataRetrieval")
#' filePath <- paste(filePath,"/",sep="")
#' fileName <- 'infoTest.csv'
#' INFO <- getUserInfo(filePath,fileName, separator=",",interactive=FALSE)
getUserInfo <- function(filePath,fileName,hasHeader=TRUE,separator=",",interactive=FALSE){
  # Build the full path; filePath is expected to already end with the
  # path separator (see the roxygen example above).
  totalPath <- paste0(filePath, fileName)
  # All columns are read as character; numeric fields (e.g. drainSqKm)
  # are converted explicitly where needed.
  siteInfo <- read.delim(
    totalPath,
    header = hasHeader,
    sep = separator,
    colClasses = c('character'),
    fill = TRUE,
    comment.char = "#")
  if(interactive){
    # Prompt the user for any required metadata missing from the file.
    if (!nzchar(siteInfo$station.nm)){
      cat("No station name was listed. Please enter a station name here(no quotes): \n")
      siteInfo$station.nm <- readline()
    }
    cat("Your site name is", siteInfo$station.nm,",")
    cat("but you can modify this to a short name in a style you prefer. \nThis name will be used to label graphs and tables. \n")
    cat("If you want the program to use the name given above, just do a carriage return, otherwise enter the preferred short name(no quotes):\n")
    siteInfo$shortName <- readline()
    if (!nzchar(siteInfo$shortName)) siteInfo$shortName <- siteInfo$station.nm
    if (!nzchar(siteInfo$param.nm)){
      cat("No water quality parameter name was listed.\nPlease enter the name here(no quotes): \n")
      siteInfo$param.nm <- readline()
    }
    cat("Your water quality data are for '", siteInfo$param.nm, "'.\n")
    cat("Typically you will want a shorter name to be used in graphs and tables. The suggested short name is:'", siteInfo$paramShortName, "'.\n")
    cat("If you would like to change the short name, enter it here, otherwise just hit enter (no quotes):")
    shortNameTemp <- readline()
    if (nchar(shortNameTemp)>0) siteInfo$paramShortName <- shortNameTemp
    if (!nzchar(siteInfo$param.units)){
      cat("No water quality parameter unit was listed.\nPlease enter the units here(no quotes): \n")
      # Bug fix: the entered units were previously assigned to
      # siteInfo$param.nm, clobbering the parameter name.
      siteInfo$param.units <- readline()
    }
    cat("The units for the water quality data are: ", siteInfo$param.units, ".\n")
    # Fixed typo: "constiuent" -> "constituent".
    cat("It is helpful to set up a constituent abbreviation when doing multi-constituent studies, enter a unique id (three or four characters should work something like tn or tp or NO3).\nIt is case sensitive. Even if you don't feel you need an abbreviation you need to enter something (no quotes):\n")
    siteInfo$constitAbbrev <- readline()
    cat("It is helpful to set up a station abbreviation when doing multi-site studies, enter a unique id (three or four characters should work).\nIt is case sensitive. Even if you don't feel you need an abbreviation for your site you need to enter something(no quotes):\n")
    siteInfo$staAbbrev <- readline()
    # is.null() guard: if the column is absent entirely, is.na(NULL)
    # yields logical(0), which would make if() error out.
    if(is.null(siteInfo$drainSqKm) || is.na(siteInfo$drainSqKm)){
      cat("No drainage area was listed as a column named 'drainSqKm'.\n")
      cat("Please enter the drainage area, you can enter it in the units of your choice.\nEnter the area, then enter drainage area code, \n1 is square miles, \n2 is square kilometers, \n3 is acres, \n4 is hectares.\n")
      cat("Area(no quotes):\n")
      siteInfo$drain.area.va <- readline()
      siteInfo$drain.area.va <- as.numeric(siteInfo$drain.area.va)
      cat("Unit Code (1-4, no quotes):")
      qUnit <- readline()
      qUnit <- as.numeric(qUnit)
      # Factors to sq km for: sq mi, sq km, acres, hectares.
      conversionVector <- c(2.5899881, 1.0, 0.0040468564, 0.01)
      siteInfo$drainSqKm <- siteInfo$drain.area.va * conversionVector[qUnit]
    }
  } else {
    # Non-interactive: just report which EGRET-required columns are missing.
    requiredColumns <- c("drainSqKm", "staAbbrev", "constitAbbrev",
                         "param.units", "paramShortName","shortName")
    if(!all(requiredColumns %in% names(siteInfo))){
      message("The following columns are expected in the EGRET package:\n")
      message(requiredColumns[!(requiredColumns %in% names(siteInfo))])
    }
  }
  siteInfo$queryTime <- Sys.time()
  # Default period of analysis: the water year (starts in October, 12 months).
  siteInfo$paStart <- 10
  siteInfo$paLong <- 12
  return(siteInfo)
}
|
115e7889f87f3c0956b9447d78b66159e2879c78 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/staRdom/examples/eem_read_csv.Rd.R | 6cdc4cbfdeb35d643eb347148e76083d47dd6fd9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 222 | r | eem_read_csv.Rd.R | library(staRdom)
### Name: eem_read_csv
### Title: Import EEMs from generic csv tables
### Aliases: eem_read_csv
### ** Examples
# Locate the example EEM csv files shipped with the staRdom package.
eems <- system.file("extdata/EEMs",package="staRdom")
# Read every csv table found in that directory into a list of EEMs.
eem_list <- eem_read_csv(eems)
|
f4a0b90e8fbfbc683b2210e4a5adcf1292d1a2ad | 0bfc2cd362f202b5afa608592e4731787ff54a0a | /Iso_DE_Analysis.R | e65de323c29c2637c1b158a993271540137e23ad | [] | no_license | atrassierra/DE-Analysis | 1b2f2726bad9eecbe2a5553ea4e583a518b9dc62 | c44236a255e3807cadfabb4a393d5799b02a741b | refs/heads/master | 2020-07-28T02:34:37.828263 | 2019-09-18T11:34:16 | 2019-09-18T11:34:16 | 209,281,551 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,023 | r | Iso_DE_Analysis.R | rm(list = ls())
library(EBSeq)
setwd("/home/antonio/Escritorio/isoformas/")
# Leemos los datos
data <- read.csv("brca_iso_read_paired.txt", sep = "\t")
# Asignamos los grupos experimentales, en este caso Normales vs Tumorales
a <- colnames(data)
condicion <- as.factor(substr(a, nchar(a), nchar(a)))
# EBSeq necesita por un lado de la matriz de conteos, que puede estar en formato RSEM,
# además de dos vectores con los nombres de las isoformas y los nombres de los genes a los que pertenecen las isoformas.
# Hay que tener en cuenta que en el vector de nombres de genes, estos genes se van a repetir si tienen más de una isoforma.
# Es decir, gen 1, gen 2, gen 2, gen 3 se corresponde con iso 1-1, iso 2-1, iso 2-2, iso 3-1, etc. Es importante que ocupen
# las posiciones que corresponden en el vector.
# La matriz de conteos la tenemos directamente en data
# Para el nombre de genes y de isoformas:
genesiso <- rownames(data)
# quitar <- grep("\\?", genesiso) # Quitar genes hipotéticos
genes <- unlist(sapply(strsplit(genesiso, ","), "[", 1))
isoformas <- unlist(sapply(strsplit(genesiso, ","), "[", 2))
data.matriz <- data.matrix(data)
rownames(data.matriz) <- isoformas
data.size <- MedianNorm(data.matriz) # Isoform-level library size factors
# En el siguiente paso tenemos que elegir en cuantos grupos queremos que nos separe los genes dependiendo del número
# de isoformas que estos tengan. Por ejemplo, si nuestros genes tienen entre 1 y 3 isoformas pondremos 3 grupos.
# Para saber cuántos tenemos, podemos ver cuantos genes tenemos duplicados n veces. O el gen con más duplicados.
duplicados <- table(genes)
NgList <- GetNg(isoformas, genes, TrunThre = 3)
IsoNgTrun <- NgList$IsoformNgTrun
# Análisis de expresión diferencial
IsoEBOut <- EBTest(Data = data.matriz, NgVector = IsoNgTrun, Conditions = condicion, sizeFactors = data.size, maxround = 5)
# Isoformas diferencialmente expresadas
deiso <- GetDEResults(IsoEBOut, FDR = .05)
deiso
IsoFC <- PostFC(IsoEBOut)
IsoFC$PostFC |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.