blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea61855892927b1e3ea4a1c72ad8422c82f9d056
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GGEBiplots/examples/DiscRep.Rd.R
|
55f38f414de554ee8f8d17bbc7b9e8369bb49fc9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
DiscRep.Rd.R
|
# Extracted example for GGEBiplots::DiscRep — draws the discrimination vs.
# representativeness view of a GGE (genotype + genotype-by-environment) model.
library(GGEBiplots)
### Name: DiscRep
### Title: Discrimination vs. representativeness biplot
### Aliases: DiscRep
### Keywords: GGE
### ** Examples
# The Ontario data set ships with GGEBiplotGUI.
library(GGEBiplotGUI)
data(Ontario)
# Fit the GGE model, then draw the discrimination/representativeness biplot.
GGE1<-GGEModel(Ontario)
DiscRep(GGE1)
|
ef40d4a06e6deb4c684d51fc81016fba5170f891
|
66d54063f1b6c995fbb2510a54b934d3fab73c21
|
/Jaime_shiny_examples - copia/Usando_drive/Google_sheets/survey_example/survey_v2/app.R
|
176d596fa26daea09a2e2c4b8bfd3b66c6f5b77a
|
[] |
no_license
|
Jsvelezm/Shiny_usefull_examples
|
d6ae95711285f36e77ff33c105e45da91b15e453
|
897a5ba5416d675b9f1417b223c51ca7dcb871cd
|
refs/heads/master
| 2022-10-13T11:13:20.915362
| 2020-06-10T18:33:32
| 2020-06-10T18:33:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,862
|
r
|
app.R
|
## app.R ##
###### Data loading ######
##### libraries ####
library(shiny)
library(googlesheets)
# NOTE(review): duplicate library(shiny) call — harmless but redundant.
library(shiny)
# init some useful variables
Logged = FALSE
start = TRUE
data_id = 1
# Set the application's API credentials.
# SECURITY NOTE(review): a Google OAuth client secret is committed here in
# plain text; it should be revoked and moved to an environment variable or
# an untracked config file.
options(googleAuthR.client_id = "807629257711-q5d10ng1egi2qfru5drik4gj9ieknu2s.apps.googleusercontent.com",
googleAuthR.client_secret = "OCb3yBlkDVRMtSG0tTOWmPcY")
# Reuse a previously cached OAuth token from disk.
gs_auth(token = "sheet_token.rds")
key_id <- "1w2EIP70p_TMPD1UEBtAyP7ZNOPPb6WUyqMiZZrWo9Nw"
# Load the sheet contents once at startup.
sheet <- gs_key(key_id)
base <- gs_read_csv(sheet)
# save and read google sheets
# Append one survey response as a new row to the Google Sheet identified by
# the script-level `key_id`. `data` is the vector built by Results().
saveData <- function(data) {
  target_sheet <- gs_key(key_id)
  gs_add_row(target_sheet, input = data)
}
# Fetch worksheet `hoja` (default: the first one) of the Google Sheet with
# key `key_id` and return it as a data frame.
loadData <- function(key_id, hoja = 1) {
  gs_read_csv(gs_key(key_id), ws = hoja)
}
# ui1(): the survey form page — name, surname, birth city, current city,
# sex, and age inputs, plus the "send" submit button. The CSS rule at the
# bottom centres the #survey div on screen.
ui1 <- function(){
tagList(
div(id = "survey",
wellPanel(# Input: first name ----
textInput("nombres", "Nombre de quien responde"),
# Input: surname ----
textInput("apellidos", "Apellidos de quien responde"),
# Input: birth city ----
selectInput("c_nacimiento", "Ciudad de nacimiento",
choices = c("Bogotá","Cali","Medellín","Jaimitolandia","deux_machine")),
# Input: current city ----
selectInput("c_actual", "Ciudad ",
choices = c("Bogotá","Cali","Medellín","Jaimitolandia","deux_machine")),
# Input: sex ----
selectInput("sexo", "Sexo biológico",
choices = c("Hombre","Mujer")),
# Input: age ----
selectInput("edad", "edad",
choices = c(1:100)),
br(),actionButton("send", "Enviar respuesta"))),
tags$style(type="text/css", "#survey {font-size:10px; text-align: left;position:absolute;top: 40%;left: 50%;margin-top: -100px;margin-left: -150px;}")
)}
# ui2(): the thank-you page shown after submitting — offers a "back" button
# to submit another response and displays the accumulated responses table.
ui2 <- function(){
fluidPage(
sidebarLayout(
sidebarPanel("gracias por responder",
br(),actionButton("back", "Enviar otra respuesta")),
mainPanel(DT::dataTableOutput("table")
)
)
)}
# Top-level UI: a single HTML placeholder that the server fills with either
# ui1() (survey form) or ui2() (thank-you page) via output$page.
ui <- function() {
  htmlOutput("page")
}
# Server: swaps between the survey page (ui1) and the thank-you page (ui2)
# driven by the reactive USER$Logged flag, and appends each submission to
# the Google Sheet via saveData().
server = (function(input, output,session) {
USER <- reactiveValues(Logged = Logged)
ids <- reactiveValues(data_id = data_id)
# Render the survey form on first load.
if(start == TRUE){
output$page <- renderUI({
div(class="outer",do.call(bootstrapPage,c("",ui1())))
})
}
# "send": persist the answers, reload the sheet, switch to the thank-you view.
observeEvent(input$send,{
if (USER$Logged == FALSE) {
USER$Logged <- TRUE
output$page <- renderUI({
div(class="outer",do.call(bootstrapPage,c("",ui2())))
})
saveData(Results())
base_interna$base_int = loadData(key_id)
# NOTE(review): this assigns a handler-local `start`, not the script-level
# variable, so it has no effect; confirm whether `start <<- FALSE` was meant.
start = FALSE
ids$data_id = ids$data_id + 1
}})
# "back": return to the survey form for another response.
observeEvent(input$back,{
if (USER$Logged == TRUE) {
output$page <-renderUI({
div(class="outer",do.call(bootstrapPage,c("",ui1())))
})
USER$Logged <- FALSE
}})
# Assemble the row to append: a running id, all answers, and a timestamp.
Results <- reactive(
c(nrow(base) + ids$data_id , input$nombres, input$apellidos, input$c_nacimiento, input$c_actual, input$sexo, input$edad, Sys.time())
)
# In-memory copy of the sheet, refreshed after each submission.
base_interna <- reactiveValues(base_int = base)
output$table <- DT::renderDataTable({base_interna$base_int})
})
#deploy the app
shinyApp(ui, server)
|
d95736f8c2182aacf4d3d5c75f51fca2357b5346
|
e037c771ce9ad1f9d3583597937a43d29000fd3b
|
/bussiness_prac/비즈니스활용사례R05.R
|
d26b4371da7cf9cc4f9cad9cd5fe1722c717f161
|
[] |
no_license
|
Sup90/R-
|
ff2920046d9033ee995331dd5cb6b7196f8e539b
|
7eb6e1c35615a2aed1a5374c5429d4e4d946a876
|
refs/heads/master
| 2021-01-01T16:41:49.953354
| 2018-11-20T12:42:22
| 2018-11-20T12:42:22
| 97,892,427
| 2
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,600
|
r
|
비즈니스활용사례R05.R
|
# Chapter 5: A/B testing
getwd()
setwd("c:/Users/rhkdt/Desktop/R-/bussiness_prac/R")
# Impression log: one row per ad exposure.
ab_imp<-read.csv("section5-ab_test_imp.csv",header = T,sep = ",",stringsAsFactors = F)
head(ab_imp)
# Goal log: one row per successful conversion.
ab_goal<-read.csv("section5-ab_test_goal.csv",header = T,stringsAsFactors = F)
head(ab_goal)
ab_imp_goal<-merge(x = ab_imp,y = ab_goal,by = "transaction_id",suffixes = c("",".g"),all.x = T)
# imp holds every impression and goal the success cases; `suffixes` takes two
# values — the first is appended to x's columns, the second to y's.
head(ab_imp_goal)
ab_imp_goal$flag<-ifelse(is.na(ab_imp_goal$user_id.g),0,1)
# flag is 0 when user_id.g is NA (no conversion), 1 otherwise.
head(ab_imp_goal)
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider guarding with requireNamespace().
install.packages("plyr")
library(plyr)
head(ab_imp_goal[is.na(ab_imp_goal$user_id.g)==F,])
ddply(ab_imp_goal,.variables = .(test_case),summarize,cvr=sum(flag)/length(user_id))
# Build a conversion-rate table with ddply:
# grouping variable: test_case
# aggregation: summarize
# value: conversions divided by all users within each test case.
chisq.test(ab_imp_goal$test_case,ab_imp_goal$flag)
# Run a chi-squared test of independence.
# p-value < 2.2e-16
# A p-value below 0.05 is conventionally taken as a statistically
# significant difference,
# so splitting the traffic into the A and B variants was meaningful.
ab_imp_goal_summary<-
ddply(ab_imp_goal,.(log_date,test_case),summarize,
imp=length(user_id),
cv=sum(flag),
cvr=sum(flag)/length(user_id),
cvr.avg=sum(cv)/sum(imp))
# Rebuild the data set with ddply:
# log_date and test_case are the grouping variables;
# imp, cv and cvr are computed per group
# (the per-row/per-date aggregation happens here).
head(ab_imp_goal_summary)
ab_imp_goal_summary<-ddply(ab_imp_goal_summary,.(test_case),transform,cvr.avg=sum(cv)/sum(imp))
# transform appends new aggregate columns to the original data;
# here the average is recomputed over all rows of each test case.
install.packages("ggplot2")
library(ggplot2)
library(scales)
ab_imp_goal_summary$log_date<-as.Date(ab_imp_goal_summary$log_date)
# Convert the character date column to Date class.
limits<-c(0,max(ab_imp_goal_summary$cvr))
# y-axis limits for the plot.
ggplot(ab_imp_goal_summary,aes(log_date,cvr,col=test_case,lty=test_case,shape=test_case))+geom_line(lwd=1)+
geom_line(aes(y=cvr.avg,col=test_case))+
geom_point(size=4)+
scale_y_continuous(label=percent,limits = limits)
# lty=test_case gives each test case its own line type
# shape gives each test case a different point marker
# geom_line(lwd=2) would thicken the lines
# geom_point(size=10) would enlarge the points
# geom_line(aes(y=cvr.avg,col=test_case)) overlays the per-case average line
# scale_y_continuous(label=percent,limits=limits) formats the y axis as percent
|
aff414f49fc393c4a973987384b3c862b604de67
|
7d8c2e74c2e395a9c6cf427e77b0fb5e96584012
|
/Week3/Code/boilerplate.R
|
21af261572e8503751fec6919aab7943e7b08621
|
[] |
no_license
|
hjosullivan/CMEECourseWork
|
52ef24377923b496cd88fb8295c4d0c77e91b7dc
|
b0e68602d8cab0e37ebdc0f22d8a9c673a978cc6
|
refs/heads/master
| 2021-07-12T19:00:17.011985
| 2019-03-14T07:54:00
| 2019-03-14T07:54:00
| 151,391,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 640
|
r
|
boilerplate.R
|
##########################
## A boilerplate script ##
##########################
## Author: Hannah O'Sullivan h.osullivan18@imperial.ac.uk
## Script: boilerplate.R
## Desc: Introduction to writing R functions
## Date: October 2018
#clean environment
# NOTE(review): rm(list = ls()) only clears the global environment (it does
# not detach packages or reset options) and is generally discouraged in
# scripts; prefer restarting the R session for a truly clean state.
rm(list = ls())
# Demonstration function: print a one-line description (value and class)
# of each argument, then return both arguments joined into a single vector.
MyFunction <- function(Arg1, Arg2) {
  for (current in list(Arg1, Arg2)) {
    # Report the argument's value and its class.
    print(paste("Argument", as.character(current), "is a", class(current)))
  }
  # Concatenating mixed types coerces to the common type (c()'s rules).
  c(Arg1, Arg2)
}
#Test the function with both numeric and character arguments.
MyFunction(1, 2)
# numeric -> returns c(1, 2), printing each argument's class first
MyFunction("Riki", "Tiki")
#character -> returns c("Riki", "Tiki")
|
f821944cd5287a905d1695cd6e105ad75632459d
|
3e832b24c9967221ee76aabbb0f64fce9506ac2a
|
/signal analysis.R
|
12b536d3d22172c506fbb25b75098e6c1942e54e
|
[] |
no_license
|
misophist/microstructure-analysis
|
205f2938c23c93fb29d55799d5e81898a12a8760
|
ed5b9dc94917100d3eb990fa97ecf58a1f14351b
|
refs/heads/master
| 2021-01-19T19:37:42.810067
| 2014-07-01T07:44:55
| 2014-07-01T07:44:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,979
|
r
|
signal analysis.R
|
# Toy stand-in data set: six uniform-random columns (100 rows) mimicking
# trailing-VWAP, centre-of-gravity and forward-VWAP signal factors.
# NOTE(review): no set.seed(), so results differ between runs.
dtt <- data.frame(
trl.vwap.1 = runif(100),
trl.vwap.2 = runif(100),
cog.1 = runif(100),
cog.2 = runif(100),
fwd.vwap.1 = runif(100),
fwd.vwap.2 = runif(100)
)
# for each RIC
# break dataset into 60-20-20 training-validation-test sets
# with the training set:
### run regression for each fwd.adv with all the cog and trailing vwap factors (except normalizer)
### run regression for each fwd.adv with only trailing vwap factors (except normalizer)
# with each validation set:
### predict with each model and measure the rms error
# table results of RMS error and winner for each #adv
# with the test set and the winner of the validation set
### predict with each model (trailing and full winner) and save the RMS errors
# Cumulative cut points at 60% / 80% / 100% of the rows.
n <- round( nrow(dtt) * c( 0.6, 0.8, 1 ))
idx <- sample.int(n[3])
# BUG FIX: the original indexed valid with idx[n[1]:n[2]] and test with
# idx[n[2]:n[3]], which placed the boundary rows n[1] and n[2] into two sets
# at once (train/valid and valid/test overlapped). The ranges below are
# disjoint and together cover every row exactly once.
ds <- list(
  train = dtt[ idx[ seq_len(n[1]) ], ],
  valid = dtt[ idx[ (n[1] + 1):n[2] ], ],
  test  = dtt[ idx[ (n[2] + 1):n[3] ], ]
)
# Column groups selected by name prefix.
fwd.labels <- names(dtt)[ grep( "fwd.*", names(dtt) ) ]
trl.labels <- names(dtt)[ grep( "trl.*", names(dtt) ) ]
hist.label <- "trl.vwap.1" # closest correspondence to what we have now in prod
full.labels <- names(dtt)[ grep( "(trl|cog).*", names(dtt) ) ]
# Root of the summed squared differences (un-normalised RMS distance).
rms.fn <- function(x, y) {
  diffs <- x - y
  sqrt(sum(diffs^2))
}
# Accumulators: fitted models, and one block of RMS-error rows per response.
m <- list()
rms <- data.frame()
# For each forward-VWAP response: fit a "full" model (trailing + cog factors)
# and a "trl" model (trailing factors only); record train/validation/test RMS
# errors for both, plus a "hist" baseline that uses the raw trl.vwap.1 column
# directly as the prediction.
for (fwd.label in fwd.labels)
{
  # FIX: build the `<response> ~ .` formula with reformulate() instead of the
  # original eval(parse(text = paste(...))), which is fragile and unsafe.
  fm <- reformulate(".", response = fwd.label)
  m$full.lm <- lm( fm, data = ds$train[, c(fwd.label, full.labels)] )
  m$trl.lm  <- lm( fm, data = ds$train[, c(fwd.label, trl.labels )] )
  # Full-factor model predictions and errors.
  prd.train.full.lm <- predict( m$full.lm )
  prd.valid.full.lm <- predict( m$full.lm, newdata = ds$valid[, c(fwd.label, full.labels)] )
  prd.test.full.lm  <- predict( m$full.lm, newdata = ds$test[ , c(fwd.label, full.labels)] )
  rms.train.full <- rms.fn(prd.train.full.lm, ds$train[, fwd.label])
  rms.valid.full <- rms.fn(prd.valid.full.lm, ds$valid[, fwd.label])
  rms.test.full  <- rms.fn(prd.test.full.lm,  ds$test[ , fwd.label])
  # Trailing-only model predictions and errors.
  prd.train.trl.lm <- predict( m$trl.lm )
  prd.valid.trl.lm <- predict( m$trl.lm, newdata = ds$valid[, c(fwd.label, trl.labels)] )
  prd.test.trl.lm  <- predict( m$trl.lm, newdata = ds$test[ , c(fwd.label, trl.labels)] )
  rms.train.trl <- rms.fn(prd.train.trl.lm, ds$train[, fwd.label])
  rms.valid.trl <- rms.fn(prd.valid.trl.lm, ds$valid[, fwd.label])
  rms.test.trl  <- rms.fn(prd.test.trl.lm,  ds$test[ , fwd.label])
  # Historical baseline: the trailing column itself, no model.
  rms.train.hist <- rms.fn(ds$train[, hist.label], ds$train[, fwd.label])
  rms.valid.hist <- rms.fn(ds$valid[, hist.label], ds$valid[, fwd.label])
  rms.test.hist  <- rms.fn(ds$test[ , hist.label], ds$test[ , fwd.label])
  # Only a handful of responses, so growing with rbind() is acceptable here.
  rms <- rbind(rms,
    data.frame(
      output = fwd.label,
      model = c("full", "trl", "hist"),
      train.rms = c( rms.train.full, rms.train.trl, rms.train.hist ),
      valid.rms = c( rms.valid.full, rms.valid.trl, rms.valid.hist ),
      test.rms  = c( rms.test.full,  rms.test.trl,  rms.test.hist )
    ))
}
# Within each model type, keep the row with the smallest validation RMS.
# NOTE(review): grouping is by rms$model ("full"/"trl"/"hist"), i.e. best
# output per model; the plan comments above talk about a winner per response,
# which would require grouping by rms$output instead — confirm the intent.
best.rms <- do.call("rbind", by(rms, rms$model, function(x) x[which(x$valid.rms == min(x$valid.rms)),] ))
|
e44bccca3fed2117724aa6455a5e0e4b9ace8499
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/aspace/examples/as_radians.Rd.R
|
66e6e7c7d36827c392cf2f78744ef1c6a26722ff
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 164
|
r
|
as_radians.Rd.R
|
# Extracted example for aspace::as_radians — degree-to-radian conversion.
library(aspace)
### Name: as_radians
### Title: Converts degrees to radians
### Aliases: as_radians
### Keywords: array
### ** Examples
# Convert 90 degrees (expected: pi/2 radians).
as_radians(theta = 90)
|
eff195558859690cebfe9edc1759da0374343303
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleruntimeconfigv1beta1.auto/man/Waiter.Rd
|
cac3c215d205c30290ee3ed47c99404c7b671df1
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,649
|
rd
|
Waiter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runtimeconfig_objects.R
\name{Waiter}
\alias{Waiter}
\title{Waiter Object}
\usage{
Waiter(error = NULL, failure = NULL, success = NULL, done = NULL,
createTime = NULL, timeout = NULL, name = NULL)
}
\arguments{
\item{error}{[Output Only] If the waiter ended due to a failure or timeout, this value}
\item{failure}{[Optional] The failure condition of this waiter}
\item{success}{[Required] The success condition}
\item{done}{[Output Only] If the value is `false`, it means the waiter is still waiting}
\item{createTime}{[Output Only] The instant at which this Waiter resource was created}
\item{timeout}{[Required] Specifies the timeout of the waiter in seconds, beginning from}
\item{name}{The name of the Waiter resource, in the format:}
}
\value{
Waiter object
}
\description{
Waiter Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A Waiter resource waits for some end condition within a RuntimeConfig resource to be met before it returns. For example, assume you have a distributed system where each node writes to a Variable resource indicating the node's readiness as part of the startup process. You then configure a Waiter resource with the success condition set to wait until some number of nodes have checked in. Afterwards, your application runs some arbitrary code after the condition has been met and the waiter returns successfully. Once created, a Waiter resource is immutable. To learn more about using waiters, read the [Creating a Waiter](/deployment-manager/runtime-configurator/creating-a-waiter) documentation.
}
|
337504139025b8c210e8b7ca479b3ff39d56c44f
|
cdc0504ea03ec5c439006f1e47bbc618fb983ba0
|
/man/corona_lockdown.Rd
|
be1f0d7c557580480f0421496af3c6dcfdabebad
|
[] |
no_license
|
jvanschalkwyk/corona
|
a0ae3df8ff81199b848747f2133d685c40b5f1d4
|
5d4621092cc8bb3772595ea5b50390cfcd564098
|
refs/heads/master
| 2022-12-24T20:43:37.738883
| 2020-10-01T01:34:55
| 2020-10-01T01:34:55
| 270,596,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,018
|
rd
|
corona_lockdown.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lockdown.R
\name{corona_lockdown}
\alias{corona_lockdown}
\title{Draw multiple smoothed graphs of new daily cases, with lockdown date, if present}
\usage{
corona_lockdown(
pdf = FALSE,
minpeople = 4e+06,
mincases = 200,
cols = 7,
striptextsize = 10,
textsize = 10,
legendx = 0.94,
legendy = 0.02
)
}
\arguments{
\item{pdf}{print to PDF}
\item{minpeople}{Minimum population for the country}
\item{mincases}{Minimum number of COVID-19 cases}
\item{cols}{Number of columns to display, default = 7}
\item{striptextsize}{size of text in country names}
\item{textsize}{Size of text header}
\item{legendx}{X position of legend}
\item{legendy}{Y position of legend}
}
\description{
By default limited to countries with population > 4M, and over 200 cases.
This may take over 5s to run, depending on your hardware.
}
\examples{
\dontrun{
corona_lockdown( cols=14 )
}
}
\keyword{corona}
\keyword{lockdown}
\keyword{smoothed}
|
32a64ee9d8beee407e389429dced0a65ced9da7a
|
a82ebc7c1dcc3eb671542f10645ab3d457853565
|
/r_modular/classifier_mpdsvm_modular.R
|
76424bef76d2ee73e8364b0a02289cfc402b74e2
|
[] |
no_license
|
joobog/shogun-eval
|
dac24f629744521760061c7979aa579129daa666
|
12b1ba2a67d5c661c6a11580634fb1a036e61af2
|
refs/heads/master
| 2021-03-12T23:24:41.686252
| 2016-11-23T10:04:04
| 2016-11-23T10:04:04
| 31,391,835
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,494
|
r
|
classifier_mpdsvm_modular.R
|
# Multiclass SVM example using shogun's modular R interface: a classifier is
# trained on a toy data set and used to predict labels for test examples.
#
# NOTE(review): the original header described a two-class Minimal Primal Dual
# (MPD) SVM with C=1 and a Gaussian kernel of width 1.2, but the code below
# actually trains MulticlassLibSVM with C=1.2, kernel width 2.1 and epsilon
# 1e-5 on multiclass labels. The file name ("classifier_mpdsvm_modular.R")
# and the code disagree — confirm which was intended.
library(shogun)
# Feature matrices are stored one example per row on disk; shogun expects
# one example per column, hence the transpose.
fm_train_real <- t(as.matrix(read.table('../data/fm_train_real.dat')))
fm_test_real <- t(as.matrix(read.table('../data/fm_test_real.dat')))
label_train_multiclass <- as.double(read.table('../data/label_train_multiclass.dat')$V1)
# libsvmmulticlass
print('LibSVMMulticlass')
# Wrap the raw matrices in shogun feature objects.
feats_train <- RealFeatures()
dump <- feats_train$set_feature_matrix(fm_train_real)
feats_test <- RealFeatures()
dump <- feats_test$set_feature_matrix(fm_test_real)
# Gaussian kernel over the training features.
width <- 2.1
kernel <- GaussianKernel(feats_train, feats_train, width)
# SVM hyperparameters.
C <- 1.2
epsilon <- 1e-5
num_threads <- as.integer(8)
labels <- MulticlassLabels()
labels$set_labels(label_train_multiclass)
# Train the multiclass SVM.
svm <- MulticlassLibSVM(C, kernel, labels)
dump <- svm$set_epsilon(epsilon)
dump <- svm$parallel$set_num_threads(num_threads)
dump <- svm$train()
# Re-initialise the kernel on (train, test) so apply() predicts test labels.
dump <- kernel$init(feats_train, feats_test)
lab <- svm$apply()
out <- lab$get_labels()
|
faf65c57baeeedd41782cf74a973c4a4a53db13b
|
104e7052ad28ab830b441968543f07d36938b45a
|
/try_test_3.R
|
5d033b2cc789a4ae910b4829d9f547c1bc4b48f2
|
[
"MIT"
] |
permissive
|
talkdatatome/kaggle
|
bfafd58abdd468417d6747c2b2e231c1d473ad41
|
c666b19a115935ff565e4288532955ed47e07662
|
refs/heads/master
| 2021-01-21T13:14:14.206603
| 2016-04-30T17:12:55
| 2016-04-30T17:12:55
| 53,010,547
| 0
| 1
| null | 2016-04-06T02:16:14
| 2016-03-03T01:35:56
|
R
|
UTF-8
|
R
| false
| false
| 532
|
r
|
try_test_3.R
|
# Scratch script: builds a combined test document-term matrix from two DTMs
# saved in test_of_test.RData (dtm_STT and dtmPD), prefixing their term names
# so the two vocabularies cannot collide after cbind().
load("test_of_test.RData")
library(tm)
library(gamlr)
library(SnowballC)
# Prefix terms: "ST_" for the search-term DTM, "DS_" for the description DTM.
dtm_STT$dimnames[[2]] <- paste("ST", dtm_STT$dimnames[[2]], sep="_")
dtmPD$dimnames[[2]] <- paste("DS", dtmPD$dimnames[[2]], sep="_")
testX <- cbind(dtm_STT, dtmPD)
### TRY - I'm still missing terms
# try adding ncol and dimnames for empty names
# Persist the whole workspace back to the same file.
save.image("test_of_test.RData")
#library(tm)
#library(SnowballC)
#library(gamlr)
# this fails because we didn't add back in terms that were in the train dtm_ST but not in dtm_STT
#pred <- predict(m1, testX)
|
ee5aabf6b70c6fbe6ae26300b898e5aa6bec8b73
|
34445bf76bb3ec1d0e75c063c16f842a1afe5e97
|
/R/myncurve.R
|
8c44b26a4f73d17e9c6302f4b626f1ad1822d8f7
|
[] |
no_license
|
medgar591/MATH4753EDGAR
|
4b96e7167d7bc7695f1c2991b9dae16ceeffb8f4
|
a6fefb7dfce41c18de4d842e38d8d15982101a30
|
refs/heads/master
| 2023-04-04T20:39:53.901521
| 2021-04-20T00:22:36
| 2021-04-20T00:22:36
| 334,214,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 667
|
r
|
myncurve.R
|
#' @title myncurve
#'
#' Plot a normal density curve over mu +/- 3*sigma and shade the lower tail
#' up to `a`.
#'
#' @param mu Mean of the normal distribution
#' @param sigma Standard deviation of the normal distribution
#' @param a Upper limit of the shaded region
#'
#' @return The lower-tail probability P(Y <= a). (The original documentation
#'   said P(Y >= a), but pnorm(a, mu, sigma) computes the lower tail.)
#'   The shaded density plot is drawn as a side effect.
#' @export
#'
#' @examples
#' myncurve(0, 1, 0) # shades the left half of the curve and returns 0.5
myncurve <- function(mu, sigma, a){
  # Display the curve over +/- 3 standard deviations around the mean.
  lbound <- mu - 3 * sigma
  rbound <- mu + 3 * sigma
  curve(dnorm(x, mean = mu, sd = sigma), xlim = c(lbound, rbound))
  # FIX: start the shaded polygon at lbound rather than the hard-coded
  # lbound - 2, so the shading never extends past the plotted x range
  # (the old offset was unrelated to sigma and spilled outside xlim).
  xcurve <- seq(lbound, a, length = 1000)
  ycurve <- dnorm(xcurve, mean = mu, sd = sigma)
  polygon(c(lbound, xcurve, a), c(0, ycurve, 0), col = "Light Blue")
  # Lower-tail probability P(Y <= a), returned to the caller.
  pnorm(a, mean = mu, sd = sigma)
}
|
1fa545e066b9e0980d508ecf21d927874dc86872
|
c5f4ffb6e2525c91657a3721c29ec95e4549ec2e
|
/apps/prep.R
|
54d6dc7ec11c94f7e0a0526898122fff584934cf
|
[] |
no_license
|
g64164017/yt-subtitle-search
|
52378ade2c484f4cb6b8d84fed2a8c5625f03589
|
a531cfa0cf20b1eadf23f7e10596a0ec1fbd28d9
|
refs/heads/master
| 2021-05-05T23:00:20.761405
| 2018-03-09T23:44:51
| 2018-03-09T23:44:51
| 116,452,603
| 0
| 2
| null | 2018-01-19T17:13:57
| 2018-01-06T04:17:12
|
CSS
|
UTF-8
|
R
| false
| false
| 1,052
|
r
|
prep.R
|
## SET WORKING DIRECTORY
# working dir = current file path
setwd(normalizePath("."))
library(tuber)
## AUTHENTICATION
## Manage API on https://console.developers.google.com/apis/credentials
# Placeholder credentials — each user must supply their own OAuth pair.
app_id="find your own"
app_secret="find you own"
yt_oauth(app_id, app_secret, token="")
## COLLECTING DOCS
channel_id = "UC4a-Gbdw7vOaccHmFo40b9g" # Khan
tgl = "2017-01-01"
# Search the channel for closed-captioned videos matching the query.
videos = yt_search("computer+science"
, channel_id=channel_id
, video_caption="closedCaption"
)
length(videos$video_id)
## CAPTIONING
# NOTE(review): coll grows with c() on every iteration; preallocating or
# using lapply() + unlist() would avoid repeated copies.
coll = c()
for(vid in videos$video_id){
# vid = as.character(res$video_id[i])
cap_tracks = list_caption_tracks(video_id=vid,lang = "en")
# Take the first English caption track only.
cap_id = as.character(cap_tracks$id[cap_tracks$language=="en"])[1] # lang = "en" only
caps = get_captions(id=cap_id)
caps = as.character(caps)
caps = paste(caps, sep="", collapse="")
# The caption payload arrives as a hex string: split into 2-char hex pairs,
# convert each to a raw byte, then decode the bytes back to text.
caps = sapply(seq(1, nchar(caps), by=2), function(x) substr(caps, x, x+1))
caps = rawToChar(as.raw(strtoi(caps, 16L)))
coll = c(coll,caps)
}
# One row per video: its id and the decoded caption text.
df = data.frame(videos$video_id,coll)
write.csv("data.csv", x=df)
|
37f0dbe4424aa27d4a8220870171825a07a9a8ac
|
d2ee3f02b09c20a6c35bba1e11e7c78865569417
|
/scripts/plot_umap_sf.R
|
8d7f0fdc5ebca91850936fbea77db67f73c52e64
|
[] |
no_license
|
marcalva/DIEM2019
|
7bcfa2efcc4a87bf1e38ae941cee40137ca524ba
|
87ca5081e095b6eac6560462b3375c65f373e7bb
|
refs/heads/master
| 2020-08-05T17:52:32.161696
| 2020-04-22T07:38:26
| 2020-04-22T07:38:26
| 212,642,580
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,559
|
r
|
plot_umap_sf.R
|
# Splice fractions
# NOTE(review): setwd("../") makes the script depend on being launched from
# the scripts/ directory; all subsequent paths are relative to the repo root.
setwd("../")
library(diem)
suppressMessages(library(DropletUtils))
library(ggplot2)
library(gplots)
library(ggpubr)
library(gridExtra)
library(ggExtra)
library(RColorBrewer)
source("scripts/common/plotting.R")
#=========================================
# Functions
#=========================================
# Shared ggplot theme fragment: larger text, bold centred titles.
ct <- theme(text=element_text(size=16),
plot.title=element_text(size=18, hjust=0.5, face="bold")
)
# y-axis expansion factor used by downstream plots.
yexp <- 1.1
# Faceted UMAP scatter coloured by a per-cell metadata column.
#   x          list of Seurat-style objects (read via @meta.data and
#              @reductions$umap@cell.embeddings)
#   names      one panel label per element of x (facet titles)
#   colname    metadata column to colour by
#   order_col  if TRUE, plot low values first so high values draw on top
# NOTE(review): the default `alpha=alpha` is self-referential and would error
# if evaluated, but `alpha` is never used in the body (geom_point hard-codes
# alpha=0.8) — the parameter is dead; confirm before relying on it.
plot_umap_fct <- function(x, names, colname="SpliceFrctn", legend_name="Fraction Spliced",
color_limits=NULL, color_breaks=waiver(),
size=1, alpha=alpha, order_col = TRUE){
# One data frame per object: colour value + UMAP coordinates + panel label.
# (The colour column is called "Mito" for historical reasons; it holds
# whatever `colname` selects.)
dfl <- lapply(1:length(x), function(i) {
data.frame(Mito=x[[i]]@meta.data[,colname],
UMAP1=x[[i]]@reductions$umap@cell.embeddings[,"UMAP_1"],
UMAP2=x[[i]]@reductions$umap@cell.embeddings[,"UMAP_2"],
Method=names[[i]])
})
df <- do.call(rbind, dfl)
if (order_col) df <- df[order(df$Mito, decreasing=FALSE),,drop=FALSE]
p <- ggplot(df, aes(x=UMAP1, y=UMAP2, color=Mito)) +
geom_point(size=size, alpha=0.8) + theme_bw() +
facet_wrap(~Method, ncol=3, scale="free") +
theme(text=element_text(size=16),
axis.text=element_blank(),
axis.ticks=element_blank(),
plot.title=element_text(hjust=0.5),
panel.grid=element_blank()) +
scale_colour_gradient(low="gray90", high="red3",
name=legend_name,
limits=color_limits,
breaks=color_breaks)
#scale_color_distiller(palette = "Spectral", name=legend_name,
# limits=color_limits, breaks=color_breaks)
return(p)
}
#=========================================
#=========================================
# Panel order used in every figure: Quantile, EmptyDrops, DIEM.
methd_names <- c("Quantile", "EmptyDrops", "DIEM")
#=========================================
# Read in data
#=========================================
# Load one serialized Seurat object per data set x filtering method.
# Adipocyte
seur_diem_ad <- readRDS("data/processed/adpcyte/diem/adpcyte.seur_obj.rds")
seur_quant_ad <- readRDS("data/processed/adpcyte/quantile/adpcyte.seur_obj.rds")
seur_ED_ad <- readRDS("data/processed/adpcyte/emptydrops/adpcyte.seur_obj.rds")
# Mouse brain
seur_diem_mb <- readRDS("data/processed/mouse_nuclei_2k/diem/mouse_nuclei_2k.seur_obj.rds")
seur_quant_mb <- readRDS("data/processed/mouse_nuclei_2k/quantile/mouse_nuclei_2k.seur_obj.rds")
seur_ED_mb <- readRDS("data/processed/mouse_nuclei_2k/emptydrops/mouse_nuclei_2k.seur_obj.rds")
# Adipose tissue
seur_diem_at <- readRDS("data/processed/atsn/diem/atsn.seur_obj.rds")
seur_quant_at <- readRDS("data/processed/atsn/quantile/atsn.seur_obj.rds")
seur_ED_at <- readRDS("data/processed/atsn/emptydrops/atsn.seur_obj.rds")
# Group the objects per data set for the summary loops below.
seur_ad = list("DIEM" = seur_diem_ad, "EmptyDrops" = seur_ED_ad, "Quantile" = seur_quant_ad)
seur_mb = list("DIEM" = seur_diem_mb, "EmptyDrops" = seur_ED_mb, "Quantile" = seur_quant_mb)
seur_at = list("DIEM" = seur_diem_at, "EmptyDrops" = seur_ED_at, "Quantile" = seur_quant_at)
#=========================================
# Main Figure
#=========================================
# One faceted UMAP row per data set, coloured by fraction of spliced reads.
pu1 <- plot_umap_fct(list(seur_quant_ad, seur_ED_ad, seur_diem_ad), methd_names,
colname="SpliceFrctn", legend_name="Fraction Spliced", size=0.5) + ggtitle("DiffPA")
pu2 <- plot_umap_fct(list(seur_quant_mb, seur_ED_mb, seur_diem_mb), methd_names,
colname="SpliceFrctn", legend_name="Fraction Spliced", size=0.5) + ggtitle("Mouse Brain")
pu3 <- plot_umap_fct(list(seur_quant_at, seur_ED_at, seur_diem_at), methd_names,
colname="SpliceFrctn", legend_name="Fraction Spliced", size=0.5) + ggtitle("Adipose Tissue")
dir_plot <- paste0("results/plots/")
pdfname <- paste0(dir_plot, "SpliceFrctn.umap.pdf")
jpgname <- paste0(dir_plot, "SpliceFrctn.umap.jpeg")
# Stack the three rows into one PDF, then rasterise it to JPEG via
# ImageMagick's `convert` (must be available on PATH).
pdf(pdfname, width=10, height=12)
ggarrange(pu1, pu2, pu3, nrow=3)
dev.off()
system(paste("convert", "-density", "200", pdfname, jpgname))
#=========================================
# Proportion of droplets with high spliced reads
#=========================================
# Fraction of droplets whose `trait` metadata value exceeds `thresh`.
# When `thresh` is NULL it defaults to mean + 2 SD of the trait.
# NA trait values count toward the denominator but never the numerator.
above_sd <- function(seur, trait = "SpliceFrctn", thresh = NULL){
  ta <- seur@meta.data[, trait]
  if (is.null(thresh)){
    thresh <- 2 * sd(ta, na.rm = TRUE) + mean(ta, na.rm = TRUE)
  }
  # FIX: the original did table(ta > thresh)[["TRUE"]], which throws a
  # subscript error whenever no value exceeds the threshold; sum() with
  # na.rm = TRUE counts the same TRUEs and simply yields 0 in that case.
  sum(ta > thresh, na.rm = TRUE) / length(ta)
}
# Per data set: proportion of droplets with >50% spliced reads, per method.
a_ad <- sapply(seur_ad, above_sd, thresh = 50)
a_mb <- sapply(seur_mb, above_sd, thresh = 50)
a_at <- sapply(seur_at, above_sd, thresh = 50)
datf <- data.frame("DiffPA" = a_ad, "Mouse brain" = a_mb, "Adipose Tissue" = a_at)
rownames(datf) <- c("DIEM", "EmptyDrops", "Quantile")
# Long format for ggplot: one row per (method, data set) pair, as percent.
datfm <- reshape2::melt(as.matrix(datf))
colnames(datfm) <- c("Method", "DataSet", "PD")
datfm$PD <- 100 * datfm$PD
# Barplot of total MT markers
# Map the dot-mangled data.frame column names back to display labels.
labeler <- c("DiffPA", "Mouse brain", "Adipose Tissue")
names(labeler) <- c("DiffPA", "Mouse.brain", "Adipose.Tissue")
p <- ggplot(datfm, aes(x=Method, y=PD, fill=Method)) +
geom_bar(stat="identity", color="black", position=position_dodge()) +
facet_wrap(~DataSet, scale="free_y", ncol = 1, labeller = labeller(DataSet = labeler)) +
theme_minimal() + ylab("Background droplets (percent of filtered)") +
theme(legend.position = "none",
axis.text.x = element_text(angle = 45, vjust = 0.9, hjust = .8),
axis.title.x = element_blank(),
text=element_text(size=18),
plot.title=element_text(hjust=0.5),
panel.grid.major.x=element_blank())
pdf("results/plots/prop_sf_clusters.pdf", width=3, height=7)
# NOTE(review): a bare `p` auto-prints only at top level (e.g. via Rscript);
# inside source()/functions use print(p) to render into the PDF device.
p
dev.off()
|
5227a2fb45fe1c74693d71f135fa2b9fbcf72313
|
22331b9b9a318c24ade5724acf67e9daa9ec830e
|
/ui.R
|
9588d570b790309421fecec8f385e5dda95392fe
|
[] |
no_license
|
paolo64/predwage
|
e14e785e3a2944d636abc450b29060cd4d8b708c
|
37f2a1056b0c2e82e34370c2b618d23331f6d345
|
refs/heads/master
| 2021-01-18T20:31:18.687531
| 2015-07-26T20:48:56
| 2015-07-26T20:48:56
| 39,738,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,908
|
r
|
ui.R
|
# ui.R for the Wage Predictor shiny app; the Wage data come from ISLR.
library(shiny)
library(quantmod)
library(ISLR);
library(ggplot2);
#require(rCharts)
options(RCHART_LIB = 'polycharts')
data(Wage)
#
# shinyUI
#
# Single-page layout: header/description, three input columns (education,
# race, age / marital status, job class / health, insurance, year), and a
# tabset with the predictor output, a data-exploration boxplot, the model
# summary and the raw training table.
shinyUI(fluidPage(
# Application title
title = "Wage Predictor",
fluidRow(
column(4,
img(src='wages.png', align = "left")
),
column(8,
h1("Wage Predictor", align = "center"),
br(),
p("The application implements a wage predictor based on
income survey data for 3000 males in Mid-Atlantic region of USA, loaded from ISLR package."),
br(),
br()
),
p("Selecting 8 variables (\"Education\", \"Race\", \"Age\", \"Marital Status\", \"Job Class\", \"Health\", \"Health Insurance\", \"Year\") the system will provide a prediction of wage the worker could obtain.
If the combination of variables chosen by user is the same as those in training data, the exact value of wage will be displayed and also the residual
(the difference between the observed value and the estimated value)."),
p("The app is structured in 7 different areas:"),
HTML("<ul>
<li>Description area </li>
<li>Input area, where the user can select variables</li>
<li>Predictor tab, with input data and estimated values of wage</li>
<li>Data Exploration tab, with boxplot of wage versus education related to input age variable</li>
<li>Summary of train model</li>
<li>Table of train data</li>
<li>Code in server.R and ui.R files</li>
</ul>
"),
h4("How To Use Predictor Tab"),
# NOTE(review): the string below contains the typo "obesrved" — it is
# user-facing runtime text, left untouched here; fix in a code change.
HTML("
Select input variables (all of them are already set with default values) to obtain the output.
</br>
The output could be the only predicted wage (in USD) or predicted age with obesrved age and its residual.
"),
h4("Data Exploration Tab"),
HTML("
The only input variable used is Age, that you can change via slidebar.
</br>
See in the boxplot how the wage is affected changing education level and age.
"),
br(),
em("More technical details are provided in", a("Technical Notes.", href="https://paolo0164.shinyapps.io/predwage/technotes.html")),
hr(),
fluidRow(
# COL 1
column(4,
radioButtons("education", h5("Education:"),
choices = list("1. < HS Grad"="1. < HS Grad", "2. HS Grad"="2. HS Grad",
"3. Some College"="3. Some College", "4. College Grad"="4. College Grad",
"5. Advanced Degree"="5. Advanced Degree"), selected="2. HS Grad"),
radioButtons("race", h5("Race:"),
choices = list("1. White"="1. White", "2. Black"="2. Black","3. Asian"="3. Asian",
"4. Other"="4. Other"),selected="1. White"),
sliderInput("age",
"Age:",
min = 18,
max = 80,
value = 42)
),
# COL 2
column(4,
radioButtons("maritl", h5("Marital Status"),
choices = list("Never Married"="1. Never Married", "2. Married"="2. Married","3. Widowed"="3. Widowed",
"4. Divorced"="4. Divorced", "5. Separated"="5. Separated"),selected="2. Married"),
radioButtons("jobclass", h5("Job Class"),
choices = list("1. Industrial"="1. Industrial", "2. Information"="2. Information"),selected="1. Industrial")
),
# COL 3
column(4,
radioButtons("health", h5("Health:"),
choices = list("1. Good"="1. <=Good", "2. Very Good"="2. >=Very Good"),selected="2. >=Very Good"),
radioButtons("health_ins", h5("Health Insurance:"),
choices = list("1. Yes"="1. Yes", "2. No"="2. No"),selected="1. Yes"),
# NOTE(review): the year list repeats the label "2015" for both 2015 and
# 2016 — the last entry should presumably be "2016"=2016; these are runtime
# values, so only flagged here, not changed.
selectInput("year", h5("Year:"),
choices = list("2003"=2003, "2004"=2004,"2005"=2005,"2006"=2006,"2007"=2007,
"2008"=2008,"2009"=2009,"2010"=2010,"2011"=2011,"2012"=2012,
"2013"=2013,"2014"=2014,"2015"=2015,"2015"=2016), selected = 2003)
)
),
mainPanel(
tabsetPanel(
tabPanel("Predictor",
h3("Input Data:"),
tableOutput("data4display"),
h3("Predictor results [annual salary in USD]:"),
tableOutput("resultpred")
#plotOutput("ageboxplot")
),
tabPanel("Wage/Education/Age",
h3( "Data Exploration:"),
h6("Change the Age variable from slidebar and see in the boxplot how the wage is affected changing education level and age."),
plotOutput("ageboxplot")
),
tabPanel("Model Summary", verbatimTextOutput("summary")),
tabPanel("Table", dataTableOutput("dtable"))
) )
))
)
|
f2a53d605555187c6e893c4def0f59839da2bbae
|
59c770cd3731ed3bbc177ea90eafda077d5cec6f
|
/R/degseq.R
|
6c5a0e339a1aea9ec4a501446d9b2b1f03dc7849
|
[] |
no_license
|
vishalbelsare/rigraph
|
e52af967467ebe453bd07cfba0555354cc182a36
|
b1ae1de3aca4e2b7eedb4d0f00b8a5f1df35b78d
|
refs/heads/dev
| 2023-01-21T13:25:31.175473
| 2022-04-27T11:02:53
| 2022-04-27T11:02:53
| 129,304,592
| 0
| 0
| null | 2022-04-28T12:22:47
| 2018-04-12T19:58:07
|
R
|
UTF-8
|
R
| false
| false
| 4,601
|
r
|
degseq.R
|
## -----------------------------------------------------------------------
##
## IGraph R package
## Copyright (C) 2015 Gabor Csardi <csardi.gabor@gmail.com>
## 334 Harvard street, Cambridge, MA 02139 USA
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301 USA
##
## -----------------------------------------------------------------------
#' Check if a degree sequence is valid for a multi-graph
#'
#' \code{is_degseq} checks whether the given vertex degrees (in- and
#' out-degrees for directed graphs) can be realized by a graph. Note that the
#' graph does not have to be simple, it may contain loop and multiple edges.
#' For undirected graphs, it also checks whether the sum of degrees is even.
#' For directed graphs, the function checks whether the lengths of the two
#' degree vectors are equal and whether their sums are also equal. These are
#' known sufficient and necessary conditions for a degree sequence to be valid.
#'
#' @aliases is.degree.sequence is_degseq
#' @param out.deg Integer vector, the degree sequence for undirected graphs, or
#' the out-degree sequence for directed graphs.
#' @param in.deg \code{NULL} or an integer vector. For undirected graphs, it
#' should be \code{NULL}. For directed graphs it specifies the in-degrees.
#' @return A logical scalar.
#' @author Tamas Nepusz \email{ntamas@@gmail.com} and Szabolcs Horvat \email{szhorvat@@gmail.com}
#' @references Z Kiraly, Recognizing graphic degree sequences and generating
#' all realizations. TR-2011-11, Egervary Research Group, H-1117, Budapest,
#' Hungary. ISSN 1587-4451 (2012).
#'
#' B. Cloteaux, Is This for Real? Fast Graphicality Testing, \emph{Comput. Sci. Eng.} 17, 91 (2015).
#'
#' A. Berger, A note on the characterization of digraphic sequences, \emph{Discrete Math.} 314, 38 (2014).
#'
#' G. Cairns and S. Mendan, Degree Sequence for Graphs with Loops (2013).
#'
#' @keywords graphs
#'
#' @family graphical degree sequences
#' @examples
#' g <- sample_gnp(100, 2/100)
#' is_degseq(degree(g))
#' is_graphical(degree(g))
#' @export
#' @include auto.R
is_degseq <- function(out.deg, in.deg=NULL) {
  # A multigraph degree sequence is exactly a sequence that is "graphical"
  # when every edge type (loops and multiple edges) is permitted, so this
  # simply delegates to is_graphical() with the most permissive setting.
  is_graphical(out.deg, in.deg, allowed.edge.types="all")
}
#' Is a degree sequence graphical?
#'
#' Determine whether the given vertex degrees (in- and out-degrees for
#' directed graphs) can be realized in a graph.
#'
#' The classical concept of graphicality assumes simple graphs. This function
#' can perform the check also when self-loops, multi-edges, or both are allowed
#' in the graph.
#'
#' @aliases is.graphical.degree.sequence
#' @param out.deg Integer vector, the degree sequence for undirected graphs, or
#' the out-degree sequence for directed graphs.
#' @param in.deg \code{NULL} or an integer vector. For undirected graphs, it
#' should be \code{NULL}. For directed graphs it specifies the in-degrees.
#' @param allowed.edge.types The allowed edge types in the graph. \sQuote{simple}
#' means that neither loop nor multiple edges are allowed (i.e. the graph must be
#' simple). \sQuote{loops} means that loop edges are allowed but mutiple edges
#' are not. \sQuote{multi} means that multiple edges are allowed but loop edges
#' are not. \sQuote{all} means that both loop edges and multiple edges are
#' allowed.
#' @return A logical scalar.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @references Hakimi SL: On the realizability of a set of integers as degrees
#' of the vertices of a simple graph. \emph{J SIAM Appl Math} 10:496-506, 1962.
#'
#' PL Erdos, I Miklos and Z Toroczkai: A simple Havel-Hakimi type algorithm to
#' realize graphical degree sequences of directed graphs. \emph{The Electronic
#' Journal of Combinatorics} 17(1):R66, 2010.
#' @keywords graphs
#'
#' @family graphical degree sequences
#' @examples
#' g <- sample_gnp(100, 2/100)
#' is_degseq(degree(g))
#' is_graphical(degree(g))
#' @export
#' @include auto.R
is_graphical <- is_graphical
|
2c4e447f61be4f90d27f19a1438ef59e88735dee
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/envlpaster/examples/targetboot.Rd.R
|
fc0dc35ac0cb0abff590f1f85407c42715cb7f69
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 594
|
r
|
targetboot.Rd.R
|
# Auto-extracted example code for envlpaster::targetboot (from the Rd file).
# Only the library() call executes; the full bootstrap workflow is kept inside
# the "Not run" guard because it is too slow for automated example checking.
library(envlpaster)
### Name: targetboot
### Title: targetboot
### Aliases: targetboot
### ** Examples
## Not run:
##D set.seed(13)
##D library(envlpaster)
##D library(aster2)
##D data(simdata30nodes)
##D data <- simdata30nodes.asterdata
##D nnode <- length(vars)
##D xnew <- as.matrix(simdata30nodes[,c(1:nnode)])
##D m1 <- aster(xnew, root, pred, fam, modmat)
##D target <- 5:9
##D indices <- c(1,2,4,5)
##D u <- length(indices)
##D nboot <- 2000; timer <- nboot/2
##D bar <- targetboot(m1, nboot = nboot, index = target,
##D   u = u, data = data, m = timer)
##D bar
## End(Not run)
|
951a5bf80c452cc5fc90d37b2e5d285751f72874
|
25682e28a0cc24ab1e13ebf3b124cad6e7ec06f3
|
/scripts/step02-DEG-3-packages.R
|
7c368629a290bb92f08e05ad2f771ff2219d3bcc
|
[] |
no_license
|
ixxmu/tcga_example
|
0258a2910e4213026e9b0ebd2ab2a2a4fcf517cd
|
8dec1553267f76e1223cb69b2384b550fedce13c
|
refs/heads/master
| 2020-05-09T20:24:36.408744
| 2019-05-23T01:25:08
| 2019-05-23T01:25:24
| 181,406,409
| 0
| 0
| null | 2019-04-15T03:32:44
| 2019-04-15T03:32:44
| null |
UTF-8
|
R
| false
| false
| 5,165
|
r
|
step02-DEG-3-packages.R
|
## step02: differential-expression (DEG) analysis of TCGA-KIRC miRNA counts
## with three packages (DESeq2, edgeR, limma-voom), then a concordance check
## of the three log-fold-change estimates.
library("BiocParallel")
register(MulticoreParam(2)) ## register 2 worker cores for parallelised DESeq2
rm(list=ls())  # NOTE(review): wipes the workspace mid-script; confirm intended
options(stringsAsFactors = F)
library(DESeq2)
library(stringr)
# NOTE(review): `getwd` here is a *variable* shadowing the base function name;
# file.path(getwd(), ...) further below calls the function (current working
# directory), not this path — confirm which location is actually intended.
getwd='../Rdata/'
Figure_dir='../figures/'
# Load the miRNA expression matrix and matching clinical info produced by the
# previous step (extracted from the RTCGA.miRNASeq package).
load( file =
        file.path(getwd,'TCGA-KIRC-miRNA-example.Rdata')
)
expr_raw <- miRNA_tcga_xena # assign the miRNA matrix to the main working variable
## ** "main" variable: the de-facto conventional name used throughout the pipeline
rownames(expr_raw)<-expr_raw[,1]
expr_raw <- expr_raw[,-1]
meta <- miRNA_clinical # pull out the clinical information
rownames(meta)<-str_replace_all(meta[,1],"-","." )## replace "-" with "." in barcodes
meta <-meta[,-1]
meta <- meta[str_sub(colnames(expr_raw),1,15),]## keep only samples present in the expression matrix
# NOTE(review): `expr` is not defined until two lines below — dim(expr) here
# will error on a fresh session (or report a stale object); probably dim(expr_raw).
dim(expr)
dim(meta)
expr <- 2^expr_raw-1
exprSet<- expr## data for DESeq2
exprSet<-round(exprSet) ## alternatively expr <- ceiling(expr) — integerise counts for DESeq2
## GDC/TCGA documents how each data track is stored; values must be converted
## back to raw counts before count-based modelling.
# 537 patients but 593 samples; each sample has 552 miRNA features.
# Re-aligning the raw sequencing data would recover more miRNAs.
# The TCGA barcode convention (positions 14-15: <10 = tumour) classifies samples:
group_list=ifelse(as.numeric(substr(colnames(expr),14,15)) < 10,'tumor','normal')
table(group_list)
group_list <- factor(group_list)
exprSet=na.omit(expr)
source('~/r_prac/pre_dara/functions1.R')
### ---------------
###
### Firstly run DESeq2
###
### ---------------
if(T){
  library(DESeq2)
  (colData <- data.frame(row.names=colnames(exprSet),
                         group_list=group_list) )
  dds <- DESeqDataSetFromMatrix(countData = exprSet,
                                colData = colData,
                                design = ~ group_list)
  # Cache the fitted DESeq2 object; fitting is slow, so reuse it if present.
  tmp_f=file.path(getwd(),'TCGA-KIRC-miRNA-DESeq2-dds.Rdata')
  if(!file.exists(tmp_f)){
    dds <- DESeq(dds)
    save(dds,file = tmp_f)
  }
  load(file = tmp_f)
  # Tumour vs normal contrast, ordered by adjusted p-value.
  res <- results(dds,
                 contrast=c("group_list","tumor","normal"))
  resOrdered <- res[order(res$padj),]
  head(resOrdered)
  DEG =as.data.frame(resOrdered)
  DESeq2_DEG = na.omit(DEG)
  nrDEG=DESeq2_DEG[,c(2,6)]
  colnames(nrDEG)=c('log2FoldChange','pvalue')
  draw_h_v(exprSet,nrDEG,'DEseq2',group_list,1)
}
### ---------------
###
### Then run edgeR
###
### ---------------
if(T){
  library(edgeR)
  d <- DGEList(counts=exprSet,group=factor(group_list))
  # Keep miRNAs with CPM > 1 in at least two samples.
  keep <- rowSums(cpm(d)>1) >= 2
  table(keep)
  d <- d[keep, , keep.lib.sizes=FALSE]
  d$samples$lib.size <- colSums(d$counts)
  d <- calcNormFactors(d)
  d$samples
  dge=d
  design <- model.matrix(~0+factor(group_list))
  rownames(design)<-colnames(dge)
  colnames(design)<-levels(factor(group_list))
  # NOTE(review): `dge=d` is assigned twice (here and three lines above);
  # the duplicate is harmless but looks like a leftover.
  dge=d
  dge <- estimateGLMCommonDisp(dge,design)
  dge <- estimateGLMTrendedDisp(dge, design)
  dge <- estimateGLMTagwiseDisp(dge, design)
  fit <- glmFit(dge, design)
  # https://www.biostars.org/p/110861/
  lrt <- glmLRT(fit, contrast=c(-1,1))
  nrDEG=topTags(lrt, n=nrow(dge))
  nrDEG=as.data.frame(nrDEG)
  head(nrDEG)
  edgeR_DEG =nrDEG
  nrDEG=edgeR_DEG[,c(1,5)]
  colnames(nrDEG)=c('log2FoldChange','pvalue')
  draw_h_v(exprSet,nrDEG,'edgeR',group_list,1)
}
### ---------------
###
### Lastly run voom from limma
###
### ---------------
if(T){
  suppressMessages(library(limma))
  design <- model.matrix(~0+factor(group_list))
  colnames(design)=levels(factor(group_list))
  rownames(design)=colnames(exprSet)
  design
  dge <- DGEList(counts=exprSet)
  dge <- calcNormFactors(dge)
  logCPM <- cpm(dge, log=TRUE, prior.count=3)
  v <- voom(dge,design,plot=TRUE, normalize="quantile")
  fit <- lmFit(v, design)
  group_list
  cont.matrix=makeContrasts(contrasts=c('tumor-normal'),levels = design)
  fit2=contrasts.fit(fit,cont.matrix)
  fit2=eBayes(fit2)
  tempOutput = topTable(fit2, coef='tumor-normal', n=Inf)
  DEG_limma_voom = na.omit(tempOutput)
  head(DEG_limma_voom)
  nrDEG=DEG_limma_voom[,c(1,4)]
  colnames(nrDEG)=c('log2FoldChange','pvalue')
  draw_h_v(exprSet,nrDEG,'limma',group_list,1)
}
# NOTE(review): nrDEG1 is referenced here but only defined further below —
# this save() fails on a fresh run; confirm the intended ordering.
save(DEG,DEG_limma_voom,edgeR_DEG,group_list,expr,expr_raw,exprSet,meta,nrDEG,nrDEG1,file="TCGA-KIRC-miRNA-DEG_results2.Rdata") # save the key objects
tmp_f=file.path(getwd(),'TCGA-KIRC-miRNA-DEG_results.Rdata')
# NOTE(review): condition looks inverted — this saves when the file exists and
# tries to load when it does NOT exist (which errors). Expected !file.exists().
if(file.exists(tmp_f)){
  save(DEG_limma_voom,DESeq2_DEG,edgeR_DEG, file = tmp_f)
}else{
  load(file = tmp_f)
}
# Extract (logFC, p-value) pairs from each method for the concordance check.
nrDEG1=DEG_limma_voom[,c(1,4)]
colnames(nrDEG1)=c('log2FoldChange','pvalue')
nrDEG2=edgeR_DEG[,c(1,5)]
colnames(nrDEG2)=c('log2FoldChange','pvalue')
nrDEG3=DESeq2_DEG[,c(2,6)]
colnames(nrDEG3)=c('log2FoldChange','pvalue')
# NOTE(review): rownames(nrDEG1) is repeated three times — presumably
# rownames(nrDEG2) and rownames(nrDEG3) were intended in the union.
mi=unique(c(rownames(nrDEG1),rownames(nrDEG1),rownames(nrDEG1)))
lf=data.frame(lf1=nrDEG1[mi,1],
              lf2=nrDEG2[mi,1],
              lf3=nrDEG3[mi,1])
cor(na.omit(lf))
# Different packages apply different normalisation, so the logFC estimates
# differ slightly between methods.
|
a6f16253e0e322733793556b7e9894b2924267a2
|
a06e8825887605e10507e41a923458c03040a362
|
/man/hr_train.Rd
|
ae2f536c2538b1810cf2983af8aeb9c592e125ad
|
[
"MIT"
] |
permissive
|
rsquaredacademy/mbar
|
592a7c73af88229a96a032f4c8821d94270b9640
|
4914774326ee5b96b2d605c4fa626fdd5c402975
|
refs/heads/master
| 2023-08-30T17:02:00.692601
| 2019-06-10T14:15:16
| 2019-06-10T14:15:16
| 181,642,656
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 386
|
rd
|
hr_train.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree-data.R
\docType{data}
\name{hr_train}
\alias{hr_train}
\title{Decision tree train data}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 1029 rows and 35 columns.}
\usage{
hr_train
}
\description{
Training data set for decision tree model
}
\keyword{datasets}
|
de786c1f4b4eaf6d83669832fac0fca18fb4f68f
|
7946b84034a7dd7e3d5b4d69db88373d58185789
|
/R/get_bio_oracle.R
|
f263a0d78b34e210e99680b63a9fec5b29a1afee
|
[] |
no_license
|
luismurao/ntbox
|
3839a7a346b390850d9d1dc77cbd50cb32a52d00
|
220112e32c53ceef4f5f1bcdb7daed6b755604bf
|
refs/heads/master
| 2023-07-09T05:55:01.119706
| 2023-07-08T18:01:47
| 2023-07-08T18:01:47
| 79,830,037
| 7
| 9
| null | 2020-07-21T00:42:51
| 2017-01-23T17:39:41
|
R
|
UTF-8
|
R
| false
| false
| 5,983
|
r
|
get_bio_oracle.R
|
#' get_bio_oracle: Get environmental layers from Bio-Oracle.
#' @description Get bioclimatic layers from Bio-Oracle for current and future scenarios
#' @param period Time period. Posible values are: "current","2050","2100","2200".
#' @param var_type Type of variable. Posible values are: 'salinity','sea_surface_temperature',
#' 'current_velocity','sea_water_temperature','sea_water_salinity','sea_ice_thickness',
#' 'chlorophyll_concentration','sea_surface_salinity'.
#' @param model Climate model. Possible values are: "UKMO-HadCM3" and "AOGCM"
#' @param scenario Climate change scenario. Posible values are "a1b","a2","b1","rcp26","rcp45","rcp60","rcp85".
#' @param sv_dir Path to the directory where the layers will be saved. The default is the working directory of the R session.
#' @param load2r Logical. Load layers into R?
#' @param parallel Download layers in parallel.
#' @seealso \code{\link[ntbox]{get_envirem_elev}}, \code{\link[ntbox]{get_envirem_clim}}, \code{\link[ntbox]{get_chelsa}}
#' @details For more details visit \url{http://www.bio-oracle.org/index.php}
#' @references Assis, J., Tyberghein, L., Bosh, S., Verbruggen, H., Serrao, E. A., & De Clerck, O. (2017). Bio-ORACLE v2.0: Extending marine data layers for bioclimatic modelling. Global Ecology and Biogeography.
#' @export
#' @examples
#' \dontrun{
#' swater_temp <- get_bio_oracle(period = "current",
#' var_type = 'sea_water_temperature',
#' model = NULL,
#' scenario = NULL,
#' sv_dir="~/Desktop/",
#' load2r = TRUE,
#' parallel = TRUE)
#' swater_temp_2100_AOGCM_rcp85 <- get_bio_oracle(period = "2100",
#' var_type ='sea_water_temperature',
#' model = "AOGCM",
#' scenario = "rcp85",
#' sv_dir="C:/Users/l916o895/Desktop",
#' load2r = TRUE,
#' parallel = TRUE)
#' }
get_bio_oracle <- function(period,var_type,model=NULL,scenario=NULL,sv_dir=getwd(),load2r=TRUE,parallel=TRUE){
  # Fail fast if the requested save directory does not exist.
  if(!dir.exists(sv_dir))
    stop(paste("No such a file or directory,",
               sv_dir))
  bior_down <- NULL
  # Lookup table shipped with ntbox: maps period/variable/model/scenario
  # combinations to Bio-Oracle layer codes.
  bio_oracle <- base::readRDS(file.path(system.file("extdata",
                                                    package = "ntbox"),
                                        "bio_oracle.rds"))
  bio_oracle_urls <- NULL
  # --- Present-day layers: select rows by variable type only. ---
  if(period == "current"){
    bio_oracle <- bio_oracle[!duplicated(bio_oracle$current_layer_code),]
    # NOTE(review): filter_() is deprecated in dplyr; the modern equivalent is
    # filter(.data$type == var_type).
    bio_oracle <- bio_oracle %>% filter_(~type==var_type)
    layers_des <- paste("bio_oracle",
                        var_type,
                        period,sep = "_")
    bio_oracle_urls <- bio_oracle$current_layer_code
  }
  # --- Future layers: require a valid period + scenario + model triple. ---
  # NOTE(review): "rcp26" appears twice in the allowed-scenario vector
  # (harmless for %in%, but looks like a typo for another scenario code).
  if(period %in% c("2050",
                   "2100",
                   "2200") &&
     scenario %in% c("rcp26",
                     "a1b",
                     "a2",
                     "b1",
                     "rcp26",
                     "rcp45",
                     "rcp60",
                     "rcp85") &&
     model %in% c("UKMO-HadCM3","AOGCM")){
    scenario <- base::toupper(scenario)
    period <- as.numeric(period)
    # Build a composite key matching the lookup table's row identity.
    bio_oracle$split_code <- paste(bio_oracle$type,
                                   bio_oracle$year,
                                   bio_oracle$model,
                                   bio_oracle$scenario,
                                   sep="_")
    layers_des <- paste(var_type,
                        period,
                        model,
                        scenario,
                        sep = "_")
    bio_oracle <- bio_oracle %>% filter_(~split_code==layers_des)
    if(nrow(bio_oracle)>0L){
      layers_des <- paste("bio_oracle",
                          var_type,
                          period,
                          model,
                          scenario,
                          sep = "_")
      bio_oracle_urls <- bio_oracle$future_layer_code
    }
  }
  # NOTE(review): if neither branch above ran (e.g. an invalid `period`),
  # `layers_des` never exists, so control falls into the else-branch below and
  # errors on the undefined variable — confirm whether a clearer message is wanted.
  if(is.null(bio_oracle_urls) && exists("layers_des")){
    warning(paste("No spatial information for",
                  layers_des))
  }
  else{
    # Download the selected layers into a subdirectory named after the query.
    dir_name <- base::file.path(sv_dir, layers_des)
    if(!dir.exists(dir_name ))
      dir.create(dir_name )
    if(parallel){
      # One download task per layer code, spread over (cores - 1) workers.
      ncores <- parallel::detectCores() -1
      cl <- parallel::makeCluster(ncores)
      parallel::clusterExport(cl,varlist = c("bio_oracle_urls",
                                             "dir_name"),
                              envir = environment())
      pardown <- function(x){
        r1 <- sdmpredictors::load_layers(bio_oracle_urls[x],
                                         rasterstack = FALSE,
                                         datadir = dir_name)
        return(r1)
      }
      bior_down <- parallel::clusterApply(cl, seq_along(bio_oracle_urls),
                                          function(x) pardown(x))
      parallel::stopCluster(cl)
    }
    else{
      bior_down <- sdmpredictors::load_layers(bio_oracle_urls,
                                              rasterstack = FALSE,
                                              datadir = dir_name)
    }
    # Optionally collapse the list of rasters into a single RasterStack.
    if(load2r)
      bior_down <- raster::stack(unlist(bior_down))
    cite_bior <- paste("Assis, J., Tyberghein, L., Bosh, S., Verbruggen, H.,",
                       "Serrao, E. A., & De Clerck, O. (2017). Bio-ORACLE v2.0:",
                       "Extending marine data layers for bioclimatic modelling.",
                       "Global Ecology and Biogeography.")
    base::message(paste("Please cite as",cite_bior))
  }
  return(bior_down)
}
|
314ffbad35d314eadc5e1c6b7ede36054e4d5a25
|
bc5d84e2464651b6267b4c93fffabf1df5fa8d7f
|
/src/image.R
|
8e936893d3486129c27efef252f007c9f8b93d08
|
[
"OGL-Canada-2.0",
"MIT"
] |
permissive
|
amygoldlist/Baby_weights_by_sex
|
3e7fb0cbc33830bc0052ed61e31932f3259dda06
|
c1b9b7c7c8e09005d98d3ac2e99663be2f3295d3
|
refs/heads/master
| 2021-08-29T19:24:23.014519
| 2017-12-14T18:51:27
| 2017-12-14T18:51:27
| 111,855,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,260
|
r
|
image.R
|
## image.R
## by Amy Goldlist, December 2017
## Usage: Rscript src/image.R <input_csv> <output_png>
##   e.g. Rscript src/image.R results/baby_data.csv results/images/baby_histogram.png
## Reads the cleaned baby-weight data and writes a grouped bar chart of
## birthweight counts by sex to a PNG file.

# Positional command-line arguments: input CSV path, output image path.
cli_args <- commandArgs(trailingOnly = TRUE)
csv_path <- cli_args[1]
png_path <- cli_args[2]

## load tidyverse + forcats
library(tidyverse)
library(forcats)

## load the prepared data set
births <- read.csv(csv_path)

## re-order the weight-class factor so the bars plot in ascending weight order
births <- births %>%
  mutate(Weight_class = fct_relevel(Weight_class, c("less than 500 grams", "750 to 999 grams")))

## aggregate counts per sex and weight class, dropping negative (suppressed) values
weight_counts <- births %>%
  filter(Value >= 0) %>%
  group_by(SEX, Weight_class) %>%
  summarize(count = sum(Value))

## grouped bar chart: one bar per sex within each weight class
weight_plot <- ggplot(weight_counts, aes(x = Weight_class, y = count, fill = SEX)) +
  geom_col(position = "dodge") +
  theme_minimal() +
  scale_y_continuous(name = "Count", labels = scales::comma) +
  scale_x_discrete(name = "Birthweight") +
  theme(axis.text.x = element_text(angle = 70, hjust = 1)) +
  labs(title = "Birthweights by Sex")

ggsave(png_path, weight_plot)
|
e3968c25007cd066abcd15c2dfbb9fb79f4a3d9b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/matchMulti/examples/matchMulti.Rd.R
|
893f369429958d34ae4df05c88e7770b94973fa0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,760
|
r
|
matchMulti.Rd.R
|
# Auto-extracted example code for matchMulti::matchMulti (from the Rd file).
# The live part runs a fast school-level match on a trimmed data set and
# checks balance; the heavier workflow (student-level matching, outcome
# estimation, sensitivity analysis, fine balance) sits in the "Not run" block.
library(matchMulti)
### Name: matchMulti
### Title: A function that performs multilevel matching.
### Aliases: matchMulti
### ** Examples
#toy example with short runtime
library(matchMulti)
#Load Catholic school data
data(catholic_schools)
# Trim data to speed up example
catholic_schools <- catholic_schools[catholic_schools$female_mean >.45 &
                                       catholic_schools$female_mean < .60,]
#match on a single covariate
student.cov <- c('minority')
match.simple <- matchMulti(catholic_schools, treatment = 'sector',
                           school.id = 'school', match.students = FALSE,
                           student.vars = student.cov, verbose=TRUE, tol=.01)
#Check balance after matching - this checks both student and school balance
balanceMulti(match.simple, student.cov = student.cov)
## Not run:
##D #larger example
##D data(catholic_schools)
##D
##D student.cov <- c('minority','female','ses')
##D
##D # Check balance student balance before matching
##D balanceTable(catholic_schools[c(student.cov,'sector')],  treatment = 'sector')
##D
##D #Match schools but not students within schools
##D match.simple <- matchMulti(catholic_schools, treatment = 'sector',
##D school.id = 'school', match.students = FALSE)
##D
##D #Check balance after matching - this checks both student and school balance
##D balanceMulti(match.simple, student.cov = student.cov)
##D
##D #Estimate treatment effect
##D output <- matchMultioutcome(match.simple, out.name = "mathach",
##D schl_id_name = "school",  treat.name = "sector")
##D
##D # Perform sensitivity analysis using Rosenbaum bound -- increase Gamma to increase effect of
##D # possible hidden confounder
##D matchMultisens(match.simple, out.name = "mathach",
##D           schl_id_name = "school",
##D           treat.name = "sector", Gamma = 1.3)
##D
##D
##D # Now match both schools and students within schools
##D match.out <- matchMulti(catholic_schools, treatment = 'sector',
##D school.id = 'school', match.students = TRUE, student.vars = student.cov)
##D
##D # Check balance again
##D bal.tab <- balanceMulti(match.out, student.cov = student.cov)
##D
##D # Now match with fine balance constraints on whether the school is large
##D # or has a high percentage of minority students
##D match.fb <- matchMulti(catholic_schools, treatment = 'sector', school.id = 'school',
##D match.students = TRUE, student.vars = student.cov,
##D school.fb = list(c('size_large'),c('size_large','minority_mean_large')))
##D
##D # Estimate treatment effects
##D matchMultioutcome(match.fb, out.name = "mathach", schl_id_name = "school", treat.name = "sector")
##D
##D #Check Balance
##D balanceMulti(match.fb, student.cov = student.cov)
##D
## End(Not run)
|
f6cb214fc85c5475e07bb47262dfd745f38510ae
|
218f94dc54f33ea755df171448e1ca9493c446a6
|
/1_Nov2018/3_Practices/Governance/2_Tables_for_Back-end_Governance.R
|
37a3c920fe1bd97159a2b0d25da727ec6b593980
|
[] |
no_license
|
WWF-ConsEvidence/ConservationDashboard
|
b96565195092fdb7f1c7a05c010e7b28ae700cbe
|
94db2429b99f2bfe2f98a902d0ed06117b01bafa
|
refs/heads/master
| 2021-06-02T17:09:58.342296
| 2020-10-30T23:10:36
| 2020-10-30T23:10:36
| 109,167,974
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,616
|
r
|
2_Tables_for_Back-end_Governance.R
|
#
# code: Governance Practice Indicator and Initiative Tables
#
# author: Kelly Claborn, clabornkelly@gmail.com
# created: August 2018
# modified:
#
# ---- inputs ----
# 1) Governance-specific data tables (in 1_Nov2018/2_FlatDataFiles/ConsDB_Input)
#
# ---- outputs ----
# 1) Governance-specific back-end tables -- ready to be consolidated with other Practices, to go into back-end:
# - Dim_Context_Indicator_Type
# - Fact_Global_Context_Indicators
# - Dim_Initiative
# - Fact_Initiative_Financials
# - Dim_Initiative_Indicator_Type
# - Fact_Initiative_Indicators
# - Milestone_Group_Bridge
# - Dim_Milestone
#
# ---- code sections ----
# 1) Load libraries, add reference tables
# 2) Global Context
# 3) Initiatives
#
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 1: Load libraries, add reference tables ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Load dependencies and the shared dimension lookup tables (practice keys and
# practice-outcome keys) used to join Governance facts to the dashboard schema.
pacman::p_load(dplyr, xlsx)
practice_key_ref <- read.xlsx('1_Nov2018/2_FlatDataFiles/ConsDB_Input/cons_dashboard_dim_tables_20180828.xlsx',
                              sheetName='Dim_Practice')
practice_outcome_key_ref <- read.xlsx('1_Nov2018/2_FlatDataFiles/ConsDB_Input/cons_dashboard_dim_tables_20180828.xlsx',
                                      sheetName='Dim_Practice_Outcome')
#
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 2: Global Context ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Each indicator panel is described by a Dim_* data frame (one row of
# indicator metadata) paired with a Fact_* data frame (one row per year of
# observed values, keyed back to the Dim via Indicator_Type_Key).
#
# ---- 2.1 Context - State ----
# -- INDIGENOUS AND COMMUNITY LAND RIGHTS
Dim_Context_State_Governance_A <-
  data.frame(Indicator_Type_Key="GCS_GV_A",
             Indicator_Name="Gap in formal recognition of ICCAs, compared to all community conserved lands",
             Indicator_Label="Formally Recognized Indigenous and Community Conserved Areas (ICCAs)*",
             Panel_Label="Indigenous and Community Land Rights",
             Panel="State",
             Indicator_Subcategory=NA,
             Indicator_Unit="% of total estimated community conserved lands",
             Data_Source="WDPA; IUCN (for estimate of total community conserved lands)")
# Yearly ICCA coverage as a percentage of estimated community conserved land,
# restricted to status years after 1994, with hi/low bounds from the estimate.
Fact_Context_State_Governance_A <-
  read.csv('1_Nov2018/2_FlatDataFiles/ConsDB_Input/ICCA_timeseries.csv') %>%
  subset(.,STATUS_YR>1994) %>%
  transmute(Year_Key=STATUS_YR,
            Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(STATUS_YR)),
            Indicator_Type_Key=rep(Dim_Context_State_Governance_A$Indicator_Type_Key,length(STATUS_YR)),
            Indicator_Value=ICCA_PERCENT_EST,
            Indicator_Upper_Value=ICCA_PERCENT_HI,
            Indicator_Lower_Value=ICCA_PERCENT_LOW)
# ---- 2.2 Context - Threat ----
# -- UNSUSTAINABLE DEVELOPMENT - TOTAL LOSS
Dim_Context_Threat_Governance_A <-
  data.frame(Indicator_Type_Key="GCT_GV_A",
             Indicator_Name="Intact ecosystems lost to unsustainable development",
             Indicator_Label="Global Tree Cover Loss",
             Panel_Label="Unsustainable Development",
             Panel="Threat",
             Indicator_Subcategory="Total Loss",
             Indicator_Unit="M ha per year",
             Data_Source="Global Forest Watch")
Fact_Context_Threat_Governance_A <-
  read.xlsx('1_Nov2018/2_FlatDataFiles/ConsDB_Input/GFW_treeloss_bydriver_2018_0919.xlsx', sheetName="Sheet1") %>%
  subset(.,Geography=="World" & Loss_type=="Total Loss") %>%
  transmute(Year_Key=Year,
            Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(Year_Key)),
            Indicator_Type_Key=rep(Dim_Context_Threat_Governance_A$Indicator_Type_Key,length(Year_Key)),
            Indicator_Value=Value,
            Indicator_Upper_Value=NA,
            Indicator_Lower_Value=NA)
# -- UNSUSTAINABLE DEVELOPMENT - COMMODITY DRIVEN DEFORESTATION
Dim_Context_Threat_Governance_B <-
  data.frame(Indicator_Type_Key="GCT_GV_B",
             Indicator_Name="Intact ecosystems lost to unsustainable development",
             Indicator_Label="Global Tree Cover Loss",
             Panel_Label="Unsustainable Development",
             Panel="Threat",
             Indicator_Subcategory="Commodity Driven Loss",
             Indicator_Unit="M ha per year",
             Data_Source="Global Forest Watch; Curtis et al (2018) Global drivers of forest loss")
Fact_Context_Threat_Governance_B <-
  read.xlsx('1_Nov2018/2_FlatDataFiles/ConsDB_Input/GFW_treeloss_bydriver_2018_0919.xlsx', sheetName="Sheet1") %>%
  subset(.,Geography=="World" & Loss_type=="Commodity Driven Deforestation") %>%
  transmute(Year_Key=Year,
            Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(Year_Key)),
            Indicator_Type_Key=rep(Dim_Context_Threat_Governance_B$Indicator_Type_Key,length(Year_Key)),
            Indicator_Value=Value,
            Indicator_Upper_Value=NA,
            Indicator_Lower_Value=NA)
# ---- 2.3 Context - Response ----
# -- COMMUNITY CONSERVED LAND - ICCA COVERAGE
Dim_Context_Response_Governance_A <-
  data.frame(Indicator_Type_Key="GCR_GV_A",
             Indicator_Name="ICCA coverage (of formally recognized Indigenous and Community Conserved Areas)",
             Indicator_Label="Indigenous and Community Conserved Areas (ICCAs)*",
             Panel_Label="Community Conserved Land",
             Panel="Response",
             Indicator_Subcategory="Coverage",
             Indicator_Unit="M ha",
             Data_Source="WDPA")
Fact_Context_Response_Governance_A <-
  read.csv('1_Nov2018/2_FlatDataFiles/ConsDB_Input/ICCA_timeseries.csv') %>%
  subset(.,STATUS_YR>1994) %>%
  transmute(Year_Key=STATUS_YR,
            Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(STATUS_YR)),
            Indicator_Type_Key=rep(Dim_Context_Response_Governance_A$Indicator_Type_Key,length(STATUS_YR)),
            Indicator_Value=AREA_MHA_TIME,
            Indicator_Upper_Value=NA,
            Indicator_Lower_Value=NA)
# -- COMMUNITY CONSERVED LAND - EFFECTIVE GUARDIANSHIP
# Placeholder indicator: no data source yet, so the fact row uses the 9999
# "no year" sentinel and an NA value.
Dim_Context_Response_Governance_B <-
  data.frame(Indicator_Type_Key="GCR_GV_B",
             Indicator_Name="Effective guardianship of community conserved lands",
             Indicator_Label="Indigenous and Community Conserved Areas (ICCAs)*",
             Panel_Label="Community Conserved Land",
             Panel="Response",
             Indicator_Subcategory="Effective Guardianship*",
             Indicator_Unit="",
             Data_Source="")
# NOTE(review): rep(..., length(1)) always repeats exactly once — probably a
# copy/paste leftover from the length(<vector>) pattern used above.
Fact_Context_Response_Governance_B <-
  data.frame(Year_Key=9999,
             Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(1)),
             Indicator_Type_Key=rep(Dim_Context_Response_Governance_B$Indicator_Type_Key,length(1)),
             Indicator_Value=NA,
             Indicator_Upper_Value=NA,
             Indicator_Lower_Value=NA)
# ---- 2.4 Consolidated Governance-specific Global Context tables ----
Dim_Context_Governance <-
  rbind.data.frame(Dim_Context_State_Governance_A,
                   Dim_Context_Threat_Governance_A,
                   Dim_Context_Threat_Governance_B,
                   Dim_Context_Response_Governance_A,
                   Dim_Context_Response_Governance_B)
Fact_Context_Governance <-
  rbind.data.frame(Fact_Context_State_Governance_A,
                   Fact_Context_Threat_Governance_A,
                   Fact_Context_Threat_Governance_B,
                   Fact_Context_Response_Governance_A,
                   Fact_Context_Response_Governance_B)
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 3: Initiatives ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 3.1 Load data ----
dim.initiatives.governance <-
read.xlsx('1_Nov2018/2_FlatDataFiles/ConsDB_Input/fy18_initiative_reporting_dim_2018_1121.xlsx',sheetName="Sheet1") %>%
subset(.,Practice=="Governance")
dim.initiative.indicators.governance <-
read.xlsx('1_Nov2018/2_FlatDataFiles/ConsDB_Input/fy18_initiative_indicators_fact_2018_1121.xlsx',sheetName="Sheet1") %>%
subset(.,Practice=="Governance")
# ---- 3.2 Governance-specific Dim_Initiative ----
Dim_Initiative_Governance <-
dim.initiatives.governance %>%
transmute(Initiative_Key=Initiative.key,
Initiative_Name=Initiative,
Initiative_Status=Overall.status,
Initiative_Status_Justification=Overall.just,
Initiative_Goal=Initiative.statement)
# ---- 3.3 Governance-specific Dim_Initiative_Indicator_Type ----
Dim_Initiative_Indicator_Governance <-
dim.initiative.indicators.governance %>%
transmute(Indicator_Type_Key=Initiative.indicator.key,
Indicator_Type=Indicator.type,
Indicator_Name=ifelse(!is.na(Indicator.name),as.character(Indicator.name),"FORTHCOMING"),
Indicator_Label=ifelse(!is.na(Indicator.label),as.character(Indicator.label),"Not Yet Identified"),
Indicator_Subcategory=Subcategory,
Indicator_Target=Target,
Indicator_Unit=Units,
Data_Source=Source,
Display_Order=Display.order)
# ---- 3.4 Governance-specific Fact_Initiative_Indicators ----
Fact_Initiative_Indicator_Governance <-
dim.initiative.indicators.governance %>%
left_join(.,dim.initiatives.governance[,c("Initiative.key","Initiative","Practice.outcome.key")],
by="Initiative") %>%
melt(.,measure.vars=c("Baseline.value","Current.value","Target")) %>%
transmute(Initiative.indicator.key=Initiative.indicator.key,
Initiative=Initiative,
Initiative.key=Initiative.key,
Practice.outcome.key=Practice.outcome.key,
Year.type=c(rep("Baseline",length(variable[variable=="Baseline.value"])),
rep("Current",length(variable[variable=="Current.value"])),
rep("Target",length(variable[variable=="Target"]))),
Year=c(Baseline.year[variable=="Baseline.value"],
Current.year[variable=="Current.value"],
Target.year[variable=="Target"]),
Raw.value=c(value[variable=="Baseline.value"],
value[variable=="Current.value"],
value[variable=="Target"]),
Raw.baseline.value=rep(value[variable=="Baseline.value"],3),
Value=ifelse(grepl("% change",Units,ignore.case=T)==T |
grepl("% reduction",Units,ignore.case=T)==T |
grepl("% increase",Units,ignore.case=T)==T,
ifelse(Year.type=="Baseline" & !is.na(Year),
0,
ifelse(Year.type=="Current" & Desired.trend=="Down",
(1-(Raw.value/Raw.baseline.value))*100,
ifelse(Year.type=="Current" & Desired.trend=="Up",
((Raw.value/Raw.baseline.value)-1)*100,
Raw.value))),
Raw.value)) %>%
.[!(is.na(.$Year)==T & .$Year.type=="Current") &
!(is.na(.$Value)==T & .$Year.type=="Target"),] %>%
transmute(Year_Key=ifelse(!is.na(Year),Year,9999),
Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(Year_Key)),
Initiative_Key=Initiative.key,
Indicator_Type_Key=Initiative.indicator.key,
Practice_Outcome_Key=Practice.outcome.key,
Indicator_Value=Value,
Indicator_Upper_Value=NA,
Indicator_Lower_Value=NA)
# ---- 3.5 Governance-specific Fact_Initiative_Financials ----
# Financial fact table: one row per governance initiative with funds
# needed vs. secured, keyed by date and the Governance practice key.
Fact_Initiative_Financials_Governance <-
dim.initiatives.governance %>%
transmute(Date_Key=Date,
Practice_Key=rep(practice_key_ref$id[practice_key_ref$practice_name=="Governance"],length(Date_Key)),
Initiative_Key=Initiative.key,
Amount_needed=Funds.needed,
Amount_secured=Funds.secured)
# ---- REMOVE CLUTTER ----
# Drop the intermediate governance objects now that the final
# Dim_*/Fact_* tables have been assembled above.
rm(Dim_Context_State_Governance_A,
Dim_Context_Threat_Governance_A,
Dim_Context_Threat_Governance_B,
Dim_Context_Response_Governance_A,
Dim_Context_Response_Governance_B,
Fact_Context_State_Governance_A,
Fact_Context_Threat_Governance_A,
Fact_Context_Threat_Governance_B,
Fact_Context_Response_Governance_A,
Fact_Context_Response_Governance_B,
dim.initiatives.governance,
dim.initiative.indicators.governance)
|
74e2e8dd117084ca5976c96372b3ad915cc5a49a
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/1689/DIME/huber.R
|
e0e118924b70520877c9665654ff27d3c21d1ca9
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
r
|
huber.R
|
#' Huber weighting of a numeric vector.
#'
#' Standardizes `input` (z-scores) and returns a weight per observation:
#' 1 for observations inside the cutoff region, and |co|/|z| (a value in
#' (0, 1)) for observations beyond it, so outliers are down-weighted.
#'
#' @param input Numeric vector (or list; it is `unlist`ed first).
#' @param co Cutoff constant; its absolute value bounds the "inlier" region.
#'   Default -1.345, the classical Huber tuning constant.
#' @param shape Which tail(s) to down-weight: "full" (both tails, |z| > |co|),
#'   "lower" (z <= co), or "upper" (z >= |co|).
#' @return Numeric vector of weights, same length as `input`. All ones when
#'   no observation exceeds the cutoff.
huber <- function(input, co = -1.345, shape = c("full", "lower", "upper")) {
  input <- unlist(input)
  len <- length(input)
  shape <- match.arg(shape)
  # Standardize to z-scores before applying the cutoff.
  input <- (input - mean(input)) / sd(input)
  change <- switch(shape,
    full  = which(abs(input) > abs(co)),
    lower = which(input <= co),
    upper = which(input >= abs(co))
  )
  # No observation beyond the cutoff: every weight is 1.
  # (The original relied on input[-integer(0)] being a no-op here.)
  if (length(change) < 1) {
    return(rep(1, len))
  }
  # Down-weight flagged observations; all others get weight 1.
  input[change] <- abs(co) / abs(input[change])
  input[-change] <- 1
  return(input)
}
|
2d7130c81b30a9b66e82074e7eb634a9356fa5e2
|
068e0cfa3f62ba1a92c95256b3bb6df35629c56c
|
/SiReX/server.R
|
79a599e43f13415c89d1dde7f83035e244f5bef6
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
anhnguyendepocen/SAFJR17
|
e48ec7215dbc3087137cdebe02be58780046b8e9
|
56e55ef4b6d85a34a9af92719fe8bc83218ce7b7
|
refs/heads/master
| 2020-04-17T11:50:10.099420
| 2017-07-31T11:31:58
| 2017-07-31T11:31:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,054
|
r
|
server.R
|
## server.R script ##
# Shiny server for the SiReX simulation-results browser.
# NOTE(review): the data frames s1/s2 and the pipe/dplyr functions are
# presumably loaded by common.R -- confirm against that file.
source("common.R")
function(input, output, session) {
# Serve only possible values:
# Keep the "n. of individuals" choices consistent with the selected
# number of clusters for each simulation.
observe({
# Adjust n. of individuals, simulation 1
# NOTE(review): s1_sel_n_clusters is assigned but never used below;
# the reactive dependency comes from input$s1_n_clusters directly.
s1_sel_n_clusters = input$s1_n_clusters
updateSelectInput(session,
"s1_n_individuals",
choices = sort(unique(s1$n_individuals[s1$n_clusters == input$s1_n_clusters])))
# Adjust n. of individuals, simulation 2
s2_sel_n_clusters = input$s2_n_clusters
updateSelectInput(session,
"s2_n_individuals",
choices = sort(unique(s2$n_individuals[s2$n_clusters == input$s2_n_clusters])))
})
# Table of results, simulation 1:
# Filter s1 to the selected scenario, always keep the convergence rows
# (convp/convn) plus the chosen parameter, and relabel the statistics
# with human-readable names in a fixed display order.
output$s1_table <- renderTable(
s1 %>%
filter(n_clusters == input$s1_n_clusters,
n_individuals == input$s1_n_individuals,
frailty_theta == input$s1_frailty_theta,
treatment_effect == input$s1_treatment_effect,
par %in% c("convp", "convn", input$s1_par)) %>%
select(stat, AF, IN, GQ15, GQ35, GQ75, GQ105) %>%
mutate(stat = factor(stat, levels = c("convn", "convp", "covp", "bias", "pbias", "mean", "se_mean", "median", "se_median", "empse", "mse"), labels = c("N. of simulations converging", "P. of simulations converging", "Coverage probability", "Bias", "Percentage bias", "Mean estimate", "Mean SE", "Median estimate","Median SE", "Empirical SE", "MSE"))) %>%
arrange(stat) %>%
rename(Statistic = stat),
digits = 4)
# Table of results, simulation 2:
# Same as above for s2; simulation 2 only has the GQ* model columns.
output$s2_table <- renderTable(
s2 %>%
filter(n_clusters == input$s2_n_clusters,
n_individuals == input$s2_n_individuals,
frailty_sigma == input$s2_frailty_sigma,
treatment_effect == input$s2_treatment_effect,
par %in% c("convp", "convn", input$s2_par)) %>%
select(stat, GQ15, GQ35, GQ75, GQ105) %>%
mutate(stat = factor(stat, levels = c("convn", "convp", "covp", "bias", "pbias", "mean", "se_mean", "median", "se_median", "empse", "mse"), labels = c("N. of simulations converging", "P. of simulations converging", "Coverage probability", "Bias", "Percentage bias", "Mean estimate", "Mean SE", "Median estimate","Median SE", "Empirical SE", "MSE"))) %>%
arrange(stat) %>%
rename(Statistic = stat),
digits = 4)
# Plot, simulation 1:
# Bar chart of bias / coverage / MSE for the selected parameter,
# one facet per statistic, one bar per model.
output$s1_plot = renderPlot(
s1 %>%
filter(n_clusters == input$s1_n_clusters,
n_individuals == input$s1_n_individuals,
frailty_theta == input$s1_frailty_theta,
treatment_effect == input$s1_treatment_effect,
par == input$s1_par) %>%
select(stat, AF, IN, GQ15, GQ35, GQ75, GQ105) %>%
filter(stat %in% c("bias", "covp", "mse")) %>%
mutate(stat = factor(stat, levels = c("bias", "covp", "mse"), labels = c("Bias", "Coverage probability", "MSE"))) %>%
gather(key = key, value = value, 2:7) %>%
mutate(key = factor(key, levels = c("AF", "IN", "GQ15", "GQ35", "GQ75", "GQ105"))) %>%
ggplot(aes(x = key, y = value)) +
geom_bar(stat = "identity") +
facet_wrap(~ stat, scales = "free_y") +
theme_bw() +
labs(x = "", y = ""))
# Plot, simulation 2:
# Same plot for s2 with only the GQ* models.
output$s2_plot = renderPlot(
s2 %>%
filter(n_clusters == input$s2_n_clusters,
n_individuals == input$s2_n_individuals,
frailty_sigma == input$s2_frailty_sigma,
treatment_effect == input$s2_treatment_effect,
par == input$s2_par) %>%
select(stat, GQ15, GQ35, GQ75, GQ105) %>%
filter(stat %in% c("bias", "covp", "mse")) %>%
mutate(stat = factor(stat, levels = c("bias", "covp", "mse"), labels = c("Bias", "Coverage probability", "MSE"))) %>%
gather(key = key, value = value, 2:5) %>%
mutate(key = factor(key, levels = c("GQ15", "GQ35", "GQ75", "GQ105"))) %>%
ggplot(aes(x = key, y = value)) +
geom_bar(stat = "identity") +
facet_wrap(~ stat, scales = "free_y") +
theme_bw() +
labs(x = "", y = ""))
}
|
f1dd7353826b71c8bc97d73ab0a460159f4ac1c7
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/srm/R/SRM_PARTABLE.R
|
a470b80b271bd03e70e0e96f62662ec5df6f4657
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,197
|
r
|
SRM_PARTABLE.R
|
## File Name: SRM_PARTABLE.R
## File Version: 0.07
# Reorder a parameter table so that, within each group, rows appear in the
# canonical operator order (loadings "=~", regressions "~", (co)variances
# "~~", intercepts "~1"), each operator section sorted by lhs then rhs.
# Empty sections are skipped. Returns the reordered data frame.
SRM_PARTABLE_ORDER <- function(LIST, ngroups = 1L) {
    ordered <- data.frame()
    op.sequence <- c("=~", "~", "~~", "~1")
    for (g in 1:ngroups) {
        for (operator in op.sequence) {
            section <- subset(LIST, LIST$op == operator & LIST$group == g)
            if (nrow(section) > 0L) {
                section <- section[order(section$lhs, section$rhs), ]
                ordered <- rbind(ordered, section)
            }
        }
    }
    return(ordered)
}
# Fill the `equal` column of a parameter table with equality-constraint
# labels. For "Person" tables only user-specified constraints are copied.
# For "Dyad" tables, user constraints on loadings/regressions are copied,
# and reciprocal "~~" parameters of each actor-partner (@AP/@PA) variable
# pair are additionally tied together with generated labels "vv1", "vv2", ...
# Returns LIST with its `equal` column replaced.
SRM_PARTABLE_FIXEDVALUES <- function(LIST, name="EMPTY", ngroups = 1L) {
equal <- rep(NA,length(LIST$lhs))
if (name=="Person") {
for (g in 1:ngroups) {
# Copy over only constraints the user specified.
idx <- which(!(is.na(LIST$equal)) & LIST$group == g )
equal[idx] <- LIST$equal[idx]
}
} else if (name == "Dyad") {
# lauf2 offsets the generated "vv" label counter across groups.
lauf2 <- 0
for (g in 1:ngroups) {
# we start with the loadings and regressions
idx <- which(LIST$op %in% c("=~","~") & !(is.na(LIST$equal)) & LIST$group == g)
equal[idx] <- LIST$equal[idx]
# now, we compute the constraints for the "~~" parameters
# Variables with actor-partner structure are identified by "@AP".
# NOTE(review): if no "@AP" rows exist, var.names is empty and
# 1:length(var.names) iterates over c(1, 0) -- confirm this case
# cannot occur upstream.
idx.names <- which(LIST$op == "~~" & grepl("@AP",LIST$lhs) & LIST$group == g)
var.names <- unique(gsub("@AP","",LIST$lhs[idx.names]))
lauf <- 1 + lauf2
for (i in 1:length(var.names)) {
# Variance rows of the @AP and @PA versions of this variable.
tmp.idx1 <- which(LIST$op == "~~" & LIST$group == g &
gsub("@AP","",LIST$lhs) == var.names[i] &
gsub("@AP","",LIST$rhs) == var.names[i] )
tmp.idx2 <- which(LIST$op == "~~" & LIST$group == g &
gsub("@PA","",LIST$lhs) == var.names[i] &
gsub("@PA","",LIST$rhs) == var.names[i] )
tmp.idx <- c(tmp.idx1,tmp.idx2)
## if user - constrained
if(any(LIST$user[tmp.idx] == 1)) {
# NOTE(review): k.idx holds positions *within* tmp.idx, but is
# then used to index LIST$equal directly; this looks like it
# should be LIST$equal[tmp.idx][k.idx][1] -- confirm intent.
k.idx = which(LIST$user[tmp.idx] == 1)
equal[tmp.idx] <- LIST$equal[k.idx][1]
} else {
# Tie the pair together with a fresh generated label.
equal[tmp.idx] <- paste("vv",lauf,sep="")
lauf <- lauf + 1
}
} # for-loop
# NOTE(review): the offset grows by the final value of lauf (one more
# than the number of labels used) -- labels stay unique, but the
# sequence has gaps; confirm this is intentional.
lauf2 <- lauf2 + lauf
} # for-loop groups
} # else - Dyad
LIST$equal <- equal
return(LIST)
}
## the following two functions are very similar to
## lavaan's lavaanify aka lav_partable functions
## SRM_PARTABLE_PERSON: lavaanify for persons
## SRM_PARTABLE_DYAD: lavaanify for dyads
# Build the person-level parameter table: expand the parsed model syntax
# into a flat list with person defaults, then order it and resolve
# equality constraints / fixed values.
SRM_PARTABLE_PERSON <- function(PARLIST = NULL,
                                as.a.data.frame = TRUE,
                                ngroups = 1L)
{
    # Person-level defaults:
    #  - free covariances between actor and partner effects
    #    (latent and observed round robins), none across latent blocks
    #  - intercepts for observed round robins only
    #  - first-indicator loadings fixed for both actor and partner factors
    LIST <- SRM_PARTABLE_FLAT_PERSON(PARLIST,
                                     auto.cov.lv.ap = TRUE,
                                     auto.cov.ov.ap = TRUE,
                                     auto.cov.lv.block = FALSE,
                                     auto.int.ov = TRUE,
                                     auto.int.lv = FALSE,
                                     auto.fix.loa.first.ind.a = TRUE,
                                     auto.fix.loa.first.ind.p = TRUE,
                                     ngroups = ngroups)
    if (as.a.data.frame) {
        LIST <- as.data.frame(LIST, stringsAsFactors = FALSE)
    }
    # Sort into canonical operator order, then resolve modifiers
    # (fixed values and equality constraints).
    TABLE <- SRM_PARTABLE_ORDER(LIST, ngroups = ngroups)
    TABLE <- SRM_PARTABLE_FIXEDVALUES(TABLE, name = "Person", ngroups = ngroups)
    return(TABLE)
}
# Build the dyad-level parameter table: expand the parsed model syntax
# into a flat list with dyad defaults, then order it and resolve
# equality constraints / fixed values.
SRM_PARTABLE_DYAD <- function(PARLIST = NULL,
                              as.a.data.frame = TRUE,
                              ngroups = 1L)
{
    # Dyad-level defaults:
    #  - free covariances between relationship effects
    #    (latent and observed round robins), none across latent blocks
    #  - no intercepts at the dyad level
    #  - first-indicator loadings fixed for both directions (ij and ji),
    #    with ij/ji loadings of the same indicator constrained equal
    LIST <- SRM_PARTABLE_FLAT_DYAD(PARLIST,
                                   auto.cov.lv.dy = TRUE,
                                   auto.cov.ov.dy = TRUE,
                                   auto.cov.lv.block = FALSE,
                                   auto.int.ov = FALSE,
                                   auto.int.lv = FALSE,
                                   auto.fix.loa.first.ind.ij = TRUE,
                                   auto.fix.loa.first.ind.ji = TRUE,
                                   auto.fix.loa.ind.ij.ji = TRUE,
                                   auto.fix.int.first.ind.ij = FALSE,
                                   auto.fix.int.first.ind.ji = FALSE,
                                   ngroups = ngroups)
    if (as.a.data.frame) {
        LIST <- as.data.frame(LIST, stringsAsFactors = FALSE)
    }
    # Sort into canonical operator order, then resolve modifiers
    # (fixed values and equality constraints).
    TABLE <- SRM_PARTABLE_ORDER(LIST, ngroups = ngroups)
    TABLE <- SRM_PARTABLE_FIXEDVALUES(TABLE, name = "Dyad", ngroups = ngroups)
    return(TABLE)
}
|
0863eea31a2184b78f415c84fedb983f0ebd5ea8
|
0cc77bb4edd0aad0c9f32e61a0a7cf8ca1718aa4
|
/code/sim_results_plot.R
|
dae067aa24b73e3c17707a722dd7404e7ea04b54
|
[] |
no_license
|
rbrown789/mixture-cure-simulation
|
85ebf53c4b43bdb0a6a4b2a5163ae3e1ddea875f
|
7d1b1d134f0e46034049feb24cd07672fa9552f9
|
refs/heads/master
| 2020-12-28T12:03:30.810304
| 2020-02-05T05:42:30
| 2020-02-05T05:42:30
| 238,325,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,296
|
r
|
sim_results_plot.R
|
########################################################################
# This script generates all the results plots from Simulation 1.
# Apologies to anyone trying to figure out what's going on, because it
# is sparsely commented.
########################################################################
library(survival)
library(rms)
library(flexsurv)
library(smcure)
library(abind)
library(R.utils)
library(truncdist)
library(lattice)
library(latticeExtra)
# Project paths (machine-specific; adjust when running elsewhere).
# NOTE(review): `data` shadows base::data() -- harmless here but easy
# to trip over.
root <- "C:/Users/rbrow/OneDrive/Documents/Public Github/mixture-cure-simulation/"
data <- "results/"
code <- "code/"
graphics <- "Graphics/"
# Helper functions: todf/todf.par, trellis.plot, paretoprep, etc.
source(paste0(root,code,"sim_results_functions.R"))
# NOTE(review): `resfin` (the simulation results array), `export`
# (whether to write PDFs) and `eflab` are not defined in this script;
# presumably they come from the sourced file or the workspace -- confirm.
resfin.df <- todf(resfin)
##############################################################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
## Generate all bias plots
# For each parameter/quantity: subset the relevant models, split by sample
# size (N=40 / N=90) with a shared y-axis range, and draw one trellis plot
# per sample size. PDFs are written only when `export` is TRUE.
qnms <- dimnames(resfin)$pred_quant[1:18]
for( i in qnms)
{
parres <- todf.par(i,resfin)
# true <- sprintf("%.2f", round(parres$true[1],2))
# b1/b2 are not estimated by the (semi-parametric) Cox-type models;
# cure probabilities only exist for the mixture-cure models.
if(i %in% c("b1","b2")) { parres <- parres[!parres$model %in% c("CoxPH","MCPH") ,]}
if(i %in% c("curep_00","curep_10","curep_01","curep_11")) { parres <- parres[parres$model %in% c("MCAFT","MCPH") ,]}
parres40 <- parres[parres$n=="N40",]
parres90 <- parres[parres$n=="N90",]
ylim <- c(min(parres$bias,na.rm=T),max(parres$bias,na.rm=T))
if(export) { pdf(paste0(root,graphics,"Simulation Plots/Bias/",i,"_bias.pdf"),height=8,width=8)}
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="bias",bynm="cp",modelnm="model",
dat=parres40,ylim=ylim,maintit=paste0("N=40 - ",i),
bigxlab="Censoring Proportion",bigylab="Bias",h=0 )
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="bias",bynm="cp",modelnm="model",
dat=parres90,ylim=ylim,maintit=paste0("N=90 - ",i),
bigxlab="Censoring Proportion",bigylab="Bias",h=0 )
if(export) {dev.off()}
}
## Generate all relative bias plots
# Same structure as the bias loop, using the relbias column.
for(i in qnms)
{
parres <- todf.par(i,resfin)
# true <- sprintf("%.2f", round(parres$true[1],2))
if(i %in% c("b1","b2")) { parres <- parres[!parres$model %in% c("CoxPH","MCPH") ,]}
if(i %in% c("curep_00","curep_10","curep_01","curep_11")) { parres <- parres[parres$model %in% c("MCAFT","MCPH") ,]}
parres40 <- parres[parres$n=="N40",]
parres90 <- parres[parres$n=="N90",]
ylim <- c(min(parres$relbias,na.rm=T),max(parres$relbias,na.rm=T))
if(export) { pdf(paste0(root,graphics,"Simulation Plots/Relative Bias/",i,"_relbias.pdf"),height=8,width=8)}
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="relbias",bynm="cp",modelnm="model",
dat=parres40,ylim=ylim,maintit=paste0("N=40 - ",i),
bigxlab="Censoring Proportion",bigylab="Relative Bias",h=0 )
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="relbias",bynm="cp",modelnm="model",
dat=parres90,ylim=ylim,maintit=paste0("N=90 - ",i),
bigxlab="Censoring Proportion",bigylab="Relative Bias",h=0 )
if(export) {dev.off()}
}
## Generate all MSE plots
# Same structure, using the mse column (no reference line).
qnms <- dimnames(resfin)$pred_quant[1:18]
for( i in qnms)
{
parres <- todf.par(i,resfin)
# true <- sprintf("%.2f", round(parres$true[1],2))
if(i %in% c("b1","b2")) { parres <- parres[!parres$model %in% c("CoxPH","MCPH") ,]}
if(i %in% c("curep_00","curep_10","curep_01","curep_11")) { parres <- parres[parres$model %in% c("MCAFT","MCPH") ,]}
parres40 <- parres[parres$n=="N40",]
parres90 <- parres[parres$n=="N90",]
ylim <- c(min(parres$mse,na.rm=T),max(parres$mse,na.rm=T))
if(export) { pdf(paste0(root,graphics,"Simulation Plots/MSE/",i,"_mse.pdf"),height=8,width=8)}
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="mse",bynm="cp",modelnm="model",
dat=parres40,ylim=ylim,maintit=paste0("N=40 - ",i),bigxlab="Censoring Proportion",bigylab="MSE" )
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="mse",bynm="cp",modelnm="model",
dat=parres90,ylim=ylim,maintit=paste0("N=90 - ",i),bigxlab="Censoring Proportion",bigylab="MSE" )
if(export) {dev.off()}
}
## Generate all Variance plots
# Same structure, using the var column.
qnms <- dimnames(resfin)$pred_quant[1:18]
for( i in qnms)
{
parres <- todf.par(i,resfin)
# true <- sprintf("%.2f", round(parres$true[1],2))
if(i %in% c("b1","b2")) { parres <- parres[!parres$model %in% c("CoxPH","MCPH") ,]}
if(i %in% c("curep_00","curep_10","curep_01","curep_11")) { parres <- parres[parres$model %in% c("MCAFT","MCPH") ,]}
parres40 <- parres[parres$n=="N40",]
parres90 <- parres[parres$n=="N90",]
ylim <- c(min(parres$var,na.rm=T),max(parres$var,na.rm=T))
if(export) { pdf(paste0(root,graphics,"Simulation Plots/Variance/",i,"_var.pdf"),height=8,width=8)}
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="var",bynm="cp",modelnm="model",
dat=parres40,ylim=ylim,maintit=paste0("N=40 - ",i),bigxlab="Censoring Proportion",bigylab="Variance" )
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="var",bynm="cp",modelnm="model",
dat=parres90,ylim=ylim,maintit=paste0("N=90 - ",i),bigxlab="Censoring Proportion",bigylab="Variance" )
if(export) {dev.off()}
}
## Generate all coverage rate plots
# Same structure, using covrate, with a reference line at the nominal 0.95.
qnms <- dimnames(resfin)$pred_quant[1:18]
for( i in qnms)
{
parres <- todf.par(i,resfin)
if(i %in% c("b1","b2")) { parres <- parres[!parres$model %in% c("CoxPH","MCPH") ,]}
if(i %in% c("curep_00","curep_10","curep_01","curep_11")) { parres <- parres[parres$model %in% c("MCAFT","MCPH") ,]}
parres40 <- parres[parres$n=="N40",]
parres90 <- parres[parres$n=="N90",]
ylim <- c(min(parres$covrate,na.rm=T),max(parres$covrate,na.rm=T))
if(export) { pdf(paste0(root,graphics,"Simulation Plots/Coverage Rate/",i,"_covrate.pdf"),height=8,width=8)}
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="covrate",bynm="cp",modelnm="model",
dat=parres40,ylim=ylim,maintit=paste0("N=40 - ",i),bigxlab="Censoring Proportion",
bigylab="Coverage Rate",h=0.95 )
trellis.plot( xvarnm="sdist", yvarnm="uor", measnm="covrate",bynm="cp",modelnm="model",
dat=parres90,ylim=ylim,maintit=paste0("N=90 - ",i),bigxlab="Censoring Proportion",
bigylab="Coverage Rate",h=0.95 )
if(export) {dev.off()}
}
##############################################################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
###### Making Pareto Charts and Pareto Fronts ########
#################################################################################
# Make pareto charts for RMSE and Bias
# For each quantity: fit a linear model of the metric on the design factors
# and their interactions with model, convert effect p-values to log-worths
# (via paretoprep) and draw a horizontal Pareto chart. Log-worths for all
# three metrics (bias, RMSE, SE) are accumulated in `alldat` for the 2D
# Pareto plots below.
# NOTE(review): `alldat` grows via rbind inside the loop -- fine at this
# size, but quadratic if the quantity list grows.
alldat <- NULL
qnms <- dimnames(resfin)$pred_quant[1:14]
for( i in qnms)
{
parres <- todf.par(i,resfin)
if(i %in% c("b1","b2")) { parres <- parres[!parres$model %in% c("CoxPH","MCPH") ,]}
#### bias ######
# fit linear model to the simulation results
mod <- lm(bias ~ model + n + uor + cp + sdist + model*n + model*uor + model*cp + model*sdist ,data=parres)
pprepdat <- paretoprep(mod,eflab)
if(export) { pdf(paste0(root,graphics,"Simulation Pareto Charts/Bias/pareto_",i,"_bias.pdf"),height=7,width=7)}
# Wide left margin for the effect labels on the y axis.
par(mar=c(5.1,12.1,4.1,2.1))
barplot( pprepdat$logworth, horiz=T,main=paste0("Pareto Chart: ",i," Bias"),xlab="Log Worth")
axis(2,at=pprepdat$loc*1.2-0.5,labels=pprepdat$lab,las=1,col.ticks="white")
box( bty = "L")
if(export) {dev.off()}
pprepdat$meas <- "bias"
pprepdat$par <- i
alldat <- rbind(alldat,pprepdat)
#### RMSE ######
parres$rmse <- sqrt(parres$mse)
mod <- lm(rmse ~ model + n + uor + cp + sdist + model*n + model*uor + model*cp + model*sdist ,data=parres)
pprepdat <- paretoprep(mod,eflab)
if(export) { pdf(paste0(root,graphics,"Simulation Pareto Charts/RMSE/pareto_",i,"_rmse.pdf"),height=7,width=7)}
par(mar=c(5.1,12.1,4.1,2.1))
barplot( pprepdat$logworth, horiz=T,main=paste0("Pareto Chart: ",i," RMSE"),xlab="Log Worth")
axis(2,at=pprepdat$loc*1.2-0.5,labels=pprepdat$lab,las=1,col.ticks="white")
box( bty = "L")
if(export) {dev.off()}
pprepdat$meas <- "rmse"
pprepdat$par <- i
alldat <- rbind(alldat,pprepdat)
# standard error
parres$se <- sqrt(parres$var)
mod <- lm(se ~ model + n + uor + cp + sdist + model*n + model*uor + model*cp + model*sdist ,data=parres)
pprepdat <- paretoprep(mod,eflab)
if(export) { pdf(paste0(root,graphics,"Simulation Pareto Charts/StdErr/pareto_",i,"_stderr.pdf"),height=7,width=7)}
par(mar=c(5.1,12.1,4.1,2.1))
barplot( pprepdat$logworth, horiz=T,main=paste0("Pareto Chart: ",i," stderr"),xlab="Log Worth")
axis(2,at=pprepdat$loc*1.2-0.5,labels=pprepdat$lab,las=1,col.ticks="white")
box( bty = "L")
if(export) {dev.off()}
pprepdat$meas <- "se"
pprepdat$par <- i
alldat <- rbind(alldat,pprepdat)
}
### Make 2D Pareto Chart (Frontier?) ####
# Reshape the accumulated log-worths into one row per effect/quantity with
# separate columns for the bias, RMSE and SE log-worths, then scatter-plot
# pairs of metrics; main effects are triangles (pch 17), interactions with
# model are circles (pch 16).
tmp <- alldat[,c("logworth","lab","meas","par")]
tmpbias <- tmp[tmp$meas=="bias",]
names(tmpbias) <- c("lw_bias","lab","meas","par")
tmprmse <- tmp[tmp$meas=="rmse",]
names(tmprmse) <- c("lw_rmse","lab","meas","par")
tmpse <- tmp[tmp$meas=="se",]
names(tmpse) <- c("lw_se","lab","meas","par")
par2dat <- merge(tmpbias,tmprmse,by=c("lab","par"),all=T)
par2dat <- merge(par2dat,tmpse,by=c("lab","par"),all=T)
labs <- c("Model","Censoring Proportion","Unobserved Rate","Sample Size","Survival Distribution",
"Model*Censoring Proportion","Model*Unobserved Rate","Model*Sample Size","Model*Survival Distribution")
pchs <- c(17,17,17,17,17,16,16,16,16)
cols <- c("black","red","steelblue","orange","purple","red","steelblue","orange","purple")
if(export) { pdf(paste0(root,graphics,"Simulation Pareto Charts/2d Pareto_biasbystderr.pdf"),height=9,width=9)}
# NOTE(review): plot("n", ...) looks like it was meant to be
# plot(0, type = "n", ...) to open an empty canvas -- confirm this runs
# on the target R version.
plot("n",ylim=c(0,max(par2dat$lw_se)),xlim=c(0,max(par2dat$lw_bias)),axes=F,
xlab="Log Worth(Bias)",ylab="Log Worth (Std Err)")
for( i in 1:length(labs))
{
tmp <- par2dat[par2dat$lab==labs[i],]
points(tmp$lw_bias,tmp$lw_se,pch=pchs[i],col=cols[i],cex=1.5)
}
axis(1);axis(2);box()
legend("topright",labs,pch=pchs,col=cols,bty="n")
if(export) {dev.off() }
if(export) { pdf(paste0(root,graphics,"Simulation Pareto Charts/2d Pareto_biasbyrmse.pdf"),height=9,width=9)}
plot("n",ylim=c(0,max(par2dat$lw_rmse)),xlim=c(0,max(par2dat$lw_bias)),axes=F,
xlab="Log Worth(Bias)",ylab="Log Worth (RMSE)")
for( i in 1:length(labs))
{
tmp <- par2dat[par2dat$lab==labs[i],]
points(tmp$lw_bias,tmp$lw_rmse,pch=pchs[i],col=cols[i],cex=1.5)
}
axis(1);axis(2);box()
legend("topright",labs,pch=pchs,col=cols,bty="n")
if(export) {dev.off() }
####################################################################################
aa721338654971d9bdfb6d356abbd749d438883c
|
aba5794905d20a12d0207026b7d843a5d81c31ad
|
/man/load_filtered.Rd
|
f6f33c2c5980e633929571c2687f1c6fc9b1489b
|
[] |
no_license
|
kgori/svfiltr
|
9097a4eba8c0792766d3b81f9e515d71b0e79980
|
106a22110cf10310fba7da38e95a389ca28acb54
|
refs/heads/master
| 2021-01-12T14:55:10.535079
| 2016-11-04T21:15:51
| 2016-11-04T21:15:51
| 68,912,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 294
|
rd
|
load_filtered.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svIO.R
\name{load_filtered}
\alias{load_filtered}
\title{Load a filtered data frame}
\usage{
load_filtered(filename)
}
\description{
Load a filtered data frame
}
|
7e0aa7769f74b8c47cb6bfd46dd8b0d569e78a79
|
80bace7c01fc4fb4e0a0a1742da436b240c36349
|
/man/merge_rgSets.Rd
|
7287bfa09cd04325230813c42a4cbfb939de07ee
|
[] |
no_license
|
ttriche/miser
|
58b5995459b7586c4dcdde2d0cdb9d2224803c36
|
71953a0c463b59260a2ea14e3b17d8a698479f95
|
refs/heads/master
| 2023-07-20T00:53:55.058931
| 2023-07-10T14:43:39
| 2023-07-10T14:43:39
| 152,498,256
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 648
|
rd
|
merge_rgSets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge_rgSets.R
\name{merge_rgSets}
\alias{merge_rgSets}
\title{convenience function for merging large RGChannelSets}
\usage{
merge_rgSets(rgSet1, rgSet2)
}
\arguments{
\item{rgSet1}{the first RGChannelSet}
\item{rgSet2}{the second RGChannelSet}
}
\value{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ the result of cbind(rgSet1, rgSet2) with sensible metadata
}\if{html}{\out{</div>}}
}
\description{
Merges the metadata, drops it from each RGChannelSet, merges the RGChannelSets, then re-adds the combined metadata.
The RGChannelSets' metadata must have identical names, and platform must match.
}
|
157651fac4f3b133b663e496e7ed818ca2da4cbf
|
c0db54d7ec766ee9c087bcf967612a3c25169d7f
|
/R/calc.MMS.R
|
522e0d9baae9cfc6a9a089cc0c3a9b89abb0a810
|
[] |
no_license
|
dataspekt/crodi
|
5c7b78be89a446748e92f3f5d456a3ff33aa7e36
|
b423da4e18facba5c26dec1166379ab6208232e1
|
refs/heads/master
| 2020-03-20T19:47:26.787067
| 2018-06-17T12:42:04
| 2018-06-17T12:42:04
| 135,913,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
calc.MMS.R
|
#' Calculate min-max scaling
#'
#' Min-max-scales the indicator values to [0, 1] and expresses each scaled
#' value relative to the (identically scaled) reference value, so a result
#' of 1 means "exactly at the reference".
#'
#' @param x Indicator values.
#' @param refval Reference value.
#' @param reverse Reverse direction of indicator values (use when lower
#'   raw values are better).
#' @return Numeric vector of min-max-scaled values relative to `refval`.
calc.MMS <- function(x, refval, reverse = FALSE)
{
    if (reverse) {
        # Flip the direction of both the values and the reference.
        x <- 1 - x
        refval <- 1 - refval
    }
    x.scaled <- (x - min(x)) / (max(x) - min(x))
    ref.scaled <- (refval - min(x)) / (max(x) - min(x))
    x.scaled / ref.scaled
}
|
926ccfa8835af89260cb4f06e4d227e23c89e9c1
|
d26b1b446e18850cae39119828067d21a5079acd
|
/man/CBS_PBMC_array.Rd
|
6f9e42fcc1e84776a57669084d1a4b6b1df9f542
|
[] |
no_license
|
ziyili20/TOAST
|
c623dcb2c64a89f00f84fddfab759da62d6434a5
|
102b6b1fa801561976b070adf1e15378bde43f76
|
refs/heads/master
| 2022-08-31T11:28:40.598741
| 2022-08-24T19:35:31
| 2022-08-24T19:35:31
| 145,612,563
| 12
| 4
| null | 2021-07-20T06:07:43
| 2018-08-21T19:53:08
|
R
|
UTF-8
|
R
| false
| false
| 1,111
|
rd
|
CBS_PBMC_array.Rd
|
\name{CBS_PBMC_array}
\alias{CBS_PBMC_array}
\docType{data}
\title{
An example dataset for partial reference-free
cell composition estimation from tissue gene expression
}
\description{
The dataset contains 511 microarray gene expressions
for 20 PBMC samples (mixed_all) and PBMC microarray
reference for the matched 511 genes from 5 immune
cell types (LM_5). It also contains the true cell
compositions from cell sorting experiment (trueProp)
and prior knowledge of cell compositions for 5 cell types
in PBMC (prior_alpha and prior_sigma).
}
\usage{data("CBS_PBMC_array")}
\references{
Newman, Aaron M., et al. "Robust enumeration
of cell subsets from tissue expression profiles."
Nature methods 12.5 (2015): 453.
Rahmani, Elior, et al. "BayesCCE: a Bayesian
framework for estimating cell-type composition
from DNA methylation without the need for methylation
reference." Genome biology 19.1 (2018): 141.
}
\examples{
data("CBS_PBMC_array")
CBS_PBMC_array$mixed_all[1:5,1:5]
head(CBS_PBMC_array$LM_5,3)
head(CBS_PBMC_array$trueProp,3)
CBS_PBMC_array$prior_alpha
CBS_PBMC_array$prior_sigma
}
\keyword{datasets}
|
5b6c203be0b0613d1d0a415c36d1fb0ca55d65ba
|
bdd4a0cf241425e857757b95afbf072ee017cf25
|
/reproduce/doubleeffect-est-heuristic.R
|
6a0de512f9b92ab4cbb93c2d6c9980cd87058ff7
|
[] |
no_license
|
CMLennon/WERM
|
aac17916b957792b14b6a1eb233b8182adad1586
|
9fe27c49a6db62b2e29f775e641aa11f6bbc0834
|
refs/heads/master
| 2022-12-30T13:36:58.092893
| 2020-10-22T05:56:22
| 2020-10-22T05:56:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,845
|
r
|
doubleeffect-est-heuristic.R
|
library(xgboost)
library(boot)
source('WERM_Heuristic.R')
## multiHeuristic: heuristic WERM estimation for the double-effect setting.
##
## OBS is expected to hold, in column order: D covariate columns (W),
## then X, R, Z, Y (all extracted positionally below).
## numCate is not referenced in this body -- presumably kept for a shared
## caller signature; TODO confirm with callers.
## Returns c(Yx0r0, Yx0r1, Yx1r0, Yx1r1): WERM_Heuristic estimates for the
## four (X, R) treatment combinations.
## NOTE(review): depends on learnXG / WERM_Heuristic (from WERM_Heuristic.R)
## and xgboost's predict method; not runnable standalone.
multiHeuristic = function(OBS,D,numCate){
  ################################
  # Data Setup
  ################################
  W = OBS[,1:D]
  X = OBS[,(D+1)]; X0 = rep(0,length(X)); X1 = rep(1,length(X));
  R = OBS[,(D+2)]
  Z = OBS[,(D+3)]
  Y = OBS[,(D+4)]
  DATA = data.frame(W,X,R,Z,Y)
  # Y is treated as continuous in the heuristic calls below (Ybinary = 0).
  myYbinary = 0
  ################################################################
  # Evaluate the weight hat{W}
  # See Eq. (A.5)
  # W = P^{W}(y|w,r,z)P(x)P(r)P(z|w,x)/(P(x|w)P(r|w)P(z|w,x,r)P(y|w,x,r,z)
  ################################################################
  # Learn P(x)
  Prob.X = X*mean(X) + (1-X)*(1-mean(X))
  # Learn P(r)
  Prob.R = R*mean(R) + (1-R)*(1-mean(R))
  # Learn P(x|w)
  model.X.w = learnXG(inVar = data.matrix(data.frame(W=W)), labelval = X, regval = rep(0,length(X)))
  # model.X.w = learnXG_Planid(DATA,c(1:D),X,rep(0,length(X)))
  pred.X.w = predict(model.X.w, newdata=data.matrix(data.frame(W=W)),type='response')
  Prob.X.w = X*pred.X.w + (1-X)*(1-pred.X.w)
  Prob.X0.w = (1-pred.X.w)
  Prob.X1.w = pred.X.w
  # Learn P(r|w)
  model.R.w = learnXG(inVar = data.matrix(data.frame(W=W)), labelval = R, regval = rep(0,length(R)))
  # model.R.w = learnXG_Planid(DATA,c(1:D),R,rep(0,length(R)))
  pred.R.w = predict(model.R.w, newdata=data.matrix(data.frame(W=W)),type='response')
  Prob.R.w = R*pred.R.w + (1-R)*(1-pred.R.w)
  # Learn P(z|w,x,r)
  model.Z.wxr = learnXG(inVar = data.matrix(data.frame(W=W,X=X,R=R)), labelval = Z, regval = rep(0,length(Z)))
  # model.Z.wxr = learnXG_Planid(DATA,c(1:(D+2)),Z,rep(0,length(Z)))
  pred.Z.wxr = predict(model.Z.wxr, newdata=as.matrix(data.frame(W=W,X=X,R=R)),type='response')
  Prob.Z.wxr = Z*pred.Z.wxr + (1-Z)*(1-pred.Z.wxr)
  # Learn P(z)
  Prob.Z = Z * mean(Z) + (1-Z)*(1-mean(Z))
  # Learn P^{Wd}(y|w,r,z)
  # Ybox = rep(0,nrow(DATA))
  # bootstrap_iter = 10
  # for (idx in 1:bootstrap_iter){
  #   sampled_df = WERM_Sampler(DATA,(Prob.R*Prob.Z)/(Prob.R.w*Prob.Z.wxr))
  #   # Learn Pw(y|w,z)
  #   model.Yw.wzr = learnXG_Planid(sampled_df,c(1:D,(D+2),(D+3)),Y,rep(0,length(X)))
  #   pred.Yw.wzr = predict(model.Yw.wzr, newdata=data.matrix(data.frame(W,R,Z)),type='response')
  #   Prob.Yw.wzr = Y*pred.Yw.wzr + (1-Y)*(1-pred.Yw.wzr)
  #   Ybox = Ybox + Prob.Yw.wzr
  # }
  # Prob.Yw.wzr = Ybox/bootstrap_iter
  # Learn P(z|w,x)
  model.Z.wx = learnXG(inVar = data.matrix(data.frame(W=W,X=X)), labelval = Z, regval = rep(0,length(Z)))
  # model.Z.wx = learnXG_Planid(DATA,c(1:(D+1)),Z,rep(0,length(Z)))
  # pred.Z.wx = predict(model.Z.wx, newdata=data.matrix(DATA[,c(1:(D+1))]),type='response')
  pred.Z.wx = predict(model.Z.wx, newdata=as.matrix(data.frame(W=W,X=X)),type='response')
  Prob.Z.wx = Z*pred.Z.wx + (1-Z)*(1-pred.Z.wx)
  # Learn P(y|w,x,r,z), with predictions under observed X, X=0 and X=1.
  lambda_y = rep(0,nrow(OBS))
  model.Y.wxrz = learnXG(inVar = data.matrix(data.frame(W=W,X=X,R=R,Z=Z)), labelval = Y, regval = lambda_y)
  # model.Y.wxrz = learnXG_Planid(DATA,c(1:(D+3)),Y,rep(0,length(Y)))
  pred.Y.wxrz = predict(model.Y.wxrz, newdata=data.matrix(data.frame(W=W,X=X,R=R,Z=Z)),type='response')
  pred.Y.wx0rz = predict(model.Y.wxrz, newdata=data.matrix(data.frame(W=W,X=X0,R=R,Z=Z)),type='response')
  pred.Y.wx1rz = predict(model.Y.wxrz, newdata=data.matrix(data.frame(W=W,X=X1,R=R,Z=Z)),type='response')
  # Prob.Y.wxrz = Y*pred.Y.wxrz + (1-Y)*(1-pred.Y.wxrz)
  # NOTE(review): the denominator uses the X=1 prediction, not the
  # observed-X likelihood commented out above -- confirm this is intended.
  Prob.Y.wxrz = pred.Y.wx1rz
  # Compute \hat{W}
  W_importance = (Prob.X* Prob.R * Prob.Z.wx * (Prob.X0.w*pred.Y.wx0rz + Prob.X1.w*pred.Y.wx1rz))/(Prob.X.w * Prob.R.w * Prob.Z.wxr * Prob.Y.wxrz)
  learned_W = W_importance
  # Fixed regularisation weight of 0.5 per observation for the heuristic fit.
  lambda_h = rep(0.5,nrow(OBS))
  # NOTE(review): X0R0..X1R1 are built but never used below; the evaluation
  # frames are recreated inline in each WERM_Heuristic call.
  X0R0 = data.matrix(data.frame(X=rep(0,nrow(DATA)),R=rep(0,nrow(DATA))))
  X0R1 = data.matrix(data.frame(X=rep(0,nrow(DATA)),R=rep(1,nrow(DATA))))
  X1R0 = data.matrix(data.frame(X=rep(1,nrow(DATA)),R=rep(0,nrow(DATA))))
  X1R1 = data.matrix(data.frame(X=rep(1,nrow(DATA)),R=rep(1,nrow(DATA))))
  # One weighted-ERM fit per (X, R) combination.
  Yx0r0 = WERM_Heuristic(inVar_train = data.frame(X=X, R=R), inVar_eval = data.frame(X=rep(0,nrow(DATA)),R=rep(0,nrow(DATA))), Y = Y, Ybinary = myYbinary, lambda_h = lambda_h, learned_W= learned_W)
  Yx0r1 = WERM_Heuristic(inVar_train = data.frame(X=X, R=R), inVar_eval = data.frame(X=rep(0,nrow(DATA)),R=rep(1,nrow(DATA))), Y = Y, Ybinary = myYbinary, lambda_h = lambda_h, learned_W= learned_W)
  Yx1r0 = WERM_Heuristic(inVar_train = data.frame(X=X, R=R), inVar_eval = data.frame(X=rep(1,nrow(DATA)),R=rep(0,nrow(DATA))), Y = Y, Ybinary = myYbinary, lambda_h = lambda_h, learned_W= learned_W)
  Yx1r1 = WERM_Heuristic(inVar_train = data.frame(X=X, R=R), inVar_eval = data.frame(X=rep(1,nrow(DATA)),R=rep(1,nrow(DATA))), Y = Y, Ybinary = myYbinary, lambda_h = lambda_h, learned_W= learned_W)
  WERManswer = c(Yx0r0,Yx0r1,Yx1r0,Yx1r1)
  return(WERManswer)
}
|
cbd45d7b83b64e718de502c14acfb00e4e178236
|
f20346056c31fbf071e862897ccdef3dcb9a129e
|
/R/dist_matrix.R
|
cc8e6441e2de05062fbb93f61d996c03c9ca9bd1
|
[
"CC0-1.0"
] |
permissive
|
kbroman/Talk_CTC2019
|
38758a75bdae92bd412b44632b54c194d0032ed4
|
77e23f7ea71e130d15a6aac7944cae229aa341a5
|
refs/heads/master
| 2020-05-29T23:32:55.456873
| 2019-06-10T20:39:52
| 2019-06-10T20:39:52
| 189,437,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
dist_matrix.R
|
# image of distance matrix
# Builds (or loads a cached copy of) a genomic-vs-microbiome sample distance
# matrix and renders it as a grayscale image in a PDF.
file <- "../Data/dist_matrix.rds"
if(file.exists(file)) {
    # Reuse the cached matrix when available.
    d <- readRDS(file)
} else {
    # z: one result object per microbiome sample; each element is indexed
    # over genomic samples (structure inferred from the indexing below --
    # confirm against sample_results_allchr.rds).
    z <- readRDS("../Data/sample_results_allchr.rds")
    d <- matrix(nrow=length(z), ncol=nrow(z[[1]]))
    rownames(d) <- names(z)
    colnames(d) <- rownames(z[[1]])
    for(i in seq_along(z)) {
        # Mismatch proportion: off-diagonal counts a[1,2] and a[3,1] out of
        # the totals of rows 1 and 3.
        x <- apply(z[[i]], 1, function(a) (a[1,2]+a[3,1])/(sum(a[1,]) + sum(a[3,])))
        d[names(z)[i], names(x)] <- x
    }
    # Order rows/columns numerically by the "DO-<n>" sample number.
    d <- d[order( as.numeric(sub("DO-", "", rownames(d)))), ]
    d <- d[, order( as.numeric(sub("DO-", "", colnames(d))))]
    saveRDS(d, file)
}
pdf("../Figs/dist_matrix.pdf", height=5.5, width=10, pointsize=16)
par(mar=c(3.1, 3.1, 1.1, 1.6), las=1)
# Gamma-adjusted gray ramp; t(d) because image() plots columns along x.
image(1:ncol(d), 1:nrow(d), t(d), col=gray(((0:256)/256)^(0.6)),
      xlab="genomic DNA sample", ylab="microbiome DNA sample",
      mgp=c(2.1, 0.5, 0))
dev.off()
|
54bb5bb250ac5032c96306f3da527bc9cd93f774
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/JumpTest/man/jumptestperiod.Rd
|
cbc83ae48304a4736394faa9d2d8eaa887e56ecb
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,283
|
rd
|
jumptestperiod.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testsim.R
\name{jumptestperiod}
\alias{jumptestperiod}
\title{Nonparametric jump test for a long period}
\usage{
jumptestperiod(retmat, method = "BNS")
}
\arguments{
\item{retmat}{log return matrix, with intervals saved in columns}
\item{method}{jump test methods, chosen from "BNS", "Amed", and "Amin"}
}
\value{
\item{stat}{test statistics}
\item{pvalue}{p-value}
\item{adjp}{adjusted p-values via 'BH' method}
}
\description{
Performs a nonparametric jump test for many intervals; the resulting statistics and p-values are saved in vectors.
}
\examples{
orip <- matrix(runif(3000),1000,3)
testres <- jumptestperiod(orip)
ts <- testres@stat
pv <- testres@pvalue
adjpv <- testres@adjp
}
\references{
Barndorff-Nielsen, O. E. and N. Shephard (2006). "Econometrics of testing for jumps in financial economics using bipower variation." Journal of financial Econometrics 4(1): 1-30.
Andersen, T. G., et al. (2012). "Jump-robust volatility estimation using nearest neighbor truncation." Journal of Econometrics 169(1): 75-93.
Dumitru, A.-M. and G. Urga (2012). "Identifying jumps in financial assets: a comparison between nonparametric jump tests." Journal of Business & Economic Statistics 30(2): 242-255.
}
|
53471262996608c3db899d53e9f2febc0aed1fc2
|
e77503b75af8918e1ad6529afe4781805b32fd8e
|
/R/ps.match.pscore.R
|
9d82d77c522bec2839e666e29bcad583ce765407
|
[] |
no_license
|
cran/nonrandom
|
ba91609f41e53b89415c1450582912c3b0f431d5
|
9341226967cd90d052dc2e32a7400a602bc404ae
|
refs/heads/master
| 2021-01-15T22:00:49.870115
| 2014-04-04T00:00:00
| 2014-04-04T00:00:00
| 17,719,145
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,635
|
r
|
ps.match.pscore.R
|
## #########################################################
## ps.match.pscore: match treated and control observations of
## an object of class 'pscore' on a (propensity) score.
## Adds the match index to the data, stores the matched subset
## and the matching parameters, and tags the object with class
## "matched.pscore".
## #########################################################
ps.match.pscore <- function(object,
                            object.control = NULL,
                            matched.by = NULL,
                            control.matched.by = matched.by,
                            who.treated = 1,
                            treat = NULL,
                            name.match.index = "match.index",
                            ratio = 1,
                            caliper = "logit",
                            x = 0.2,
                            givenTmatchingC = TRUE,
                            bestmatch.first = TRUE,
                            setseed = FALSE,
                            combine.output = TRUE)
{
  data <- object$data

  ## Refuse to overwrite an existing column of the same name.
  if (any(names(data) == name.match.index)) {
    stop(paste("Argument 'name.match.index' =",
               name.match.index,
               " already exists in data.", sep = ""))
  }

  ## Variable to match on: default to the stored propensity score,
  ## otherwise look it up in the data by name or column index.
  if (is.null(matched.by)) {
    match.vec <- object$pscore
    matched.by <- object$name.pscore
  } else {
    if (is.character(matched.by) | is.numeric(matched.by)) {
      sel_df <- find.sel(data = data,
                         sel = matched.by,
                         sel.name = "matched.by")
      match.vec <- sel_df[, 1]
      matched.by <- names(sel_df)[1]
    } else {
      stop("Argument 'matched.by' must be either numeric or a string.")
    }
  }

  ## Resolve the treatment variable and its factor levels.
  if (is.null(treat)) {
    name.treat <- object$name.treat
    treat <- data[, name.treat]
    treat.values <- levels(as.factor(treat))
  } else {
    if (is.character(treat) | is.numeric(treat)) {
      treat_info <- find.treat(data = data,
                               treat = treat)
      treat <- treat_info[[1]]
      name.treat <- treat_info[[2]]
      treat.values <- levels(as.factor(treat))
    } else {
      stop("Argument 'treat' has to be either numeric or a string.")
    }
  }

  if (any(treat.values == who.treated)) {
    tvect <- data[, name.treat] == treat.values[treat.values == who.treated]  ## TRUE if treated
    cvect <- data[, name.treat] == treat.values[treat.values != who.treated]  ## TRUE if control
  } else {
    stop("Who was treated? Define argument 'who.treated'.")
  }

  ## Split the data into treated and control subsets.
  data1 <- data[tvect, ]
  data2 <- data[cvect, ]

  ## Delegate the actual caliper matching.
  match <- ps.matchcaliper(vect1 = data1[, matched.by],
                           vect2 = data2[, matched.by],
                           ratio,
                           caliper,
                           x,
                           givenTmatchingC,
                           bestmatch.first,
                           setseed)

  ## Write the pair indices back into a new column of the data
  ## (rows with missing treatment indicators are filled as well).
  data[, name.match.index] <- rep(NA, nrow(data))
  tvect[is.na(tvect)] <- cvect[is.na(cvect)] <- TRUE
  data[tvect == TRUE, name.match.index] <- match$pairvect1
  data[cvect == TRUE, name.match.index] <- match$pairvect2
  match.index <- data[, name.match.index]

  match.parameters <- list(caliper = match$caliper,
                           ratio = match$ratio,
                           who.treated = who.treated,
                           givenTmatchingC = match$givenTmatchingC,
                           bestmatch.first = match$bestmatch.first)

  ## Assemble the enriched object; index 0 marks unmatched rows.
  object$data <- data
  object$data[, name.match.index] <- match.index
  object$data.matched <- data[data[, name.match.index] != 0, ]
  object$name.match.index <- name.match.index
  object$match.index <- match.index
  object$match.parameters <- match.parameters
  object$matched.by <- matched.by
  object$treat <- treat            ## treat / name.treat may have been
  object$name.treat <- name.treat  ## modified above
  class(object) <- c("matched.pscore",
                     class(object)[class(object) != "matched.pscore"])
  return(object)
}
|
3bf39cb34abc34f977d7a864d6a5931c800b61f1
|
9b78537a128feef02ff8249e1226b3eeb11156bd
|
/rScripts/oplsWithDiffAna.R
|
e8096bc553bc96f6d7efd810c067a1d4a7be277b
|
[] |
no_license
|
yz8169/p3bacter
|
0c3a0624bdad9bf6e02b1a18de442a3e8577c83c
|
8f94634f7e286c523dd3c677287884c324d6f1e3
|
refs/heads/master
| 2021-01-26T01:25:33.254726
| 2020-02-26T12:28:31
| 2020-02-26T12:28:31
| 243,256,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,685
|
r
|
oplsWithDiffAna.R
|
# OPLS(-DA) modelling with differential analysis.
# Reads a feature table ("deal.txt") and a sample grouping ("group.txt"),
# fits an (O)PLS(-DA) model with ropls, plots it, runs per-feature
# t / Wilcoxon tests, and writes filtered results to dataOut.txt.
# Created by: yz, 2018/3/20
library(optparse)

# ---- Command-line options -------------------------------------------------
option_list <- list(
  make_option("--v", type = 'character', action = "store", default = "1", help = "vip Cutoff"),
  make_option("--l", type = 'logical', action = "store", default = "F", help = "log 10 transform"),
  make_option("--s", type = 'character', action = "store", default = "pareto", help = "Scaling"),
  make_option("--p", type = 'integer', action = "store", default = "20", help = "Number of permutations"),
  make_option("--t", type = 'character', action = "store", default = "summary", help = "Graphic type"),
  make_option("--pa", type = 'logical', action = "store", default = "T", help = "Group ecllipse "),
  make_option("--par", type = 'numeric', action = "store", default = "0.8", help = "Amount by which plotting text should be magnified relative to the default "),
  make_option("--lo", type = 'numeric', action = "store", default = "1", help = "logFC threshold"),
  make_option("--paired", default = F, type = "logical", help = "T test paired"),
  make_option("--pCutoff", default = 0.05, type = "numeric", help = "P value cut off"),
  make_option("--qCutoff", default = 1, type = "numeric", help = "fdr value cut off"),
  make_option("--m", default = "tTest", type = "character", help = "test method"),
  make_option("--ve", default = F, type = "logical", help = "var equal"),
  make_option("--cvi", default = "7", type = "integer", help = "number of cross-validation segments"),
  make_option("--oi", default = "NA", type = "character", help = "number of orthogonal components")
)
opt <- parse_args(OptionParser(option_list = option_list))

# ---- Input data -----------------------------------------------------------
# Feature table: rows = features, columns = samples.
xMN <- read.table(quote = "", "deal.txt", check.names = FALSE, header = TRUE,
                  row.names = 1, sep = "\t", comment.char = "")
tmpDf <- xMN
# Sample metadata with a 'Group' column; row names are sample names.
samDF <- read.table(quote = "", "group.txt", check.names = FALSE, header = TRUE,
                    row.names = 1, comment.char = '', sep = "\t")
sampleNames <- rownames(samDF)
sampleSize <- length(sampleNames)
# Keep only samples present in the metadata, then transpose so that
# rows = samples and columns = features (the orientation ropls expects).
xMN <- xMN[rownames(samDF)]
xMN <- t(xMN)
yMCN <- matrix(samDF[, "Group"], ncol = 1, dimnames = list(rownames(xMN), "Group"))

# ---- (O)PLS-DA model ------------------------------------------------------
predI <- 1
algoC <- "nipals"
# Number of orthogonal components; "NA" lets ropls choose automatically.
orthoI <- opt$oi
if (orthoI == "NA") {
  orthoI <- NA
} else {
  orthoI <- as.integer(orthoI)
}
crossvalI <- opt$cvi
log10L <- opt$l
scaleC <- opt$s
permI <- opt$p
library(ropls)
ropLs <- opls(x = xMN, y = yMCN, predI = predI, orthoI = orthoI,
              algoC = algoC, crossvalI = crossvalI, log10L = log10L,
              permI = permI, scaleC = scaleC, subset = NULL,
              printL = FALSE, plotL = FALSE)
modC <- ropLs@typeC
sumDF <- getSummaryDF(ropLs)
desMC <- ropLs@descriptionMC
scoreMN <- getScoreMN(ropLs)
loadingMN <- getLoadingMN(ropLs)
vipVn <- coeMN <- orthoScoreMN <- orthoLoadingMN <- orthoVipVn <- NULL
vipVn <- getVipVn(ropLs)
coeMN <- coef(ropLs)
orthoScoreMN <- getScoreMN(ropLs, orthoL = TRUE)
orthoLoadingMN <- getLoadingMN(ropLs, orthoL = TRUE)
orthoVipVn <- getVipVn(ropLs, orthoL = TRUE)

# ---- Model plot -----------------------------------------------------------
parCompVi <- c(1, 2)
parAsColFcVn <- NA
parCexN <- opt$par
parEllipsesL <- opt$pa
parLabVc <- NA
ploC <- opt$t
plot(ropLs, typeVc = ploC, parAsColFcVn = parAsColFcVn, parCexN = parCexN,
     parCompVi = parCompVi, parEllipsesL = parEllipsesL, parLabVc = parLabVc,
     file.pdfC = "figure.pdf")

# ---- Scores / loadings selected for export --------------------------------
rspModC <- gsub("-", "", modC)
rspModC <- paste0("", rspModC)
if (sumDF[, "pre"] + sumDF[, "ort"] < 2) {
  tCompMN <- scoreMN
  pCompMN <- loadingMN
} else {
  if (sumDF[, "ort"] > 0) {
    if (parCompVi[2] > sumDF[, "ort"] + 1) {
      stop("Selected orthogonal component for plotting (ordinate) exceeds the total number of orthogonal components of the model", call. = FALSE)
    }
    # Abscissa: first predictive component; ordinate: chosen orthogonal one.
    tCompMN <- cbind(scoreMN[, 1], orthoScoreMN[, parCompVi[2] - 1])
    pCompMN <- cbind(loadingMN[, 1], orthoLoadingMN[, parCompVi[2] - 1])
    colnames(pCompMN) <- colnames(tCompMN) <- c("h1", paste("o", parCompVi[2] - 1, sep = ""))
  } else {
    if (max(parCompVi) > sumDF[, "pre"]) {
      stop("Selected component for plotting as ordinate exceeds the total number of predictive components of the model", call. = FALSE)
    }
    tCompMN <- scoreMN[, parCompVi, drop = FALSE]
    pCompMN <- loadingMN[, parCompVi, drop = FALSE]
  }
}

## x-scores and prediction
colnames(tCompMN) <- paste0(rspModC, "_XSCOR-", colnames(tCompMN))
tCompDF <- as.data.frame(tCompMN)[rownames(samDF), , drop = FALSE]
tesVl <- NULL
fitMCN <- fitted(ropLs)
colnames(fitMCN) <- paste0(rspModC, "_predictions")
fitDF <- as.data.frame(fitMCN)[rownames(samDF), , drop = FALSE]
tCompDF <- cbind.data.frame(tCompDF, fitDF)
samDF <- cbind.data.frame(samDF, tCompDF)

## x-loadings and VIP
colnames(pCompMN) <- paste0(rspModC, "_XLOAD-", colnames(pCompMN))
if (!is.null(vipVn)) {
  pCompMN <- cbind(pCompMN, vipVn)
  colnames(pCompMN)[ncol(pCompMN)] <- paste0(rspModC, "_VIP",
                                             ifelse(!is.null(orthoVipVn), "_pred", ""))
  if (!is.null(orthoVipVn)) {
    pCompMN <- cbind(pCompMN, orthoVipVn)
    colnames(pCompMN)[ncol(pCompMN)] <- paste0(rspModC, "_VIP_ortho")
  }
}
if (!is.null(coeMN)) {
  pCompMN <- cbind(pCompMN, coeMN)
  if (ncol(coeMN) == 1) {
    colnames(pCompMN)[ncol(pCompMN)] <- paste0(rspModC, "_COEFF")
  } else {
    colnames(pCompMN)[(ncol(pCompMN) - ncol(coeMN) + 1):ncol(pCompMN)] <- paste0(rspModC, "_", colnames(coeMN), "-COEFF")
  }
}
pCompDF <- as.data.frame(pCompMN)[]

# ---- Per-feature differential testing -------------------------------------
# Standard error of the mean.
std <- function(x) sd(x) / sqrt(length(x))
data <- tmpDf
uniq.group <- as.character(unique(samDF$Group))
group1 <- rownames(subset(samDF, Group == uniq.group[1]))
group2 <- rownames(subset(samDF, Group == uniq.group[2]))
group1 <- as.character(group1)
group2 <- as.character(group2)
mean1 <- apply(data[, group1], 1, mean)
okk <- as.data.frame(mean1)
rownames(okk) <- rownames(data)
okk$std1 <- apply(data[, group1], 1, std)
okk$mean2 <- apply(data[, group2], 1, mean)
okk$std2 <- apply(data[, group2], 1, std)
okk$logFC <- log2(okk$mean2 / okk$mean1)
for (i in 1:nrow(data)) {
  x <- as.numeric(data[i, group1])
  y <- as.numeric(data[i, group2])
  # Any test failure (e.g. constant data) falls back to p = 1.
  okk$p[i] <- tryCatch({
    if (opt$m == "tTest") {
      tets <- t.test(x, y, alternative = "two.sided", paired = opt$paired, var.equal = opt$ve)
    } else {
      tets <- wilcox.test(x, y, alternative = "two.sided", paired = opt$paired)
    }
    tets$p.value
  }, error = function(e) {
    1
  })
}
okk$fdr <- p.adjust(okk$p, method = "fdr", n = length(okk$p))
okk <- okk[order(okk[, "p"]), ]

## sampleMetadata
write.table(samDF, file = "sampleOut.txt", quote = FALSE, row.names = TRUE,
            col.names = NA, sep = "\t")

# ---- Assemble, filter and export the result table -------------------------
outDf <- cbind(okk, pCompDF, data[, group1], data[, group2])
tmpNames <- names(outDf[, group1])
names <- paste(tmpNames, "(", uniq.group[1], ")", sep = "")
names(outDf)[which(names(outDf) %in% tmpNames)] <- names
tmpNames <- names(outDf[, group2])
names <- paste(tmpNames, "(", uniq.group[2], ")", sep = "")
names(outDf)[which(names(outDf) %in% tmpNames)] <- names
# NOTE(review): opt$v is parsed as character, so this >= comparison coerces
# the numeric VIP column to character (lexicographic order) -- confirm.
outDf <- subset(outDf, OPLSDA_VIP_pred >= opt$v)
outDf <- subset(outDf, p < opt$pCutoff)
outDf <- subset(outDf, fdr < opt$qCutoff)
names(outDf)[names(outDf) == "mean1"] <- paste("mean(", uniq.group[1], ")", sep = "")
names(outDf)[names(outDf) == "std1"] <- paste("stderr(", uniq.group[1], ")", sep = "")
names(outDf)[names(outDf) == "mean2"] <- paste("mean(", uniq.group[2], ")", sep = "")
names(outDf)[names(outDf) == "std2"] <- paste("stderr(", uniq.group[2], ")", sep = "")
names(outDf)[names(outDf) == "p"] <- "t-test.p"
names(outDf)[names(outDf) == "fdr"] <- "t-test.fdr"
outDf <- subset(outDf, abs(logFC) > opt$lo)
names(outDf)[names(outDf) == "logFC"] <- paste("log2FC(", uniq.group[2], "/", uniq.group[1], ")", sep = "")
write.table(outDf, file = "dataOut.txt", quote = FALSE, row.names = TRUE,
            col.names = NA, sep = "\t")
|
f11cea199f6aebccb87587099109e332adbc49e3
|
fca29f054d4328f70a035e8cccecc9d6e18e966c
|
/Plot1.R
|
c57fefca54ab50e88c95eebe0732859e748acc57
|
[] |
no_license
|
rutlandneil/ExData_Plotting1
|
cb5314c084695a65feeb679fd819dd0f7b7d6d56
|
997a4cc78b639c23a2dd02c0036c8477dbab360c
|
refs/heads/master
| 2020-12-25T23:46:44.653644
| 2015-10-11T21:11:58
| 2015-10-11T21:11:58
| 44,062,500
| 0
| 0
| null | 2015-10-11T17:39:35
| 2015-10-11T17:39:34
| null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
Plot1.R
|
library(data.table)
library(dplyr)
library(lubridate)
# Set the location of the zip file in the current working directory.
zipLoc <- './exdata_data_household_power_consumption.zip'
# Unzip into a folder called exdata_data_household_power_consumption.
unzip(zipLoc)
# Read only the rows of interest: skip to the first relevant row and read
# exactly 2880 one-minute observations; '?' and empty strings become NA.
raw <- tbl_df(read.table('household_power_consumption.txt', header = FALSE, sep = ';'
                         , na.strings = c('?', ''), skip = 66637, nrows = 2880))
# Get and apply column headers from the first row of the data file.
# (Named 'header_row' rather than 'names' to avoid shadowing base::names.)
header_row <- read.table('household_power_consumption.txt', header = TRUE, sep = ';'
                         , na.strings = c('?', ''), nrows = 1)
names(raw) <- names(header_row)
# Convert the date into a Date class object so we can get the day name,
# then build a full date-time for each observation.
raw$Date <- as.Date(raw$Date, '%d/%m/%Y')
raw$DateTime <- paste(raw$Date, raw$Time)
raw$DateTime <- strptime(raw$DateTime, '%Y-%m-%d %H:%M:%S')
# Draw the first graph - a histogram of Global Active Power.
png(filename = "plot1.png", width = 480, height = 480)
par(bg = 'transparent')
# with(raw, ...) already evaluates inside 'raw', so reference the column
# directly (the original redundantly wrote raw$Global_active_power).
# Also fixes the 'killowatts' spelling in the x-axis label.
with(raw, hist(Global_active_power, col = 'Red', main = 'Global Active power'
               , xlab = 'Global Active power (kilowatts)', ylab = 'Frequency'))
dev.off()
|
31c45c79222de8a8c4af5b61e7f933ef047e3a43
|
6fa2802ec42c4e52baeeb0b5d61ad0cadbcd372b
|
/R/rmoutlier1d.R
|
c9be9e964aa9deb0ea4cf822e73222e9b0524a53
|
[] |
no_license
|
Taigi/l1kdeconv
|
6c8529b6b54ecbe07e08a270b457aef2200850cd
|
463499db63e7bd0839be883c7ccdc97cfa672200
|
refs/heads/master
| 2021-04-18T21:31:14.281323
| 2017-07-08T03:41:45
| 2017-07-08T03:41:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
rmoutlier1d.R
|
# vim: set noexpandtab tabstop=2:
#' Remove the Outliers in a Vector of 1D Coordinates
#'
#' Drops points that fall outside dense regions of a kernel density
#' estimate: density-grid positions whose probability mass exceeds
#' \code{dy_thr} are merged into clusters (separated by data-free gaps),
#' and only clusters holding at least \code{clustersize_thr} points are
#' kept.
#'
#' @param x a numeric vector
#' @param dy_thr the threshold for dy
#' @param clustersize_thr the threshold for cluster size
#' @param gapsize the threshold of points in recognizing data free gap
#' @keywords distribution
#' @export
#' @examples
#' x=c(1,10:30,50)
#' par(mfrow=c(2,1))
#' plot(density(x))
#' plot(density(rmoutlier1d(x)))
rmoutlier1d = function(x, dy_thr = dnorm(4), clustersize_thr = 3, gapsize = 10) {
  dens <- density(x)
  # Width of one density-grid bin.
  bin_width <- diff(dens$x[1:2])
  # Grid positions with enough probability mass, merged into clusters
  # whenever the gap between them is below gapsize bins.
  cluster_ranges <- getclusterranges(
    dens$x[dens$y * bin_width > dy_thr],
    gapsize * bin_width
  )
  # Collect the raw points that fall inside each cluster range.
  raw_clusters <- lapply(seq_len(nrow(cluster_ranges)), function(k) {
    x[cluster_ranges[k, 'left'] <= x & x <= cluster_ranges[k, 'right']]
  })
  # Discard clusters with too few members; return the surviving points.
  unlist(raw_clusters[sapply(raw_clusters, length) >= clustersize_thr])
}
|
c9f81a7888c5b298a8672c692edda3ecc3754306
|
e33ec1ba05866fd6655a9525b2f5632ed49e3d13
|
/R/am.smath.r
|
bf97fa7c0e831d7ae857ff175bdfa0700de2fb70
|
[] |
no_license
|
ameshkoff/amdata
|
d38070a6162ab736de20ff6c651272a7b5f7ac5e
|
1cb7e5c214c8fdf2e34ca71b74722dce3f9247a9
|
refs/heads/master
| 2020-12-24T11:52:40.715039
| 2017-08-22T18:23:20
| 2017-08-22T18:23:20
| 73,110,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 333
|
r
|
am.smath.r
|
#' Test for whole numbers
#'
#' Checks, element-wise and within a small tolerance, whether the values of
#' a numeric vector are whole numbers.
#'
#' @param x numeric
#' @param threshold numeric: tolerance, threshold
#' @return Logical vector, one element per value of \code{x}.
#' @seealso ...
#' @export
amm.is.wholenumber <- function(x
                               , threshold = .Machine$double.eps ^ 0.5) {
  deviation <- abs(x - round(x))
  deviation < threshold
}
|
d03f80bf1557d07f4cea12a0e4a2c6aaf17ed1a0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggTimeSeries/examples/stat_marimekko.Rd.R
|
9e41f7e9cd4d0120ee4f6038701f27559d625fd1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
stat_marimekko.Rd.R
|
library(ggTimeSeries)
### Name: stat_marimekko
### Title: Plot two categorical variables as marimekko
### Aliases: stat_marimekko
### ** Examples

# Marimekko plot of two bucketed random variables with increasing weights.
{
  library(ggplot2)
  ggplot(
    data.frame(
      x1 = round(3 * runif(10000), 0),
      y1 = pmax(pmin(round(3 * rnorm(10000), 0), 3), -3),
      weight = 1:10000
    )
  ) +
    stat_marimekko(
      aes(xbucket = x1, ybucket = y1, fill = factor(y1), weight = weight),
      xlabelyposition = 1.1,
      color = 'black'
    )
}
|
8d870b6db21f7694bb8cf84dd29e798634a96ec1
|
64e38921903014f892033a6c802cee381956c37c
|
/man/get_res_value.Rd
|
7f1100872346a3fc48cf57b08c7cb446902a424f
|
[
"MIT"
] |
permissive
|
scienceverse/scienceverse
|
53092891f145f456b02c351d848516271dffb013
|
2519e7af87eae9439828cd36d4468160cf6e09a5
|
refs/heads/master
| 2021-06-28T12:32:23.106786
| 2020-11-09T17:22:09
| 2020-11-09T17:22:09
| 182,833,527
| 31
| 2
|
NOASSERTION
| 2020-06-26T12:30:50
| 2019-04-22T17:17:36
|
HTML
|
UTF-8
|
R
| false
| true
| 408
|
rd
|
get_res_value.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_res_value}
\alias{get_res_value}
\title{Get value from results list}
\usage{
get_res_value(txt, results)
}
\arguments{
\item{txt}{text of result to check against names}
\item{results}{named list of results}
}
\value{
value from results list or the txt if not found
}
\description{
Get value from results list
}
|
dab4d94c16049795393d5ede301e0bd014033c48
|
dee361052b87ddd442608360f0dfecf765731859
|
/R/wallet-deposits.r
|
2d2b404e2addcd6a018cad333c14bfb101fc4654
|
[
"MIT"
] |
permissive
|
zamorarr/rcoinbase
|
b828d8779bd963883bcf5bb8f5449914bdd9c5d5
|
e3c97694c8cdefafab23e52b7f32e6558fd92957
|
refs/heads/master
| 2021-08-29T19:33:08.909098
| 2017-12-14T19:23:29
| 2017-12-14T19:23:29
| 112,635,105
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 726
|
r
|
wallet-deposits.r
|
#' List deposits
#'
#' Retrieves all deposits recorded against an account via the Coinbase
#' v2 API.
#'
#' @param account_id Account Id
#' @export
#' @family wallet-deposits
#' @references \url{https://developers.coinbase.com/api/v2#list-deposits}
get_deposits <- function(account_id) {
  coinbase_get(paste0("accounts/", account_id, "/deposits"))
}
#' Show a deposit
#'
#' Retrieves a single deposit of an account via the Coinbase v2 API.
#'
#' @param account_id Account Id
#' @param deposit_id Deposit Id
#' @export
#' @family wallet-deposits
#' @references \url{https://developers.coinbase.com/api/v2#show-a-deposit}
get_deposit <- function(account_id, deposit_id) {
  coinbase_get(paste0("accounts/", account_id, "/deposits/", deposit_id))
}
|
ac77e4840e20e338bdef4aa4ef311afb0a328955
|
60491b8d44eaa4ee02c7ae9d90d9d6991febbcd6
|
/code/24_7_study/food/food_data_preparation.R
|
4cf778dab2fa2fcb0975e15dd09b1190be0da23f
|
[
"MIT"
] |
permissive
|
jaspershen/microsampling_multiomics
|
ae2be38fe06679f43b980b76ea152109bbdd8fce
|
dea02f1148e5aad3243c057a98f565f889be302f
|
refs/heads/main
| 2023-04-14T17:44:20.010840
| 2022-09-05T23:23:26
| 2022-09-05T23:23:26
| 469,214,924
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
food_data_preparation.R
|
##
## Prepare the 24-7 study food-log data: extract food records from the
## combined omics/wearable/food object and split them into sample_info,
## variable_info and expression_data objects saved in the working dir.
## NOTE(review): no_function() looks like a leftover template call -- confirm.
no_function()
masstools::setwd_project()
setwd("data/24_7_study/food_log/data_preparation/")
library(tidyverse)
rm(list = ls())
load("../../all_omes_wear_food")
dim(all_omes_wear_food)
# Keep only the food records.
data =
  all_omes_wear_food %>%
  dplyr::filter(MolClass == "Food")
# Sanity check: rows with missing intensity (printed, not assigned).
data %>%
  dplyr::filter(is.na(Intensity))
data$SampleID = as.character(data$DT)
###check duplicated samples for
library(plyr)
# Print any (MolName, SampleID) combinations that occur more than once.
data %>%
  plyr::dlply(.variables = .(MolName, SampleID)) %>%
  purrr::map(function(x) {
    if (nrow(x) == 1) {
      return(NULL)
    } else{
      x
    }
  }) %>%
  do.call(rbind, .) %>%
  as.data.frame()
# One row per sample, with timing metadata and a fixed subject id.
sample_info <-
  data %>%
  dplyr::ungroup() %>%
  dplyr::select(
    sample_id = SampleID,
    accurate_time = DT,
    day = day,
    time = tod,
    hour = hour_of_day
  ) %>%
  dplyr::distinct(.keep_all = TRUE) %>%
  as.data.frame() %>%
  dplyr::mutate(subject_id = "Mike1") %>%
  dplyr::select(subject_id, sample_id, everything())
# One row per food item (name / class / subclass).
variable_info <-
  data %>%
  dplyr::ungroup() %>%
  dplyr::select(mol_name = MolName,
                class = MolClass,
                subclass = MolSubclass) %>%
  dplyr::distinct(.keep_all = TRUE) %>%
  as.data.frame()
# Wide intensity matrix: one column per sample, one row per food item.
expression_data <-
  data %>%
  dplyr::ungroup() %>%
  dplyr::select(sample_id = SampleID,
                mol_name = MolName,
                intensity = Intensity) %>%
  tidyr::pivot_wider(names_from = sample_id,
                     values_from = intensity) %>%
  as.data.frame()
# Row-order check against variable_info (printed, not assigned).
expression_data$mol_name == variable_info$mol_name
# Assign stable ids ("food_1", "food_2", ...) and use them as row names.
variable_info <-
  variable_info %>%
  dplyr::mutate(variable_id = paste("food", 1:nrow(variable_info), sep = "_")) %>%
  dplyr::select(variable_id, everything())
rownames(expression_data) <- variable_info$variable_id
expression_data <-
  expression_data %>%
  dplyr::select(-mol_name) %>%
  as.data.frame()
# expression_data %>%
#   apply(2, unlist)
dim(expression_data)
dim(variable_info)
dim(sample_info)
# Column order should match the sample metadata (printed, not assigned).
colnames(expression_data) == sample_info$sample_id
save(sample_info, file = "sample_info")
save(variable_info, file = "variable_info")
save(expression_data, file = "expression_data")
|
a01d97c333f7065575545dd509dffbece03ba16e
|
4798cb29678fb3e54a317ef28ff1ddaec260cb89
|
/HD_RGB_Flight_Height_Tool/old_scripts/Flight_Height.R
|
a728fc5cc8234776e51eb88c0207f707d3119f0f
|
[] |
no_license
|
HiDef-Aerial-Surveying/RBG_Flight_Height_Analysis
|
5dd481b3542edb662d75b67a020e24b06f1b97e8
|
167076025cc73526ae586e794bfcc4b7516fff78
|
refs/heads/master
| 2023-06-22T09:36:37.840795
| 2021-07-23T15:11:06
| 2021-07-23T15:11:06
| 320,638,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,145
|
r
|
Flight_Height.R
|
############################################################
### Flight height calculation module
### v 0.0.1
### Grant Humphries, Ruth Peters-Grundy
### April, 2020
### R version 3.6.3 "Holding the Windsock"
###########################################################
# Load libraries ----------------------------------------------------------
require(tidyverse)
require(readxl)
require(foreach)
require(magrittr)
require(HTSSIP) # <-- for the perc_overlap function
require(ggthemes)
# Read data sheets --------------------------------------------------------
# Turbine blade geometry: 'Value' is used below as rotor.range (its first
# two elements are taken as the lower/upper rotor-swept heights).
Turbine.inputs <- readxl::read_xlsx("./Input_data.xlsx",sheet="Blades")
rotor.range <- Turbine.inputs$Value
# Species biometrics; Min_length/Max_length are looked up by Species_Code.
Species.inputs <- readxl::read_xlsx("./Input_data.xlsx",sheet="Species")
#dat <- readxl::read_xlsx("./TEST_SHEET.xlsx", sheet="Data")
# Survey measurements for the current test reel.
dat <- readxl::read_xlsx("./KI_Ross_Test.xlsx", sheet="Sheet1")
# Load functions ----------------------------------------------------------
# Coefficient of variation (%) of a numeric vector, ignoring NAs.
calc_cv <- function(x) {
  100 * (sd(x, na.rm = TRUE) / mean(x, na.rm = TRUE))
}
# Flight height formula: bird height from aircraft altitude and the ratio
# of apparent (sea-surface reflected) size to measured size.
flight.height <- function(aircraft.height, reflected.size, measured.size) {
  size_ratio <- reflected.size / measured.size
  aircraft.height * (1 - size_ratio)
}
# Flags (1/0) whether a value meets a threshold criterion.
bird.range.thresh <- function(value, threshold = 1) {
  # A value of exactly 100 always counts, even when threshold >= 100.
  if (value > threshold || value == 100) {
    1
  } else {
    0
  }
}
# Clean data --------------------------------------------------------------
# Build the analysis table: drop rows with no plane height, add per-row
# summary statistics across the (up to) 8 frame measurements, then derive
# each bird's possible flight-height range and its overlap with the rotor.
##(Note: Re. filtering rows - I've done this for 'plane height' because if at least one bird measured then plane height is filled in - needs adjusting so definitely at least two frames measured )
data_mod <- dat %>%
  #select(Date,Camera,`Reel Name`,Frame,`Marker Number`,`Frame X`,`Frame Y`,`Plane Height`:`Frame 8`) %>%
  #select(Date,Camera,`Reel Name`,Frame,`Marker Number`,`Frame X`,`Frame Y`,`Plane Height`:`Frame 8`) %>%
  # NOTE(review): with `|` this only removes rows where `Plane Height` is NA;
  # rows holding an empty string pass the first condition and are kept. If
  # both should be dropped, the condition needs `&` -- confirm intent.
  dplyr::filter(!is.na(`Plane Height`) | `Plane Height` != "") %>%
  # Per-row variance, CV, mean and max of the measured bird size across the
  # 8 frames, ignoring frames with no measurement.
  mutate(variance = pmap_dbl(list(`Frame 1`,`Frame 2`,
                                  `Frame 3`,`Frame 4`,
                                  `Frame 5`,`Frame 6`,
                                  `Frame 7`,`Frame 8`),
                             function(...) var(c(...), na.rm = TRUE)),
         CV = pmap_dbl(list(`Frame 1`,`Frame 2`,
                            `Frame 3`,`Frame 4`,
                            `Frame 5`,`Frame 6`,
                            `Frame 7`,`Frame 8`),
                       function(...) calc_cv(c(...))),
         mean = pmap_dbl(list(`Frame 1`,`Frame 2`,
                              `Frame 3`,`Frame 4`,
                              `Frame 5`,`Frame 6`,
                              `Frame 7`,`Frame 8`),
                         function(...) mean(c(...), na.rm = TRUE)),
         # NOTE(review): max(..., na.rm = TRUE) returns -Inf (with a warning)
         # for rows where every frame is NA -- confirm such rows cannot occur.
         max = pmap_dbl(list(`Frame 1`,`Frame 2`,
                             `Frame 3`,`Frame 4`,
                             `Frame 5`,`Frame 6`,
                             `Frame 7`,`Frame 8`),
                        function(...) max(c(...), na.rm = TRUE))
         ) %>%
  ## Filter out the CV values that are too high (unreliable measurements)
  dplyr::filter(CV < 100) %>%
  ### Now we calculate the mean - 2 * SD of the Max bird length to eliminate values
  #dplyr::filter(max > (mean(max) - 2*sd(max))) %>%
  mutate(
    ## Apply the flight height formula to calculate the minimum height and maximum height that
    ## the bird could be flying at. A larger assumed true length (Max_length)
    ## puts the bird closer to the camera, hence the MINIMUM height, and
    ## vice versa for Min_length.
    Min_bird_height = pmap_dbl(list(`Plane Height`,
                                    Species.inputs$Max_length[Species.inputs$Species_Code == 'KI'],
                                    max),
                               flight.height),
    Max_bird_height = pmap_dbl(list(`Plane Height`,
                                    Species.inputs$Min_length[Species.inputs$Species_Code == 'KI'],
                                    max),
                               flight.height)
  ) %>%
  mutate(
    Flight_Height_Range = Max_bird_height - Min_bird_height,
    ## We calculate the percent overlap of the bird's height range with the
    ## rotor sweep (rotor.range[1]..rotor.range[2]).
    Percent_Overlap = pmap_dbl(list(Min_bird_height,Max_bird_height,
                                    rotor.range[1],rotor.range[2]),
                               HTSSIP:::perc_overlap)
  ) %>%
  # Flag (0/1) whether the overlap clears each IPCC-style likelihood
  # threshold; these feed the summary counts below. --------
  mutate(
    Thresh_100 = pmap_dbl(list(Percent_Overlap,100),bird.range.thresh),
    Thresh_99 = pmap_dbl(list(Percent_Overlap,99),bird.range.thresh),
    Thresh_90 = pmap_dbl(list(Percent_Overlap,90),bird.range.thresh),
    Thresh_66.6 = pmap_dbl(list(Percent_Overlap,66.6),bird.range.thresh),
    Thresh_33.3 = pmap_dbl(list(Percent_Overlap,33.3),bird.range.thresh),
    Thresh_10 = pmap_dbl(list(Percent_Overlap,10),bird.range.thresh),
    Thresh_1 = pmap_dbl(list(Percent_Overlap,1),bird.range.thresh),
    Thresh_0 = pmap_dbl(list(Percent_Overlap,0),bird.range.thresh)
  )
###################
# Summarize threshold values ----------------------------------------------
# For each IPCC-style likelihood threshold: how many bird height ranges are
# inside the potential collision height band ("in PCH"), how many are
# outside, and the proportion inside.
total_obs <- nrow(data_mod)

T100.inPCH  <- sum(data_mod$Thresh_100)
T99.inPCH   <- sum(data_mod$Thresh_99)
T90.inPCH   <- sum(data_mod$Thresh_90)
T66.6.inPCH <- sum(data_mod$Thresh_66.6)
T33.3.inPCH <- sum(data_mod$Thresh_33.3)
T10.inPCH   <- sum(data_mod$Thresh_10)
T1.inPCH    <- sum(data_mod$Thresh_1)
T0.inPCH    <- sum(data_mod$Thresh_0)

# Everything not flagged "in" is "out".
T100.outPCH  <- total_obs - T100.inPCH
T99.outPCH   <- total_obs - T99.inPCH
T90.outPCH   <- total_obs - T90.inPCH
T66.6.outPCH <- total_obs - T66.6.inPCH
T33.3.outPCH <- total_obs - T33.3.inPCH
T10.outPCH   <- total_obs - T10.inPCH
T1.outPCH    <- total_obs - T1.inPCH
T0.outPCH    <- total_obs - T0.inPCH

# Share of all observed birds whose height range is at collision height.
proportion_thresh_100  <- T100.inPCH / total_obs
proportion_thresh_99   <- T99.inPCH / total_obs
proportion_thresh_90   <- T90.inPCH / total_obs
proportion_thresh_66.6 <- T66.6.inPCH / total_obs
proportion_thresh_33.3 <- T33.3.inPCH / total_obs
proportion_thresh_10   <- T10.inPCH / total_obs
proportion_thresh_1    <- T1.inPCH / total_obs
proportion_thresh_0    <- T0.inPCH / total_obs
# Summarize data for plotting ---------------------------------------------
# Wide summary: one column per likelihood category; rows are the proportion,
# the count inside PCH, and the count outside PCH.
# NOTE(review): data.frame() mangles the backticked names via check.names
# (e.g. `Virtually certain` -> Virtually.certain); the plot below relabels
# the x axis manually, so this is cosmetic only.
outf <- data.frame(
  Var = c('proportion','in_PCH','out_PCH'),
  Certain = c(proportion_thresh_100,T100.inPCH,T100.outPCH),
  `Virtually certain` = c(proportion_thresh_99,T99.inPCH,T99.outPCH),
  `Very likely` = c(proportion_thresh_90,T90.inPCH,T90.outPCH),
  Likely = c(proportion_thresh_66.6,T66.6.inPCH,T66.6.outPCH),
  Unlikely = c(proportion_thresh_33.3,T33.3.inPCH,T33.3.outPCH),
  `Very unlikely` = c(proportion_thresh_10,T10.inPCH,T10.outPCH),
  `Exceptionally unlikely` = c(proportion_thresh_1,T1.inPCH,T1.outPCH),
  Impossible = c(proportion_thresh_0,T0.inPCH,T0.outPCH)
)
## Goes from WIDE to LONG for plotting (one row per Var x category pair).
outf2 <- reshape2::melt(outf, id.vars="Var")
# Bar chart: proportion of bird height ranges at collision risk height for
# each likelihood threshold, from most to least certain.
# NOTE(review): tbl_df() is deprecated in newer dplyr; tibble::as_tibble()
# is the modern equivalent.
P <- tbl_df(outf2) %>% dplyr::filter(Var=='proportion') %>%
  ggplot(aes(x=variable,y=value)) +
  geom_bar(stat='identity',fill="#f5aa42",color="black",width=0.5) +
  # NOTE(review): hard y limit of 0.8 -- ggplot removes bars whose value
  # exceeds a `limits` bound entirely; confirm proportions stay below 0.8.
  scale_y_continuous(expand=c(0,0),limits=c(0,0.8))+
  # Human-readable labels for the IPCC-style likelihood categories.
  scale_x_discrete(labels=c('Certain','Virtually certain',
                            'Very likely', 'Likely',
                            'Unlikely','Very unlikely',
                            'Exceptionally unlikely', 'Impossible'))+
  ylab("Proportion of bird height ranges at collision risk height") +
  xlab("Risk threshold")+
  ggthemes::theme_gdocs()+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
P
# Save the figure and show a quick diagnostic histogram of the raw overlaps.
ggsave("Bird_Height_Props.png",P,device="png",width=9,height=7,dpi=300)
hist(data_mod$Percent_Overlap)
# Write out a CSV table ---------------------------------------------------
# Export table: one column per likelihood category, rows give the threshold
# value, in/out counts, total birds, and proportion at collision height.
out_df <- data.frame(
  "IPCC description" = c("Threshold value", "Number inside collision height",
                         "Number outside collision height", "Total number of birds",
                         "Proportion at collision height"),
  "Certain" = c(1,T100.inPCH,T100.outPCH,T100.inPCH+T100.outPCH,proportion_thresh_100),
  "Virtually certain" = c(0.99,T99.inPCH,T99.outPCH,T99.inPCH+T99.outPCH,proportion_thresh_99),
  "Very likely" = c(0.9,T90.inPCH,T90.outPCH,T90.inPCH+T90.outPCH,proportion_thresh_90),
  "Likely" = c(0.666,T66.6.inPCH,T66.6.outPCH,T66.6.inPCH+T66.6.outPCH,proportion_thresh_66.6),
  "Unlikely" = c(0.333,T33.3.inPCH,T33.3.outPCH,T33.3.inPCH+T33.3.outPCH,proportion_thresh_33.3),
  "Very unlikely" = c(0.1,T10.inPCH,T10.outPCH,T10.inPCH+T10.outPCH,proportion_thresh_10),
  "Exceptionally unlikely" = c(0.01,T1.inPCH,T1.outPCH,T1.inPCH+T1.outPCH,proportion_thresh_1),
  "Impossible" = c(0,T0.inPCH,T0.outPCH,T0.inPCH+T0.outPCH,proportion_thresh_0)
)
# data.frame() mangles the quoted names via check.names, so restore the
# human-readable column headers before writing the CSV.
names(out_df) <- c("IPCC description","Certain", "Virtually certain", "Very likely",
                   "Likely","Unlikely","Very unlikely","Exceptionally unlikely",
                   "Impossible")
# NOTE(review): prefer row.names = FALSE over the reassignable shorthand F.
write.csv(out_df,"summary_Table.csv",row.names=F)
|
7fb82593ac54efe18e8cb5a9f131a58b0a5f4885
|
257fe6f1e3416c381e8eb8bcd2d7d3471a182213
|
/Week3/hw2.R
|
2896ab391cda48c39d996e3d106199206971a566
|
[] |
no_license
|
RobertCPhillips/EdxAnalyticsEdge
|
ede27095cc6600083c4b13139b82c87e434f2cb1
|
f03d4add0a4c016683c4fbf1892d9abf3a5a1ba4
|
refs/heads/master
| 2020-06-04T01:23:33.791474
| 2015-08-30T15:10:57
| 2015-08-30T15:10:57
| 40,155,589
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,617
|
r
|
hw2.R
|
# Logistic regression on the parole dataset (EdX Analytics Edge, HW2).
# Left byte-identical: set.seed() + sample.split() make the results depend
# on the exact statement order.
par <- read.csv("parole.csv")
str(par)
summary(par)
# male: 1 if the parolee is male, 0 if female
# race: 1 if the parolee is white, 2 otherwise
# age: the parolee's age (in years) when he or she was released from prison
# state: a code for the parolee's state. 2 is Kentucky, 3 is Louisiana, 4 is Virginia, and 1 is any other state. The three states were selected due to having a high representation in the dataset.
# time.served: the number of months the parolee served in prison (limited by the inclusion criteria to not exceed 6 months).
# max.sentence: the maximum sentence length for all charges, in months (limited by the inclusion criteria to not exceed 18 months).
# multiple.offenses: 1 if the parolee was incarcerated for multiple offenses, 0 otherwise.
# crime: a code for the parolee's main crime leading to incarceration. 2 is larceny, 3 is drug-related crime, 4 is driving-related crime, and 1 is any other crime.
# violator: 1 if the parolee violated the parole, and 0 if the parolee completed the parole without violation.
# Number of parole violators in the full dataset.
q2 <- sum(par$violator == 1)
# state and crime are codes, not quantities, so treat them as factors.
par$state <- as.factor(par$state)
par$crime <- as.factor(par$crime)
require(caTools)
# 70/30 train/test split, stratified on the outcome; seed fixed for
# reproducibility.
set.seed(144)
split <- sample.split(par$violator, SplitRatio = 0.7)
train <- subset(par, split == TRUE)
test <- subset(par, split == FALSE)
# Logistic regression of violation on all other variables.
par.mod1 <- glm(violator ~ ., data=train, family="binomial")
summary(par.mod1)
# Hypothetical parolee for the prediction question; factor levels must match
# the training data.
q43.data <- data.frame(male=1, race=1, age=50, state=factor(1,levels=c(1,2,3,4)), time.served=3, max.sentence=12, multiple.offenses=0, crime=factor(2,levels=c(1,2,3,4)))
#state2=0, state3=0, state4=0,
#crime2=1, crime3=0, crime4=0)
# Predicted probability (response scale) and log-odds (link scale).
q43.predict.res <- predict(par.mod1,q43.data,type="response")
q43.predict <- predict(par.mod1,q43.data)
# Odds and probability recovered manually from the log-odds.
q43.odds <- exp(q43.predict)
q43.prob <- 1/(1+exp(-q43.predict))
#q43.predict.res/(1-q43.predict.res)
# Evaluate on the held-out test set.
par.mod1.test <- predict(par.mod1,test,type="response")
max(par.mod1.test)
# Confusion matrix at a 0.5 cutoff; rows = actual, columns = predicted.
par.mod1.t <- table(test$violator, par.mod1.test >= .5)
par.mod1.sens <- par.mod1.t[2,2]/(par.mod1.t[2,2] + par.mod1.t[2,1])
par.mod1.spec <- par.mod1.t[1,1]/(par.mod1.t[1,1] + par.mod1.t[1,2])
par.mod1.acc <- (par.mod1.t[1,1]+par.mod1.t[2,2])/sum(par.mod1.t)
table(test$violator)
# Alternative cutoffs (0.75 and 0.25) for comparison.
par.mod1.t2 <- table(test$violator, par.mod1.test >= .75)
par.mod1.t3 <- table(test$violator, par.mod1.test >= .25)
par.mod1.acc3 <- (par.mod1.t3[1,1]+par.mod1.t3[2,2])/sum(par.mod1.t3)
# ROC curve and area under the curve.
require(ROCR)
par.mod1.rocr <- prediction(par.mod1.test, test$violator)
par.mod1.perf <- performance(par.mod1.rocr, "tpr", "fpr")
par.mod1.auc <- as.numeric(performance(par.mod1.rocr, "auc")@y.values)
|
dabea44efbe2213f3ecb9b3c2bf883f22ecb5058
|
b8dbee4b91b48121bff4329ce2f37c89d8836290
|
/seqUtils/man/importFastQTLTable.Rd
|
5471b0ce3a2980c5aa2aed3003f0636cfe0fe0ea
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/macrophage-tuQTLs
|
18cc359c9052bd0eab45bd27f1c333566fb181d8
|
3ca0b9159f3e5d7d1e0a07cdeadbeb492e361dcb
|
refs/heads/master
| 2021-03-27T19:29:12.456109
| 2019-02-19T13:05:26
| 2019-02-19T13:05:26
| 93,025,290
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 484
|
rd
|
importFastQTLTable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtl_fastqtl.R
\name{importFastQTLTable}
\alias{importFastQTLTable}
\title{Import fastQTL output table into R.}
\usage{
importFastQTLTable(file_path)
}
\arguments{
\item{file_path}{Path to the fastQTL output file}
}
\value{
data_frame containing gene_ids, snp ids and p-values.
}
\description{
Detects whether the table comes from a nominal run or a permutation run and adds the appropriate column names.
}
\author{
Kaur Alasoo
}
|
5b1df12f07406750459e3d34876c135be84b2bad
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/MXM/R/pc.skel.R
|
ec91a9411ee5750593584f9761a075c82f1a2154
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,320
|
r
|
pc.skel.R
|
pc.skel <- function(dataset, method = "pearson", alpha = 0.05, rob = FALSE, R = 1, graph = FALSE) {
  ## Estimate the skeleton of a Bayesian network via the PC algorithm.
  ##
  ## dataset : data matrix; rows = observations, columns = variables.
  ## method  : "pearson" or "spearman" for continuous data; any other value
  ##           (e.g. "cat") uses the categorical test cat.ci().
  ## alpha   : significance level (raw scale; converted to the log scale
  ##           below, since all p-values in this function are logged).
  ## rob     : robust correlation estimation; honoured only for "pearson".
  ## R       : number of permutations; R > 1 switches to permutation tests.
  ## graph   : if TRUE, plot the skeleton (requires the Rgraphviz package).
  ##
  ## Returns a list: stat (pairwise statistics), pvalue (logged p-values),
  ## runtime, kappa (max conditioning set size reached), n.tests (tests per
  ## level), density, info (degree summary), G (adjacency matrix, 1 = edge),
  ## sepset (separating sets) and title.
  alpha <- log(alpha)
  title <- deparse(substitute(dataset))

  ## Spearman is Pearson applied to ranks, so rank-transform the data once
  ## and let every downstream test operate on the ranks.
  ## (FIX: the ranks were previously stored in an unused variable `dat`,
  ## so the marginal tests ran on the raw data despite method = "spearman".)
  if (method == "spearman") {
    dataset <- apply(dataset, 2, rank)
    rob <- FALSE                    # robust estimation is meaningless on ranks
  }

  if (method == "spearman" || method == "pearson") {
    type <- method                  # forwarded (lazily) to the conditional test
    if (R == 1) {
      ci.test <- condi              # asymptotic conditional independence test
    } else {
      ci.test <- condi.perm         # permutation-based version
    }
  } else {
    ci.test <- cat.ci               # categorical data
    type <- NULL
    rob <- FALSE
  }

  n <- ncol(dataset)                # number of variables
  m <- nrow(dataset)                # sample size
  k <- 0                            # current conditioning set size
  ## Working skeleton: 2 = edge still present, 0 = edge removed.
  G <- matrix(2, n, n)
  diag(G) <- -100                   # sentinel: the diagonal is never an edge
  durat <- proc.time()

  ## ---- Step 1: marginal (unconditional) tests for every pair ----
  if (method == "pearson" || method == "spearman") {
    if (R == 1) {
      if (!rob) {
        r <- cor(dataset)           # for Spearman, `dataset` already holds ranks
        ## Fisher's z-transformed statistic; Spearman needs the 1.029563
        ## variance correction.
        stata <- abs(0.5 * log((1 + r) / (1 - r)) * sqrt(m - 3))
        if (method == "spearman")  stata <- stata / 1.029563
        pv <- pvalue <- log(2) + pt(stata, m - 3, lower.tail = FALSE, log.p = TRUE)
        dof <- matrix(m - 3, n, n)
        diag(stata) <- diag(dof) <- 0
        stadf <- stata / dof
      } else {
        ## Robust Pearson correlations, pair by pair.
        stat <- pv <- matrix(0, n, n)
        for (i in 1:(n - 1)) {
          for (j in (i + 1):n) {
            ro <- condi(i, j, 0, dataset, type = "pearson", rob = TRUE)
            stat[i, j] <- ro[1]
            pv[i, j] <- ro[2]
          }
        }
        pvalue <- pv + t(pv)
        stata <- stat + t(stat)
        dof <- matrix(m - 3, n, n)
        stadf <- stata / dof
      }
    } else {
      ## Permutation-based marginal tests.
      stat <- pv <- matrix(0, n, n)
      for (i in 1:(n - 1)) {
        for (j in (i + 1):n) {
          ro <- permcor(dataset[, c(i, j)], R = R)
          stat[i, j] <- ro$result[1]
          pv[i, j] <- log(ro$result[2])
        }
      }
      pvalue <- pv + t(pv)
      stata <- stat + t(stat)
      dof <- matrix(m - 3, n, n)
      stadf <- stata / dof
    }
  } else {
    ## Categorical data: statistics and degrees of freedom from cat.ci().
    stat <- pv <- dof <- matrix(0, n, n)
    for (i in 1:(n - 1)) {
      for (j in (i + 1):n) {
        ro <- cat.ci(i, j, 0, dataset)
        stat[i, j] <- ro[1]
        pv[i, j] <- ro[2]
        dof[i, j] <- ro[3]
      }
    }
    pvalue <- pv + t(pv)
    stata <- stat + t(stat)
    diag(stata) <- 0
    dof <- dof + t(dof)
    stadf <- stata / dof
  }

  pv <- pvalue
  pv[lower.tri(pv)] <- 2            # consider each pair once (upper triangle)
  G[pvalue > alpha] <- 0            # remove marginally independent pairs
  if (is.null(colnames(dataset))) {
    colnames(G) <- rownames(G) <- paste("X", 1:n, sep = "")
  } else colnames(G) <- rownames(G) <- colnames(dataset)
  diag(pvalue) <- diag(pv) <- 0

  ina <- 1:n
  sep <- list()                     # separating sets recorded per level k
  n.tests <- NULL                   # number of tests performed per level

  ## Significant pairs, ordered so the weakest associations (largest logged
  ## p-value, then smallest scaled statistic) are examined first.
  dial <- which(pv <= alpha, arr.ind = TRUE)
  zeu <- cbind(dial, stadf[dial], pv[dial])
  zeu <- zeu[order(-zeu[, 4], zeu[, 3]), ]
  if (!is.matrix(zeu))  zeu <- matrix(zeu, nrow = 1)
  duo <- nrow(zeu)                  # edges still to be tested conditionally
  n.tests[1] <- n * (n - 1) / 2

  ## ---- Step 2: conditional tests with growing conditioning sets ----
  if (duo == 0) {
    ## Nothing survived the marginal screening; G is already all zero.
    diag(G) <- 0
  } else {
    ell <- 2
    while (k < ell && nrow(zeu) > 0) {
      k <- k + 1                    # conditioning set size at this level
      tes <- 0                      # tests performed at this level
      ## One row per still-active edge: separating set (k values) plus the
      ## statistic and logged p-value, filled in upon removal.
      met <- matrix(nrow = nrow(zeu), ncol = k + 2)
      for (i in seq_len(nrow(zeu))) {
        ## (FIX: initialise the candidate sets on every iteration; they used
        ## to be undefined on the first pass when both adjacency sets were
        ## smaller than k, and `pam` could leak between iterations.)
        samx <- samy <- NULL
        pam <- NULL
        adjx <- ina[G[zeu[i, 1], ] == 2];  lx <- length(adjx)  # neighbours of x
        adjy <- ina[G[zeu[i, 2], ] == 2];  ly <- length(adjy)  # neighbours of y
        if (lx >= k) {
          pvalx <- pvalue[zeu[i, 1], adjx]
          infox <- cbind(adjx, pvalx)
          infox <- infox[order(-pvalx), ]
          if (!is.matrix(infox)) {
            samx <- cbind(infox[1], infox[2])
          } else samx <- cbind(t(combn(infox[, 1], k)), t(combn(infox[, 2], k)))
        }
        if (ly >= k) {
          pvaly <- pvalue[zeu[i, 2], adjy]
          infoy <- cbind(adjy, pvaly)
          infoy <- infoy[order(-pvaly), ]
          if (!is.matrix(infoy)) {
            samy <- cbind(infoy[1], infoy[2])
          } else samy <- cbind(t(combn(infoy[, 1], k)), t(combn(infoy[, 2], k)))
        }
        ## Candidate conditioning sets from either side; rbind() drops NULLs.
        sam <- rbind(samx, samy)
        if (is.null(sam))  next     # neither side has k neighbours
        sam <- unique(sam)
        ## x and y themselves must not appear inside a conditioning set.
        rem <- intersect(zeu[i, 1:2], sam)
        if (length(rem) > 0) {
          pam <- list()
          for (j in 1:length(rem)) {
            pam[[j]] <- as.vector(which(sam == rem[j], arr.ind = TRUE)[, 1])
          }
          pam <- unlist(pam)
        }
        ## (FIX: guard the removal; `sam[-pam, ]` used to run even when no
        ## rows had to be dropped, with an undefined or stale `pam`.)
        if (length(pam) > 0)  sam <- sam[-pam, ]
        if (!is.matrix(sam)) {
          sam <- matrix(sam, nrow = 1)
        } else if (nrow(sam) > 1) {
          ## Order the candidate sets before testing.
          ## (FIX: sorting is skipped for single-row matrices, which used to
          ## drop to a vector / break apply() and crash further down.)
          if (k == 1) {
            sam <- sam[order(sam[, 2]), ]
          } else {
            an <- t(apply(sam[, -c(1:2)], 1, sort, decreasing = TRUE))
            sam <- cbind(sam[, 1:2], an)
            nc <- ncol(sam)
            sam2 <- as.data.frame(sam[, nc:1])
            sam2 <- sam2[do.call(order, as.list(sam2)), ]
            sam <- as.matrix(sam2[, nc:1])
          }
        }
        if (nrow(sam) == 0)  next   # all candidate sets were invalid
        ## Test x _||_ y given each candidate set until independence is found.
        a <- ci.test(zeu[i, 1], zeu[i, 2], sam[1, 1:k], dataset, type = type, rob = rob, R = R)
        if (a[2] > alpha) {
          G[zeu[i, 1], zeu[i, 2]] <- 0  # conditionally independent: drop edge
          G[zeu[i, 2], zeu[i, 1]] <- 0
          met[i, ] <- c(sam[1, 1:k], a[1:2])
          tes <- tes + 1
        } else {
          cnt <- 1                  # (renamed from `m`, which clobbered the sample size)
          while (a[2] < alpha && cnt < nrow(sam)) {
            cnt <- cnt + 1
            a <- ci.test(zeu[i, 1], zeu[i, 2], sam[cnt, 1:k], dataset, type = type, rob = rob, R = R)
            tes <- tes + 1
          }
          if (a[2] > alpha) {
            G[zeu[i, 1], zeu[i, 2]] <- 0
            G[zeu[i, 2], zeu[i, 1]] <- 0
            met[i, ] <- c(sam[cnt, 1:k], a[1:2])
          }
        }
      }
      ## Largest remaining adjacency set decides how far k can still grow.
      lx <- ly <- numeric(nrow(zeu))
      for (i in seq_len(nrow(zeu))) {
        lx[i] <- sum(G[zeu[i, 1], ] == 2)
        ly[i] <- sum(G[zeu[i, 2], ] == 2)
      }
      ell <- max(lx, ly)
      ## Rows of `met` that were filled in correspond to removed edges.
      ## (FIX: the previous `rowSums(met) > 0` test could miss removals whose
      ## large negative logged p-value outweighed the other row entries.)
      id <- which(!is.na(met[, 1]))
      if (length(id) == 1) {
        sep[[k]] <- c(zeu[id, 1:2], met[id, ])
      } else {
        sep[[k]] <- cbind(zeu[id, 1:2], met[id, ])
      }
      ## NOTE: when no edge was removed at this level (id is empty) this
      ## subsetting empties `zeu` and terminates the search, mirroring the
      ## original behaviour.
      zeu <- zeu[-id, ]
      ## (FIX: is.matrix() instead of class(zeu) != "matrix"; since R 4.0 a
      ## matrix also inherits "array", so the old comparison had length 2
      ## and errors inside if().)
      if (!is.matrix(zeu))  zeu <- matrix(zeu, ncol = 4)
      n.tests[k + 1] <- tes
    }
    G <- G / 2                      # edges: 2 -> 1
    diag(G) <- 0
    ## Attach readable names to the recorded separating sets.
    for (l in 1:k) {
      if (is.matrix(sep[[l]])) {
        if (nrow(sep[[l]]) > 0) {
          sepa <- sep[[l]]
          colnames(sepa)[1:2] <- c("X", "Y")
          colnames(sepa)[2 + 1:l] <- paste("SepVar", 1:l)
          colnames(sepa)[(l + 3):(l + 4)] <- c("stat", "logged.p-value")
          sepa <- sepa[order(sepa[, 1], sepa[, 2]), ]
          sep[[l]] <- sepa
        }
      } else {
        if (length(sep[[l]]) > 0) {
          names(sep[[l]])[1:2] <- c("X", "Y")
          names(sep[[l]])[2 + 1:l] <- paste("SepVar", 1:l)
          names(sep[[l]])[(l + 3):(l + 4)] <- c("stat", "logged.p-value")
        }
      }
    }
  }
  ## (FIX: the runtime was never finalised when no pair survived the
  ## marginal screening.)
  durat <- proc.time() - durat

  n.tests <- n.tests[n.tests > 0]
  k <- length(n.tests) - 1          # maximum conditioning set size reached
  sepset <- list()
  if (k == 0) {
    sepset <- NULL
  } else {
    for (l in 1:k) {
      if (is.matrix(sep[[l]])) {
        nu <- nrow(sep[[l]])
        if (nu > 0)  sepset[[l]] <- sep[[l]][1:nu, ]
      } else sepset[[l]] <- sep[[l]]
    }
  }
  names(n.tests) <- paste("k=", 0:k, sep = "")
  info <- summary(rowSums(G))       # degree distribution summary
  density <- sum(G) / (n * (n - 1)) # proportion of possible edges retained

  if (graph) {
    if (requireNamespace("Rgraphviz", quietly = TRUE, warn.conflicts = FALSE)) {
      am.graph <- new("graphAM", adjMat = G, edgemode = "undirected")
      plot(am.graph, main = paste("Skeleton of the PC algorithm for", title))
    } else {
      warning("In order to plot the generated network, package Rgraphviz is required.")
    }
  }

  list(stat = stata, pvalue = pvalue, runtime = durat, kappa = k,
       n.tests = n.tests, density = density, info = info, G = G,
       sepset = sepset, title = title)
}
|
8ca09997d890ab6758c9ac94fcf42495605f519f
|
aa66922233141af22e5aca895e5b1f05fea78702
|
/Testing2.R
|
00eb95670ac3a235ef9dffca1393b731321a1886
|
[] |
no_license
|
Saza-02/Test2proj
|
cf248ac557e2acfea7bca685966a133ebadec49a
|
757023387ba3b1cf44eb1d64864530cb10fe0669
|
refs/heads/main
| 2023-06-23T22:52:55.645205
| 2021-07-28T00:15:57
| 2021-07-28T00:15:57
| 390,160,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
Testing2.R
|
"Hello Testing"
var1 <- c(2,4,6,8,10)
var2 <- c("Aby","Mary","Danny","James","Lewis")
##adding sex
Var3 <- c("F","F","M","M","M")
|
5501b3f711f93d53e2d8fc73847e98495bd053eb
|
854e26b5063c7844bc99c771a591757f5148ac71
|
/index.R
|
794c6cee933d77cb5c7fd821392d2d8cae051583
|
[] |
no_license
|
walbertusd/solar-prediction
|
38b42111ffda26fb50784bfdd692096efbe56cf3
|
2991bd47725a8f11e6e1c9799abd3d7a9d3be2e1
|
refs/heads/master
| 2020-12-03T00:35:00.872538
| 2017-07-18T14:50:59
| 2017-07-18T14:50:59
| 96,042,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,188
|
r
|
index.R
|
# R script
# NOTE: uncomment str({var}) to inspect var, use print.nc instead for NetCDF data
# Each different section separated by 3 newline
# Sys.setenv('MC_CORES' = 3L)
# required library
library('RNetCDF')
# library('parallel')
# library('dplyr')
# Read train data
train <- read.csv('./input/train.csv')
# str(train)
# The plan: train1 and train2 used as training data, train3 used as test data
# split train data
train1 <- train[floor(train$Date/10000)<1999,]
train2 <- train[floor(train$Date/10000)>=1999 & floor(train$Date/10000)<2004,]
train3 <- train[floor(train$Date/10000)>=2004,]
# nrow(train)
# str(train1)
# str(train2)
# str(train3)
# Get station data
station_info <- read.csv('./input/station_info.csv')
# str(station_info)
# convert station longitude to use positive degrees from the Prime Meridian (GEFS use this)
station_info$elon <- station_info$elon+360
# Get GEFS Data
gefs_elevations <- open.nc('./input/gefs_elevations.nc')
# print.nc(gefs_elevations)
# Read attribute data from GEFS
# What is this? ans: use print.nc to find out
apcp_sfc <- open.nc('./input/train/apcp_sfc_latlon_subset_19940101_20071231.nc')
dlwrf_sfc <- open.nc('./input/train/dlwrf_sfc_latlon_subset_19940101_20071231.nc')
dswrf_sfc <- open.nc('./input/train/dswrf_sfc_latlon_subset_19940101_20071231.nc')
pres_msl <- open.nc('./input/train/pres_msl_latlon_subset_19940101_20071231.nc')
pwat_eatm <- open.nc('./input/train/pwat_eatm_latlon_subset_19940101_20071231.nc')
spfh_2m <- open.nc('./input/train/spfh_2m_latlon_subset_19940101_20071231.nc')
tcdc_eatm <- open.nc('./input/train/tcdc_eatm_latlon_subset_19940101_20071231.nc')
tcolc_eatm <- open.nc('./input/train/tcolc_eatm_latlon_subset_19940101_20071231.nc')
tmax_2m <- open.nc('./input/train/tmax_2m_latlon_subset_19940101_20071231.nc')
tmin_2m <- open.nc('./input/train/tmin_2m_latlon_subset_19940101_20071231.nc')
tmp_2m <- open.nc('./input/train/tmp_2m_latlon_subset_19940101_20071231.nc')
tmp_sfc <- open.nc('./input/train/tmp_sfc_latlon_subset_19940101_20071231.nc')
ulwrf_sfc <- open.nc('./input/train/ulwrf_sfc_latlon_subset_19940101_20071231.nc')
ulwrf_tatm <- open.nc('./input/train/ulwrf_tatm_latlon_subset_19940101_20071231.nc')
uswrf_sfc <- open.nc('./input/train/uswrf_sfc_latlon_subset_19940101_20071231.nc')
# Get GEFS longitude and latitude data
gefsLongitude <- var.get.nc(apcp_sfc, "lon")
gefsLatitude <- var.get.nc(apcp_sfc, "lat")
# Building station data ----------------------------------------------------
# One row per station: its coordinates/elevation plus the indices,
# coordinates and elevations of the 4 surrounding GEFS grid points.
data <- as.data.frame(cbind(station_info$elon, station_info$nlat, station_info$elev))
colnames(data) <- c("stationLon", "stationLat", "stationLev")
# Add the 4 closest GEFS grid points for each station. Duplicating the
# lookups trades memory for processor time and ease of code.
# NOTE(review): ceiling(x) and ceiling(x)+1 bracket the station only from
# above (floor/ceiling would enclose it), and the `+1` neighbour assumes
# a 1-degree grid sorted ascending -- confirm, especially for latitude.
data$idLon1 <- sapply(data$stationLon, function(x) match(ceiling(x), gefsLongitude))
data$idLon2 <- data$idLon1+1
data$idLat1 <- sapply(data$stationLat, function(x) match(ceiling(x), gefsLatitude))
data$idLat2 <- data$idLat1+1
# GEFS latitude/longitude of the surrounding grid points.
data$gefsLon1 <- ceiling(data$stationLon)
data$gefsLon2 <- data$gefsLon1+1
data$gefsLat1 <- ceiling(data$stationLat)
data$gefsLat2 <- data$gefsLat1+1
# Elevation of each surrounding GEFS grid point, read one cell at a time
# from the elevation_control variable of gefs_elevations.nc.
helper <- function(lonId, latId) {
  var.get.nc(gefs_elevations, "elevation_control", c(lonId, latId), c(1,1))
}
data$gefsLev1 <- mapply(helper, data$idLon1, data$idLat1)
data$gefsLev2 <- mapply(helper, data$idLon1, data$idLat2)
data$gefsLev3 <- mapply(helper, data$idLon2, data$idLat1)
data$gefsLev4 <- mapply(helper, data$idLon2, data$idLat2)
dateId <- 1
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(apcp_sfc, "Total_precipitation", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # apcp_sfc{gefs}{hour}
# data$apcp_sfc11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$apcp_sfc12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$apcp_sfc13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$apcp_sfc14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$apcp_sfc15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$apcp_sfc21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$apcp_sfc22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$apcp_sfc23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$apcp_sfc24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$apcp_sfc25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$apcp_sfc31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$apcp_sfc32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$apcp_sfc33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$apcp_sfc34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$apcp_sfc35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$apcp_sfc41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$apcp_sfc42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$apcp_sfc43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$apcp_sfc44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$apcp_sfc45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(dlwrf_sfc, "Downward_Long-Wave_Rad_Flux", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # dlwrf_sfc{gefs}{hour}
# data$dlwrf_sfc11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$dlwrf_sfc12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$dlwrf_sfc13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$dlwrf_sfc14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$dlwrf_sfc15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$dlwrf_sfc21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$dlwrf_sfc22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$dlwrf_sfc23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$dlwrf_sfc24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$dlwrf_sfc25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$dlwrf_sfc31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$dlwrf_sfc32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$dlwrf_sfc33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$dlwrf_sfc34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$dlwrf_sfc35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$dlwrf_sfc41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$dlwrf_sfc42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$dlwrf_sfc43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$dlwrf_sfc44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$dlwrf_sfc45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(dswrf_sfc, "Downward_Short-Wave_Rad_Flux", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # dswrf_sfc{gefs}{hour}
# data$dswrf_sfc11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$dswrf_sfc12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$dswrf_sfc13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$dswrf_sfc14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$dswrf_sfc15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$dswrf_sfc21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$dswrf_sfc22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$dswrf_sfc23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$dswrf_sfc24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$dswrf_sfc25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$dswrf_sfc31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$dswrf_sfc32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$dswrf_sfc33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$dswrf_sfc34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$dswrf_sfc35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$dswrf_sfc41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$dswrf_sfc42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$dswrf_sfc43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$dswrf_sfc44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$dswrf_sfc45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(pres_msl, "Pressure", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # pres_msl{gefs}{hour}
# data$pres_msl11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$pres_msl12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$pres_msl13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$pres_msl14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$pres_msl15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$pres_msl21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$pres_msl22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$pres_msl23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$pres_msl24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$pres_msl25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$pres_msl31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$pres_msl32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$pres_msl33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$pres_msl34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$pres_msl35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$pres_msl41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$pres_msl42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$pres_msl43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$pres_msl44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$pres_msl45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(pwat_eatm, "Precipitable_water", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # pwat_eatm{gefs}{hour}
# data$pwat_eatm11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$pwat_eatm12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$pwat_eatm13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$pwat_eatm14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$pwat_eatm15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$pwat_eatm21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$pwat_eatm22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$pwat_eatm23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$pwat_eatm24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$pwat_eatm25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$pwat_eatm31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$pwat_eatm32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$pwat_eatm33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$pwat_eatm34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$pwat_eatm35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$pwat_eatm41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$pwat_eatm42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$pwat_eatm43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$pwat_eatm44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$pwat_eatm45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(spfh_2m, "Specific_humidity_height_above_ground", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # spfh_2m{gefs}{hour}
# data$spfh_2m11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$spfh_2m12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$spfh_2m13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$spfh_2m14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$spfh_2m15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$spfh_2m21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$spfh_2m22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$spfh_2m23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$spfh_2m24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$spfh_2m25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$spfh_2m31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$spfh_2m32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$spfh_2m33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$spfh_2m34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$spfh_2m35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$spfh_2m41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$spfh_2m42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$spfh_2m43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$spfh_2m44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$spfh_2m45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(tcdc_eatm, "Total_cloud_cover", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # tcdc_eatm{gefs}{hour}
# data$tcdc_eatm11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$tcdc_eatm12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$tcdc_eatm13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$tcdc_eatm14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$tcdc_eatm15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$tcdc_eatm21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$tcdc_eatm22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$tcdc_eatm23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$tcdc_eatm24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$tcdc_eatm25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$tcdc_eatm31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$tcdc_eatm32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$tcdc_eatm33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$tcdc_eatm34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$tcdc_eatm35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$tcdc_eatm41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$tcdc_eatm42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$tcdc_eatm43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$tcdc_eatm44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$tcdc_eatm45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(tcolc_eatm, "Total_Column-Integrated_Condensate", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # tcolc_eatm{gefs}{hour}
# data$tcolc_eatm11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$tcolc_eatm12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$tcolc_eatm13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$tcolc_eatm14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$tcolc_eatm15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$tcolc_eatm21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$tcolc_eatm22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$tcolc_eatm23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$tcolc_eatm24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$tcolc_eatm25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$tcolc_eatm31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$tcolc_eatm32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$tcolc_eatm33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$tcolc_eatm34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$tcolc_eatm35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$tcolc_eatm41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$tcolc_eatm42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$tcolc_eatm43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$tcolc_eatm44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$tcolc_eatm45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(tmax_2m, "Maximum_temperature", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # tmax_2m{gefs}{hour}
# data$tmax_2m11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$tmax_2m12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$tmax_2m13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$tmax_2m14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$tmax_2m15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$tmax_2m21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$tmax_2m22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$tmax_2m23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$tmax_2m24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$tmax_2m25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$tmax_2m31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$tmax_2m32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$tmax_2m33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$tmax_2m34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$tmax_2m35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$tmax_2m41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$tmax_2m42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$tmax_2m43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$tmax_2m44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$tmax_2m45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(tmin_2m, "Minimum_temperature", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # tmin_2m{gefs}{hour}
# data$tmin_2m11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$tmin_2m12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$tmin_2m13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$tmin_2m14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$tmin_2m15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$tmin_2m21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$tmin_2m22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$tmin_2m23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$tmin_2m24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$tmin_2m25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$tmin_2m31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$tmin_2m32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$tmin_2m33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$tmin_2m34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$tmin_2m35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$tmin_2m41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$tmin_2m42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$tmin_2m43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$tmin_2m44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$tmin_2m45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(tmp_2m, "Temperature_height_above_ground", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # tmp_2m{gefs}{hour}
# data$tmp_2m11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$tmp_2m12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$tmp_2m13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$tmp_2m14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$tmp_2m15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$tmp_2m21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$tmp_2m22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$tmp_2m23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$tmp_2m24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$tmp_2m25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$tmp_2m31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$tmp_2m32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$tmp_2m33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$tmp_2m34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$tmp_2m35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$tmp_2m41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$tmp_2m42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$tmp_2m43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$tmp_2m44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$tmp_2m45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(tmp_sfc, "Temperature_surface", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # tmp_sfc{gefs}{hour}
# data$tmp_sfc11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$tmp_sfc12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$tmp_sfc13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$tmp_sfc14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$tmp_sfc15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$tmp_sfc21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$tmp_sfc22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$tmp_sfc23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$tmp_sfc24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$tmp_sfc25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$tmp_sfc31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$tmp_sfc32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$tmp_sfc33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$tmp_sfc34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$tmp_sfc35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$tmp_sfc41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$tmp_sfc42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$tmp_sfc43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$tmp_sfc44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$tmp_sfc45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(ulwrf_sfc, "Upward_Long-Wave_Rad_Flux_surface", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # ulwrf_sfc{gefs}{hour}
# data$ulwrf_sfc11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$ulwrf_sfc12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$ulwrf_sfc13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$ulwrf_sfc14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$ulwrf_sfc15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$ulwrf_sfc21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$ulwrf_sfc22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$ulwrf_sfc23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$ulwrf_sfc24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$ulwrf_sfc25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$ulwrf_sfc31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$ulwrf_sfc32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$ulwrf_sfc33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$ulwrf_sfc34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$ulwrf_sfc35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$ulwrf_sfc41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$ulwrf_sfc42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$ulwrf_sfc43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$ulwrf_sfc44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$ulwrf_sfc45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(ulwrf_tatm, "Upward_Long-Wave_Rad_Flux", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # ulwrf_tatm{gefs}{hour}
# data$ulwrf_tatm11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$ulwrf_tatm12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$ulwrf_tatm13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$ulwrf_tatm14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$ulwrf_tatm15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$ulwrf_tatm21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$ulwrf_tatm22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$ulwrf_tatm23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$ulwrf_tatm24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$ulwrf_tatm25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$ulwrf_tatm31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$ulwrf_tatm32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$ulwrf_tatm33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$ulwrf_tatm34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$ulwrf_tatm35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$ulwrf_tatm41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$ulwrf_tatm42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$ulwrf_tatm43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$ulwrf_tatm44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$ulwrf_tatm45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# helper <- function(lonId, latId, hourId, ensId = 1) {
# var.get.nc(uswrf_sfc, "Upward_Short-Wave_Rad_Flux", c(lonId, latId, hourId, ensId, dateId), c(1,1,1,1,1))[1]
# }
# # uswrf_sfc{gefs}{hour}
# data$uswrf_sfc11 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 1)
# data$uswrf_sfc12 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 2)
# data$uswrf_sfc13 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 3)
# data$uswrf_sfc14 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 4)
# data$uswrf_sfc15 <- mapply(helper, lonId = data$idLon1, latId = data$idLat1, hour = 5)
# data$uswrf_sfc21 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 1)
# data$uswrf_sfc22 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 2)
# data$uswrf_sfc23 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 3)
# data$uswrf_sfc24 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 4)
# data$uswrf_sfc25 <- mapply(helper, lonId = data$idLon1, latId = data$idLat2, hour = 5)
# data$uswrf_sfc31 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 1)
# data$uswrf_sfc32 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 2)
# data$uswrf_sfc33 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 3)
# data$uswrf_sfc34 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 4)
# data$uswrf_sfc35 <- mapply(helper, lonId = data$idLon2, latId = data$idLat1, hour = 5)
# data$uswrf_sfc41 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 1)
# data$uswrf_sfc42 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 2)
# data$uswrf_sfc43 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 3)
# data$uswrf_sfc44 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 4)
# data$uswrf_sfc45 <- mapply(helper, lonId = data$idLon2, latId = data$idLat2, hour = 5)
# drops <- c(
# "stationLon",
# "stationLat",
# # "stationLev",
# "idLon1",
# "idLon2",
# "idLat1",
# "idLat2",
# "gefsLon1",
# "gefsLon2",
# "gefsLat1",
# "gefsLat2",
# "gefsLev1",
# "gefsLev2",
# "gefsLev3",
# "gefsLev4")
# # drops <- c("stationLon",
# # "stationLat",
# # "stationLev",
# # "idLon1",
# # "idLon2",
# # "idLat1",
# # "idLat2",
# # "gefsLon1",
# # "gefsLon2",
# # "gefsLat1",
# # "gefsLat2",
# # "gefsLev1",
# # "gefsLev2",
# # "gefsLev3",
# # "gefsLev4")
# data <- data[ , !(names(data) %in% drops)]
# saveRDS(data, file = "data.Rda")
# str(data)
# Report CPU and wall-clock time consumed by the session so far
# (user / system / elapsed seconds) -- a crude runtime log for the
# data-extraction script above.
proc.time()
# print.nc(uswrf_sfc)
# var.get.nc(apcp_sfc, "ens", c(1), c(1))
#
# junk area, not important but don't delete it
# print.nc(gefs_elevations)
# var.get.nc(gefs_elevations, "latitude")
# var.get.nc(gefs_elevations, "longitude")
# apcp_sfc
# print.nc(apcp_sfc)
# att.get.nc(apcp_sfc, "lat", "actual_range")
# var.get.nc(apcp_sfc, "ens")
# var.get.nc(apcp_sfc, "lat")
# var.get.nc(apcp_sfc, "Total_precipitation", c(1,1,1,1,1), c(NA,NA,1,1,1))
# var.get.nc(apcp_sfc, "Total_precipitation", c(1,1,1,11,1), c(NA,NA,1,1,1))
# class(apcp_sfc)
|
db57e565558baac8a94f4f762b86d5170c50dde3
|
0078429c9abba55467bfb46cdecbcda79c31dac4
|
/inst/article/annotation.sets.R
|
32c07e2f9804c577aa3ec8b0ad76894e77948838
|
[] |
no_license
|
Bhanditz/bams
|
fb2e4fa0f1ddc9665febbb0bc6263faa39652709
|
1d61aa458c42522cc35e6e6e819e19ffab80d18e
|
refs/heads/master
| 2020-04-19T08:42:46.562481
| 2013-03-13T00:00:00
| 2013-03-13T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
annotation.sets.R
|
# Build a list of two annotation data sets (the original neuroblastoma
# annotations and the detailed set from bams), recode legacy annotation
# labels to the standard ones, and save the result for the article.
data(neuroblastoma, package = "neuroblastoma")
data(neuroblastomaDetailed, package = "bams")
annotation.sets <- list(original = neuroblastoma$annotations,
                        detailed = neuroblastomaDetailed)
## standardize annotation levels: map legacy labels to standard labels.
standard <- c(breakpoint = ">0breakpoints",
              normal = "0breakpoints")
for (set.name in names(annotation.sets)) {
  ## Use the full column name. The original wrote `$ann`, which only works
  ## through `$` partial matching of "annotation" and silently breaks if a
  ## column literally named "ann" is ever added.
  nonstandard <- annotation.sets[[set.name]]$annotation %in% names(standard)
  annotation.sets[[set.name]]$annotation <-
    as.character(annotation.sets[[set.name]]$annotation)
  annotation.sets[[set.name]][nonstandard, "annotation"] <-
    standard[annotation.sets[[set.name]][nonstandard, "annotation"]]
  ## After recoding, no legacy labels may remain.
  stopifnot(all(!annotation.sets[[set.name]]$annotation %in% names(standard)))
}
save(annotation.sets, file = "annotation.sets.RData")
|
6f749aeab5f58639f0507c1d2c02fdf0ff1f58ef
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/minval/examples/downloadChEBI.Rd.R
|
efe0fd676ad207bdac12431f8ac20557983b6069
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 379
|
r
|
downloadChEBI.Rd.R
|
library(minval)
### Name: downloadChEBI
### Title: Download the ChEBI database
### Aliases: downloadChEBI
### ** Examples
## Not run:
##D # Download ChEBI database with associations
##D ChEBI <- downloadChEBI(release = '142')
##D
##D # Download ChEBI database without associations
##D ChEBI <- downloadChEBI(release = '142', woAssociations = TRUE)
##D
## End(Not run)
|
0ab119c650d4ff1891e03b46c380af29c7b49e16
|
cf91203113637dc04746da3b6a5305baafe1067b
|
/datasets and codes for cleaning up the data/Tests/test.R
|
229e730cb70c0015d60ca5d1cf96bc4b5ff443fc
|
[] |
no_license
|
lysiabean/Data-Science-Group-Project
|
4e510d6b0bb47430ce5fb498b0e496c2843df4cf
|
bf503160b709fd779ddb16857b05bd6c9caa18f3
|
refs/heads/master
| 2021-01-10T07:42:45.592075
| 2015-10-20T16:42:54
| 2015-10-20T16:42:54
| 44,619,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 363
|
r
|
test.R
|
# Welch two-sample t-test comparing hour-of-day indices (0..23) against the
# corresponding hourly counts in `mylist`.  `temp` is defined alongside the
# counts but is not used by the test below.
hour <- 0:23
mylist <- c(
  1354, 2028, 2690, 3502, 2882, 3182, 3434, 4064,
  4158, 4009, 4698, 4523, 2318, 2024, 1639, 1366,
  1228, 894, 835, 952, 447, 385, 442, 782
)
temp <- c(
  0.24, 0.46, 0.95, 1.40, 1.14, 0.73, 0.58, 0.62,
  0.49, 0.29, 0.24, 0.18, 0.08, 0.07, 0.06, 0.04,
  0.05, 0.04, 0.04, 0.07, 0.04, 0.05, 0.07, 0.17
)
t.test(hour, mylist)
|
d3de338dc3580031d5b72d21fe5adfd6366c1422
|
8d32387ef0d9bf05e9cf7aec8f48dfbaa8d4e31d
|
/vignettes/StatComp18052.R
|
efbeeb81708b7f66b6ad66007a1b042404add59d
|
[] |
no_license
|
Miwa1996/StatComp18052
|
9ee18561e65a82db0e285c8ee22839b4ff449b47
|
70b34f1a535c55ed6855879ea23eb1165a8d4587
|
refs/heads/master
| 2020-04-16T03:51:32.774750
| 2019-01-18T10:08:54
| 2019-01-18T10:08:54
| 165,247,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,751
|
r
|
StatComp18052.R
|
## ------------------------------------------------------------------------
# Draw 10 standard-normal variates; the bare `a` autoprints them.
a=rnorm(10)
a
## ------------------------------------------------------------------------
# A 2 x 3 matrix filled column-wise with the integers 1..6 (autoprinted).
matrix(1:6, 2, 3)
## ------------------------------------------------------------------------
# Scatter plot of two independent standard-normal samples of size 10.
x=rnorm(10)
y=rnorm(10)
plot(x,y)
## ----Code chunk1, echo = FALSE-------------------------------------------
# Inverse-transform sampling from the discrete distribution on 0..4 with
# probabilities p: findInterval() locates each uniform draw within the
# cumulative probabilities cp, and the matching value of x is selected.
x <- 0:4; p <- c(0.1,0.2,0.2,0.2,0.3)
cp <- cumsum(p); m <- 1e3; r <- numeric(m)
r <- x[findInterval(runif(m),cp)+1]
# Empirical relative frequencies of the sample, for comparison against p.
ct <- as.vector(table(r)); ct/sum(ct)
## ----Code chunk2, echo = FALSE-------------------------------------------
# Acceptance-rejection sampling from Beta(3, 2), density f(x) = 12 x^2 (1-x),
# with U(0, 1) as the proposal g.  With c = 16/9 (the maximum of f), the
# acceptance ratio f(x) / (c * g(x)) simplifies to (27/4) x^2 (1 - x).
n <- 1e3;j<-k<-0;y <- numeric(n)
while (k < n) {
  u <- runif(1)
  # j counts total proposals, usable as an acceptance-rate diagnostic
  j <- j + 1
  x <- runif(1) #random variate from g
  if (27/4*x^2 * (1-x) > u) {
    #we accept x
    k <- k + 1
    y[k] <- x
  }
}
# Histogram of the accepted draws with the target density overlaid.
hist(y,breaks=20,freq=F)
f<- function(x) return(12*x^2*(1-x))
curve(f,add=T)
## ----Code chunk3, echo = FALSE-------------------------------------------
# Hierarchical (mixture) sampling: lambda ~ Gamma(shape = 4, scale = 3),
# then y | lambda ~ Exponential(rate = lambda).  The histogram shows the
# heavy-tailed marginal distribution of y.
n <- 1e3; r <- 4; beta <- 3
lambda <- rgamma(n, shape=r, scale=beta)
y <- rexp(n, lambda)
hist(y,breaks=20,freq=F)
## ----Code chunk4, echo = FALSE-------------------------------------------
# Monte Carlo estimate of the Beta(3, 3) CDF: for 0 < y < 1 estimate
# integral_0^y 30 x^2 (1-x)^2 dx by averaging the integrand over U(0, y)
# draws and scaling by the interval length y.
#
# Fix vs. the original: no branch covered y == 1 (y <= 0, y > 1, and
# 0 <= y < 1 all miss it), so Pbeta(1) errored with "value" undefined.
# The CDF equals 1 for every y >= 1.
Pbeta <- function(y) {
  if (y <= 0) return(0)
  if (y >= 1) return(1)
  m <- 1e4
  x <- runif(m, min = 0, max = y)
  mean(30 * x^2 * (1 - x)^2) * y
}
sapply((1:9)/10, Pbeta)
# Exact CDF values for comparison with the Monte Carlo estimates.
pbeta1 <- function(x) {
  pbeta(x, 3, 3)
}
sapply((1:9)/10, pbeta1)
## ----Code chunk5_1,echo = FALSE------------------------------------------
# Monte Carlo estimate of the Rayleigh(sigma) CDF at each element of x.
# The integral over (0, x) is rewritten as an expectation over U(0, 1) via
# the substitution t = x*w; with antithetic = TRUE the second half of the
# uniforms is v = 1 - u, giving negatively correlated pairs that reduce the
# variance of the averaged estimator.
MC.Phi<-function(x,sigma,R= 10000, antithetic = TRUE) {
  u <- runif(R/2)
  if (!antithetic)
    v<- runif(R/2)
  else
    v<-1-u
  w<-c(u, v)
  cdf <- numeric(length(x))
  for (i in 1:length(x)) {
    # Rayleigh density evaluated at x[i]*w, times the Jacobian factor x[i].
    g <-x[i]*w/(sigma)^2*exp((-(x[i]*w)^2/(2*sigma*sigma)))*x[i]
    cdf[i] <- mean(g)
  }
  return(cdf)
}
x <- seq(.1,2.5,length=5);
# Reference values: numerical quadrature of the Rayleigh(sigma) density.
pRayleigh<-function(x,sigma){
  s<-sigma;
  p<-numeric(length(x));
  intergrand<-function(x){
    x/(s^2)*exp((-x^2/(2*(s^2))))};
  for(i in 1:length(x)){
    p[i]<-integrate(intergrand,0,x[i])$value;
  }
  return(p)
}
Phi<-pRayleigh(x,sigma=2)
# Same seed for both runs so MC1 and MC2 consume the same first R/2 uniforms.
# NOTE(review): `anti=` relies on partial matching of the `antithetic`
# argument; spelling it out would be safer.
set.seed(123)
MC1<- MC.Phi(x,sigma=2,anti=FALSE) #for (X1+X2)/2 which X1,X2 is independent
set.seed(123)
MC2<- MC.Phi(x,sigma=2,anti=TRUE) #for antithetic variables (X+X')/2
print(round(rbind(x, MC1, MC2, Phi),5))
## ----Code chunk5_2,echo = FALSE------------------------------------------
# Repeat both estimators m times at a single point to compare their
# sampling standard deviations (independent vs antithetic uniforms).
m <- 1000
MC1 <- MC2 <- numeric(m)
x <- 1.95
for (i in 1:m) {
  MC1[i] <- MC.Phi(x,2,R = 1000, anti = FALSE)
  MC2[i] <- MC.Phi(x,2,R = 1000,anti=TRUE)
}
print(sd(MC1))
## ----Code chunk5_3,echo = FALSE------------------------------------------
print(sd(MC2))
## ----Code chunk5_4,echo = FALSE------------------------------------------
# Proportion of variance removed by the antithetic construction.
print((var(MC1) - var(MC2))/var(MC1))
## ----Code chunk6, echo = FALSE-------------------------------------------
# Importance sampling for theta = integral_1^Inf x^2 exp(-x^2/2)/sqrt(2*pi) dx
# with two candidate importance densities, both supported on (1, Inf):
#   f1: shifted Exp(1), density exp(-(x-1))
#   f2: shifted Gamma(shape = 3, rate = 1), density (1/2)(x-1)^2 exp(-(x-1))
x <- seq(1,5,length.out = 20)
g <- exp(-x^2/2)*x^2/sqrt(2*pi)
f1 <- exp(-x+1)
f2 <- 1/2*(x-1)^2 *exp(-x+1)
#figure (a): the integrand g together with both importance densities
plot(g~x,type = "l",col=1)
lines(f1~x,col=2)
lines(f2~x,col=3)
legend("topright", legend =c("g", "f1", "f2"),
       lty = 1:3, lwd = 2, inset = 0.02,col=1:3)
#figure (b): the ratios g/f -- the flatter ratio gives the lower variance
plot(g/f1~x,type = "l",col=1)
lines(g/f2~x,col=2)
legend("topright", legend =c("f1", "f2"),
       lty = 1:2, lwd = 2, inset = 0.02,col=1:2)
m <- 10000
theta.hat <- se <- numeric(2)
# Integrand as a function, zero for x <= 1 (overwrites the plotting vector g).
g <- function(x) {
  exp(-x^2/2)*x^2/sqrt(2*pi) * (x > 1)
}
x <- rexp(m, rate= 1)+1 #using f1
fg <- g(x)/exp(-x+1)
theta.hat[1] <- mean(fg)
se[1] <- sd(fg)
x <- rgamma(m, shape=3, rate = 1)+1 #using f2
fg <- g(x)/(1/2*(x-1)^2 *exp(-x+1))
theta.hat[2] <- mean(fg)
se[2] <- sd(fg)
# Tabulate the two estimates and their standard errors side by side.
res <- rbind(theta=round(theta.hat,3), se=round(se,3))
colnames(res) <- paste0('f',1:2)
knitr::kable(res,align='c')
## ----Code chunk7, echo = FALSE-------------------------------------------
# Importance-sampling estimate of the same tail integral using only the
# shifted exponential density f(x) = exp(-(x-1)) on (1, Inf).
m <- 10000
theta.hat <- se <- numeric(1)
# Integrand, zero for x <= 1.
g <- function(x) {
  exp(-x^2/2)*x^2/sqrt(2*pi) * (x > 1)
}
x <- rexp(m, rate= 1)+1 #using f
fg <- g(x)/exp(-x+1)
theta.hat[1] <- mean(fg)
se[1] <- sd(fg)
res <- rbind(theta=round(theta.hat,3), se=round(se,3))
colnames(res) <- 'f'
knitr::kable(res,align='c')
## ----Code chunk8, echo = FALSE-------------------------------------------
# Sampling distribution of the Gini coefficient estimate
#   G.hat = sum_i (2i - n - 1) x_(i) / (n^2 * mean(x))
# for three parent distributions: standard lognormal, uniform(0, 1), and
# Bernoulli(0.1).  For each, m replicates of size n are drawn and the mean,
# median, deciles, and a histogram of G.hat are reported.
# NOTE(review): later chunks read the globals m and n this chunk leaves
# behind, so the assignments below must stay at top level.
m <- 1000 # Number of Monte Carlo trials
n <- 100
set.seed(1)
G.hat1 <- numeric(m) # Storage for test statistics from the MC trials
g1 <- numeric(n)
Id <- 1:n
## Start the simulation
for (i in 1:m){
  x1 <- rlnorm(n) # x1 generated from standard lognormal distribution
  mu_hat1 <- mean(x1) # The estimation of mu
  x1_order <- sort(x1) # x1_order is the order statistic of x1
  G.hat1[i] <- (2 * Id -n-1)%*%x1_order/(n^2 *mu_hat1)# Estimate the value of G
}
print(c(mean(G.hat1),median(G.hat1))) # the mean and median of G.hat1
print(quantile(G.hat1,probs=seq(0.1,1,0.1))) # the deciles of G.hat1
hist(G.hat1,prob = TRUE)
# Same experiment with a uniform(0, 1) parent distribution.
m <- 1000 # Number of Monte Carlo trials
n <- 100
set.seed(12)
G.hat2 <- numeric(m) # Storage for test statistics from the MC trials
g2 <- numeric(n)
Id <- 1:n
## Start the simulation
for (i in 1:m){
  x2 <- runif(n) # x2 generated from uniform distribution
  mu_hat2 <- mean(x2) # The estimation of mu
  x2_order <- sort(x2) # x2_order is the order statistic of x2
  G.hat2[i] <- (2 * Id -n-1)%*%x2_order/(n^2 *mu_hat2)# Estimate the value of G
}
print(c(mean(G.hat2),median(G.hat2))) # the mean and median of G.hat2
print(quantile(G.hat2,probs=seq(0.1,1,0.1))) # the deciles of G.hat2
hist(G.hat2,prob = TRUE)
# Same experiment with a Bernoulli(0.1) parent distribution.
m <- 1000 # Number of Monte Carlo trials
n <- 100
set.seed(123)
G.hat3 <- numeric(m) # Storage for test statistics from the MC trials
g3 <- numeric(n)
Id <- 1:n
## Start the simulation
for (i in 1:m){
  x3 <- rbinom(n,1,0.1) # x3 generated from Bernoulli(0.1) distribution
  mu_hat3 <- mean(x3) # The estimation of mu
  x3_order <- sort(x3) # x3_order is the order statistic of x3
  G.hat3[i] <- (2 * Id -n-1)%*%x3_order/(n^2 *mu_hat3)# Estimate the value of G
}
print(c(mean(G.hat3),median(G.hat3))) # the mean and median of G.hat3
print(quantile(G.hat3,probs=seq(0.1,1,0.1))) # the deciles of G.hat3
hist(G.hat3,prob = TRUE)
## ----Code chunk9_1,echo = FALSE------------------------------------------
# function to calculate the confidence interval
# Builds a 95% normal-theory confidence interval for E[G.hat] under a
# lognormal(a, b) parent: m Monte Carlo replicates of the Gini statistic
# from samples of size m, then mean +/- 1.96 * sd / sqrt(m).
compu.interval <- function(a,b){
  m<-1e3
  G<-numeric(m)
  I<-2*c(1:m)-m-1
  # Fixed seed: the returned interval is reproducible across calls.
  set.seed(123)
  for(i in 1:m){
    x<-rlnorm(m,a,b) #generate random numbers
    x<-sort(x) #sorting x
    mu=mean(x)
    G[i]<-1/m^2/mu*(t(I)%*%x) #compute G
  }
  CI<-c(mean(G)-1.96*sd(G)/sqrt(m),mean(G)+1.96*sd(G)/sqrt(m))#compute confidence interval
  return(CI)
}
## ----Code chunk9_2,echo = FALSE------------------------------------------
#approximate Coverage probability(ECP) of confidence interval
# For N independent repetitions, recompute the mean Gini statistic and count
# how often it falls inside the interval CI; k/N is the empirical coverage.
# NOTE(review): this chunk uses `m` without defining it, so it relies on an
# earlier chunk having left m == 1000 in the workspace (matching the m
# hard-coded inside compu.interval) -- confirm before reordering chunks.
N<-100
bar<-numeric(N)
k<-0
a <- 0
b <- 1
I<-2*c(1:m)-m-1
G.true<-numeric(m)
CI <- compu.interval(a,b)
set.seed(1234)
for(j in 1:N){
  for(i in 1:m){
    x<-rlnorm(m,0,1)
    x<-sort(x)
    mu<-mean(x)
    G.true[i]<-1/m^2/mu*(t(I)%*%x)
  }
  bar[j]<-mean(G.true)
  if(bar[j]>CI[1]&bar[j]<CI[2]){
    k<-k+1}
}
# Empirical coverage probability (autoprinted).
k/N
## ----Code chunk11, echo = FALSE------------------------------------------
set.seed(12345)
LSAT=c(576,635,558,578,666,580,555,661,651,605,653,575,545,572,594)
GPA=c(339,330,281,303,344,307,300,343,336,313,312,274,276,288,296)
x=cbind(LSAT,GPA)
n=15
b.cor <- function(x,i) cor(x[i,1],x[i,2])
theta.hat <- b.cor(x,1:n)
theta.jack <- numeric(n)
for(i in 1:n){
theta.jack[i] <- b.cor(x,(1:n)[-i])
}
bias.jack <- (n-1)*(mean(theta.jack)-theta.hat)
se.jack <- sqrt((n-1)*mean((theta.jack-theta.hat)^2))
bias.jack
se.jack
## ----Code chunk12_1,echo = FALSE-----------------------------------------
set.seed(12345)
library(boot)
x=c(3,5,7,18,43,85,91,98,100,130,230,487)
boot.mean=function(x,i) mean(x[i])
de=boot(data=x,statistic=boot.mean,R=1024)
ci=boot.ci(de,type=c("norm","basic","perc","bca"))
ci
## ----Code chunk13, echo = FALSE------------------------------------------
library(bootstrap)
data=scor
u=c(mean(data[,1]),mean(data[,2]),mean(data[,3]),mean(data[,4]),mean(data[,5]))
m=matrix(0,5,5)
for (i in 1:88) m=m+(as.numeric(data[i,])-u)%*%t(as.numeric(data[i,])-u)
m=m/88 ##MLE of covariance matrix
lambda=eigen(m)$values ## the eigenvalues
theta.hat=lambda[1]/sum(lambda)
theta.jack=numeric(5)
for (i in 1:5) theta.jack[i]=lambda[1]/sum(lambda[-i])
bias.jack <- (n-1)*(mean(theta.jack)-theta.hat)
se.jack <- sqrt((n-1)*mean((theta.jack-theta.hat)^2))
bias.jack
se.jack
## ----Code chunk14_1,echo = FALSE-----------------------------------------
##leave-one-out (n-fold) cross validation
library(DAAG); attach(ironslag)
n <- length(magnetic) #in DAAG ironslag
e1 <- e2 <- e3 <- e4 <- numeric(n)
# for n-fold cross validation
# fit models on leave-one-out samples
for (k in 1:n) {
y <- magnetic[-k]
x <- chemical[-k]
J1 <- lm(y ~ x)
yhat1 <- J1$coef[1] + J1$coef[2] * chemical[k]
e1[k] <- magnetic[k] - yhat1
J2 <- lm(y ~ x + I(x^2))
yhat2 <- J2$coef[1] + J2$coef[2] * chemical[k] +
J2$coef[3] * chemical[k]^2
e2[k] <- magnetic[k] - yhat2
J3 <- lm(log(y) ~ x)
logyhat3 <- J3$coef[1] + J3$coef[2] * chemical[k]
yhat3 <- exp(logyhat3)
e3[k] <- magnetic[k] - yhat3
J4 <- lm(log(y) ~ log(x))
logyhat4 <- J4$coef[1] + J4$coef[2] * log(chemical[k])
yhat4 <- exp(logyhat4)
e4[k] <- magnetic[k] - yhat4
}
c(mean(e1^2), mean(e2^2), mean(e3^2), mean(e4^2))
##According to the prediction error criterion, Model 2, the quadratic model,
##would be the best fit for the data.
##leave-two-out cross validation
## Fix: the error vectors must hold one entry per left-out PAIR, i.e.
## choose(n, 2) entries.  The original preallocated numeric(n) and silently
## grew each vector on every assignment past index n.
n <- length(magnetic) #in DAAG ironslag
n.pairs <- choose(n, 2)
e1 <- e2 <- e3 <- e4 <- numeric(n.pairs)
E1 <- E2 <- E3 <- E4 <- rep(0, 2)      # residuals of the two held-out points
subscript <- t(combn(n, 2))            # every index pair, one per row
for (k in 1:n.pairs) {
  K <- subscript[k, ]                  # the two held-out observations
  y <- magnetic[-K]
  x <- chemical[-K]
  J1 <- lm(y ~ x)                      # Model 1: linear
  yhat1 <- J1$coef[1] + J1$coef[2] * chemical[K]
  E1 <- magnetic[K] - yhat1
  e1[k] <- sum(abs(E1))
  J2 <- lm(y ~ x + I(x^2))             # Model 2: quadratic
  yhat2 <- J2$coef[1] + J2$coef[2] * chemical[K] + J2$coef[3] * chemical[K]^2
  E2 <- magnetic[K] - yhat2
  e2[k] <- sum(abs(E2))
  J3 <- lm(log(y) ~ x)                 # Model 3: exponential
  logyhat3 <- J3$coef[1] + J3$coef[2] * chemical[K]
  yhat3 <- exp(logyhat3)
  E3 <- magnetic[K] - yhat3
  e3[k] <- sum(abs(E3))
  J4 <- lm(log(y) ~ log(x))            # Model 4: log-log
  logyhat4 <- J4$coef[1] + J4$coef[2] * log(chemical[K])
  yhat4 <- exp(logyhat4)
  E4 <- magnetic[K] - yhat4
  e4[k] <- sum(abs(E4))
}
## NOTE(review): each e_i[k] is the SUM of the two absolute pair errors, so
## mean(e_i^2) is not a conventional MSE; kept as in the original analysis.
c(mean(e1^2), mean(e2^2), mean(e3^2), mean(e4^2))
##According to the prediction error criterion, Model 2, the quadratic model,
##would be the best fit for the data.
## ----setup, include=FALSE------------------------------------------------
library(latticeExtra)
library(RANN)
library(energy)
library(Ball)
library(boot)
library(ggplot2)
## ----Code chunk15, echo = FALSE------------------------------------------
##function:two-sample Cramer-von Mises test for equal distributions
# Permutation version of the Cramer-von Mises two-sample test.
# NOTE(review): the statistic compares the SORTED raw values x, y against
# their within-sample ranks Ix, Iy -- the classical statistic uses ranks in
# the COMBINED sample; confirm this is the intended variant.
# Returns a lattice histogram of the permutation replicates with the
# observed statistic marked and the permutation p-value in the subtitle.
cvm <- function(x,y,data){
  r <- 1000 #permutation samples
  reps <- numeric(r)
  n <- length(x)
  m <- length(y)
  v.n <- numeric(n)
  v1.n <- numeric(n)
  v.m <- numeric(m)
  v1.m <- numeric(m)
  z <- c(x,y)   # pooled sample, permuted below
  N <- length(z)
  Ix <- seq(1:n)
  Iy <- seq(1:m)
  v.n <- (x-Ix)**2
  v.m <- (y-Iy)**2
  #test statistic
  reps_0 <- ((n * sum(v.n)+m * sum(v.m))/(m * n * N))-(4 * m * n - 1)/(6 * N)
  for (k in 1:r){#permutation samples
    w <- sample(N,size=n,replace=FALSE)   # random split of the pooled data
    x1 <- sort(z[w])
    y1 <- sort(z[-w])
    v1.n <- (x1-Ix)**2
    v1.m <- (y1-Iy)**2
    reps[k] <- ((n * sum(v1.n)+m * sum(v1.m))/(m * n * N))-(4 * m * n - 1)/(6 * N)
  }
  # p-value: proportion of replicates (including the observed one) at least
  # as large as the observed statistic
  p <- mean(c(reps_0,reps) >= reps_0)
  return(
    histogram(c(reps_0,reps),
              type="density",
              col="#0080ff",
              xlab="Replicates of Cramer-Von Mises statistic",
              ylab=list(rot=0),
              main=paste0("Data:",data),
              sub=list(substitute(paste(hat(p),"=",pvalue),list(pvalue=p)),col=2),
              panel=function(...){
                panel.histogram(...)
                panel.abline(v=reps_0,col=2,lwd=2)
              })
  )
}
##Data: Example 8.1 -- soybean vs linseed chick weights
attach(chickwts)
x <- sort(as.vector(weight[feed == "soybean"]))
y <- sort(as.vector(weight[feed == "linseed"]))
cvm1 <- cvm(x,y,"Example 8.1")
##Data: Example 8.2 -- sunflower vs linseed chick weights
x <- sort(as.vector(weight[feed == "sunflower"]))
y <- sort(as.vector(weight[feed == "linseed"]))
detach(chickwts)
cvm2 <- cvm(x,y,"Example 8.2")
##Results
print(cvm1)
print(cvm2)
## ------------------------------------------------------------------------
## variable definition
# Shared settings for the two-sample power comparison (NN / energy / Ball)
m <- 500 #permutation samples
p<-2 # dimension of data
n1 <- n2 <- 50 #the sample size of x and y
R<-999 #boot parameter
k<-3 #boot parameter (number of nearest neighbours)
n <- n1 + n2
N = c(n1,n2)
# the function of NN method
# Nearest-neighbour two-sample statistic: among the k nearest neighbours of
# every point in the permuted pooled sample, count how many fall in the
# same group, and return that proportion.
Tn <- function(z, ix, sizes,k){
  m1 <- sizes[1]
  m2 <- sizes[2]
  total <- m1 + m2
  if (is.vector(z)) {
    z <- data.frame(z, 0)   # nn2 needs at least two columns
  }
  z <- z[ix, ]              # apply the permutation
  nbrs <- nn2(data = z, k = k + 1)$nn.idx   # column 1 is the point itself
  same1 <- sum(nbrs[1:m1, -1] < m1 + 0.5)         # sample-1 neighbours in sample 1
  same2 <- sum(nbrs[(m1 + 1):total, -1] > m1 + 0.5) # sample-2 neighbours in sample 2
  (same1 + same2) / (k * total)
}
# Permutation test for equal distributions based on the NN statistic Tn.
# z: pooled data; sizes: c(n1, n2); k: number of neighbours.
# Relies on the globals R (number of permutations) and Tn defined above.
eqdist.nn <- function(z,sizes,k){
  boot.obj <- boot(data=z,statistic=Tn,R=R,sim = "permutation",
                   sizes = sizes,k=k)
  ts <- c(boot.obj$t0,boot.obj$t)   # observed statistic first
  p.value <- mean(ts>=ts[1])        # permutation p-value
  list(statistic=ts[1],p.value=p.value)
}
p.values <- matrix(NA,m,3) # one row per MC trial: NN, energy, Ball p-values
## ------------------------------------------------------------------------
##(1)Unequal variances and equal expectations
set.seed(1)
sd <- 1.5
for(i in 1:m){
  x <- matrix(rnorm(n1*p),ncol=p)
  y <- matrix(rnorm(n2*p,sd=sd),ncol=p)
  z <- rbind(x,y)
  p.values[i,1] <- eqdist.nn(z,N,k)$p.value#NN method
  p.values[i,2] <- eqdist.etest(z,sizes=N,R=R)$p.value#energy methods
  p.values[i,3] <- bd.test(x=x,y=y,R=999,seed=i*12345)$p.value# ball method
}
alpha <- 0.05;
# Empirical power: proportion of trials rejecting at level alpha
pow <- colMeans(p.values<alpha)
power <- data.frame(methods = c('NN','energy','Ball'),pow)
ggplot(power,aes(methods,pow))+#plot
  geom_col(fill = 'palegreen3')+
  coord_flip()
## ------------------------------------------------------------------------
##(2)Unequal variances and unequal expectations
set.seed(1)
mu <- 0.5
sd <- 1.5
for(i in 1:m){
  x <- matrix(rnorm(n1*p),ncol=p)
  y <- matrix(rnorm(n2*p,mean=mu,sd=sd),ncol=p)
  z <- rbind(x,y)
  p.values[i,1] <- eqdist.nn(z,N,k)$p.value#NN method
  p.values[i,2] <- eqdist.etest(z,sizes=N,R=R)$p.value#energy methods
  p.values[i,3] <- bd.test(x=x,y=y,R=999,seed=i*12345)$p.value# ball method
}
alpha <- 0.05;
pow <- colMeans(p.values<alpha)
pow
power <- data.frame(methods = c('NN','energy','Ball'),pow)
ggplot(power,aes(methods,pow))+#plot
  geom_col(fill = 'palegreen3')+
  coord_flip()
## ------------------------------------------------------------------------
##Non-normal distributions: t distribution with 1 df (heavy-tailed
##distribution), bimodal distribution (mixture of two normal
##distributions)
set.seed(1)
mu <- 0.5
sd <- 2
for(i in 1:m){
  x <- matrix(rt(n1*p,df=1),ncol=p)
  y1 = rnorm(n2*p); y2 = rnorm(n2*p,mean=mu,sd=sd)
  w = rbinom(n, 1, .5) # 50:50 random choice
  y <- matrix(w*y1 + (1-w)*y2,ncol=p)# normal mixture
  z <- rbind(x,y)
  p.values[i,1] <- eqdist.nn(z,N,k)$p.value#NN method
  p.values[i,2] <- eqdist.etest(z,sizes=N,R=R)$p.value#energy methods
  p.values[i,3] <- bd.test(x=x,y=y,R=999,seed=i*12345)$p.value# ball method
}
alpha <- 0.05;
pow <- colMeans(p.values<alpha)
pow
power <- data.frame(methods = c('NN','energy','Ball'),pow)
ggplot(power,aes(methods,pow))+#plot
  geom_col(fill = 'palegreen3')+
  coord_flip()
## ------------------------------------------------------------------------
##Unbalanced samples
# Second sample is twice as large as the first (N updated accordingly)
set.seed(1)
mu <- 0.5
N = c(n1,n2*2)
for(i in 1:m){
  x <- matrix(rnorm(n1*p),ncol=p);
  y <- cbind(rnorm(n2*2),rnorm(n2*2,mean=mu));
  z <- rbind(x,y)
  p.values[i,1] <- eqdist.nn(z,N,k)$p.value#NN method
  p.values[i,2] <- eqdist.etest(z,sizes=N,R=R)$p.value#energy methods
  p.values[i,3] <- bd.test(x=x,y=y,R=999,seed=i*12345)$p.value# ball method
}
alpha <- 0.05;
pow <- colMeans(p.values<alpha)
pow
power <- data.frame(methods = c('NN','energy','Ball'),pow)
ggplot(power,aes(methods,pow))+#plot
  geom_col(fill = 'palegreen3')+
  coord_flip()
## ----Code chunk16, echo = FALSE------------------------------------------
# Metropolis sampler targeting the standard Cauchy distribution with a
# normal random-walk proposal; k counts rejected proposals.
set.seed(1)
n <- 10000 #Sample size
x <- numeric(n)
u <- runif(n)
theta=1
eta=0
x[1] <- rnorm(1)
k <- 0
# cauchy functions
# Cauchy density with location eta and scale theta; the defaults give the
# standard Cauchy used as the Metropolis target below.
f <- function(x, theta=1, eta=0){
  z <- (x - eta) / theta
  1 / (pi * theta * (1 + z^2))
}
# Random-walk Metropolis: propose y ~ N(xt, 1), accept with ratio R
for(i in 2:n){
  xt <- x[i-1]
  y <- rnorm(1,mean=xt)
  # acceptance ratio f(y)q(xt|y) / (f(xt)q(y|xt)); the normal proposal is
  # symmetric, so the q terms cancel in theory (kept explicit here)
  R <- f(y)*dnorm(xt,mean=y)/(f(xt)*dnorm(y,mean=xt))
  if(u[i] <= R){
    x[i] <- y
  }else{
    x[i] <- xt
    k <- k+1   # count rejections
  }
}
is <- 1001:n   # discard a burn-in of 1000 iterations
par(mfrow=c(1,2))
plot(is,x[is],type="l")
hist(x[is], probability=TRUE,breaks=100)
plot.x <- seq(min(x[is]),max(x[is]),0.01)
lines(plot.x,f(plot.x))   # overlay the target density
par(mfrow=c(1,1))
#compare the deciles of the chain with the theoretical Cauchy deciles
observations <- quantile(x[is],seq(0,1,0.1))
expectations <- qcauchy(seq(0,1,0.1))
decile <- data.frame(observations,expectations)
decile
## ----Code chunk19_1,echo = FALSE-----------------------------------------
# Upper-tail probability P(T > c) for T ~ t(k-1), evaluated at the cutoff
# c(a, k-1) = sqrt(a^2 (k-1) / (k - a^2)).
Sk_1 <- function(a,k){
  cutoff <- sqrt(a^2 * (k - 1) / (k - a^2))
  1 - pt(cutoff, df = k - 1)
}
# Upper-tail probability P(T > c) for T ~ t(k), evaluated at the cutoff
# c(a, k) = sqrt(a^2 k / (k + 1 - a^2)).
Sk <- function(a,k){
  cutoff <- sqrt(a^2 * k / (k + 1 - a^2))
  1 - pt(cutoff, df = k)
}
# Difference S_{k-1}(x) - S_k(x); its root in x locates the intersection
# point of the two t-tail curves (solved by uniroot below).
difSK <- function(x,k) Sk_1(x, k) - Sk(x, k)
# Solve S_{k-1}(a) = S_k(a) on (0, sqrt(k)) for a range of k values
kset <- c(4:25,100,500,1000)
out <- 1:length(kset)
for (i in 1:length(kset)){
  out[i] <- uniroot( difSK
                     , lower = 0+1e-5, upper = sqrt(kset[i])-1e-5,k=kset[i]) $root
}
out
# Flag the k values whose root landed suspiciously close to the upper bound
# sqrt(k) -- a sign that uniroot latched onto the boundary, not a true root
kset[ abs(out-sqrt(kset)) < sqrt(kset)*0.01]
n <- 1:length(kset)
Kwrongnum <- n[abs(out-sqrt(kset)) < sqrt(kset)*0.01]
#Example : k=23 -- visual check of difSK over the search interval
k=23
xx <- seq(0.01,sqrt(k)-1e-5,length=1000)
y <- difSK(xx,k)
plot(xx,y,type="l")
#Example : k=1000
k=1000
xx <- seq(0.01,sqrt(k)-1e-5,length=1000)
y <- difSK(xx,k)
plot(xx,y,type="l")
#change upper to 3: re-solve the flagged cases on the narrower interval
# where the interior root actually lies
for (i in Kwrongnum){
  out[i] <- uniroot( difSK
                     , lower = 0+1e-5, upper =3,k=kset[i]) $root
}
names(out) <- kset
out
## ----Code chunk20, echo = FALSE------------------------------------------
# Cauchy(location = eta, scale = theta) density.
# Bug fix: use the built-in constant pi instead of the truncated literal
# 3.141592653, which slightly biased every density (and integral) value.
f<-function(y,theta,eta){
  1/(theta*pi*(1+((y-eta)/theta)^2))
}
# Despite the name, this computes the Cauchy CDF (or upper tail) by
# numerically integrating the density f() defined above.
pdf<-function(x,theta,eta,lower.tail=TRUE){
  bounds <- if (lower.tail) c(-Inf, x) else c(x, Inf)
  area <- integrate(f, lower = bounds[1], upper = bounds[2],
                    rel.tol = .Machine$double.eps^0.25,
                    theta = theta, eta = eta)
  area$value
}
# Sanity checks: the numerical CDF should match R's pcauchy
pdf(x=0,theta = 1,eta = 0)
pcauchy(0,location = 0,scale = 1)
pdf(x=2,theta = 2,eta =1,lower.tail = F )
pcauchy(2,location = 1,scale = 2,lower.tail = F)
## ----echo=FALSE----------------------------------------------------------
# ABO blood-group table: genotype, Hardy-Weinberg frequency, observed count.
# Fix: Genotype previously had 6 entries while the other rows had 7, so
# rbind() recycled 'AA' into the totals column; a 'Total' label is added so
# all three rows align (frequencies sum to 1, counts to n).
dat <- rbind(Genotype=c('AA','BB','OO','AO','BO','AB','Total'),
             Frequency=c('p2','q2','r2','2pr','2qr','2pq',1),
             Count=c('nAA','nBB','nOO','nAO','nBO','nAB','n'))
knitr::kable(dat,format='markdown',caption = "Comparison of them",align = "c")
## ----Code chunk21_1,echo = FALSE-----------------------------------------
library(nloptr)
# Mle
# Negative expected complete-data log-likelihood for the ABO EM algorithm.
# x  = current (p, q); x1 = previous iterate (p0, q0) used in the E-step;
# n.A, n.B, nOO, nAB are the observed phenotype counts.
eval_f0 <- function(x,x1,n.A=28,n.B=24,nOO=41,nAB=70) {
  # E-step: expected genotype counts given the previous iterate
  r.old <- 1 - sum(x1)
  exp.AA <- n.A * x1[1]^2 / (x1[1]^2 + 2 * x1[1] * r.old)
  exp.BB <- n.B * x1[2]^2 / (x1[2]^2 + 2 * x1[2] * r.old)
  # M-step objective evaluated at the candidate frequencies x
  r.new <- 1 - sum(x)
  -2 * exp.AA * log(x[1]) - 2 * exp.BB * log(x[2]) - 2 * nOO * log(r.new) -
    (n.A - exp.AA) * log(2 * x[1] * r.new) -
    (n.B - exp.BB) * log(2 * x[2] * r.new) -
    nAB * log(2 * x[1] * x[2])
}
# Inequality constraint for nloptr: feasible iff g(x) <= 0, i.e. the allele
# frequencies satisfy p + q <= 0.999999, keeping r = 1 - p - q > 0 so the
# log(r) terms in the objective stay finite.
eval_g0 <- function(x,x1,n.A=28,n.B=24,nOO=41,nAB=70) {
  sum(x) - 0.999999
}
opts <- list("algorithm"="NLOPT_LN_COBYLA",
             "xtol_rel"=1.0e-8)
mle<-NULL
# r accumulates one (p, q) iterate per row; start from a dummy row plus the
# initial guess so the convergence test below has two rows to compare
r<-matrix(0,1,2)
r<-rbind(r,c(0.2,0.35))# the beginning value of p0 and q0
j<-2
# EM iteration: each M-step is a constrained minimisation of eval_f0 with
# the E-step frozen at the previous iterate r[j,]; stop when consecutive
# iterates agree to 1e-8
while (sum(abs(r[j,]-r[j-1,]))>1e-8) {
  res <- nloptr( x0=c(0.3,0.25),
                 eval_f=eval_f0,
                 lb = c(0,0), ub = c(1,1),
                 eval_g_ineq = eval_g0,
                 opts = opts, x1=r[j,],n.A=28,n.B=24,nOO=41,nAB=70 )
  j<-j+1
  r<-rbind(r,res$solution)
  mle<-c(mle,eval_f0(x=r[j,],x1=r[j-1,]))
}
r #the result of EM algorithm
mle #the max likelihood values
## ----Code chunk23, echo = FALSE------------------------------------------
# Fit four model formulas to mtcars, once with a for loop and once with
# lapply, to show the two are equivalent
attach(mtcars)
formulas <- list(
  mpg ~ disp,
  mpg ~ I(1 / disp),
  mpg ~ disp + wt,
  mpg ~ I(1 / disp) + wt
)
#1 for loops
f3<- vector("list", length(formulas))
for (i in seq_along(formulas)){
  f3[[i]] <- lm(formulas[[i]], data = mtcars)
}
f3
#2 lapply
la3<-lapply(formulas, function(x) lm(formula = x, data = mtcars))
la3
## ----Code chunk24,echo = FALSE-------------------------------------------
# Fit mpg ~ disp to ten bootstrap resamples of mtcars, again both ways
set.seed(123)
bootstraps <- lapply(1:10, function(i) {
  rows <- sample(1:nrow(mtcars), rep = TRUE)
  mtcars[rows, ]
})
# for loops
f4<- vector("list", length(bootstraps))
for (i in seq_along(bootstraps)){
  f4[[i]] <- lm(mpg ~ disp, data = bootstraps[[i]])
}
f4
# lapply without anonymous function (extra args passed through lapply)
la4<- lapply(bootstraps, lm, formula = mpg ~ disp)
la4
## ----Code chunk25,echo = FALSE-------------------------------------------
# Extract R^2 from every fitted model
rsq <- function(mod) summary(mod)$r.squared
#3
sapply(la3, rsq)
sapply(f3, rsq)
#4
sapply(la4,rsq)
sapply(f4,rsq)
## ----Code chunk26,echo = FALSE-------------------------------------------
# 100 t-tests on Poisson samples; pull out each p-value with sapply
set.seed(123)
trials <- replicate(
  100,
  t.test(rpois(10, 10), rpois(7, 10)),
  simplify = FALSE
)
# anonymous function:
sapply(trials, function(x) x[["p.value"]])
## ----Code chunk27,echo = FALSE-------------------------------------------
#example: column means of every data frame in a list
options(warn = -1)
testlist <- list(iris, mtcars, cars)
lapply(testlist, function(x) vapply(x, mean, numeric(1)))
#a more specialized function:
# Apply FUN (with return template FUN.VALUE, as in vapply) to every column
# of every data frame in X; optionally simplify the results to an array.
lmapply <- function(X, FUN, FUN.VALUE, simplify = FALSE){
  per_df <- Map(function(d) vapply(d, FUN, FUN.VALUE), X)
  if (simplify == TRUE) {
    return(simplify2array(per_df))
  }
  per_df
}
# Run the specialised helper on the example list of data frames
lmapply(testlist, mean, numeric(1))
## ------------------------------------------------------------------------
# Pearson chi-squared test of independence for the 2 x k contingency table
# whose rows are the count vectors x and y.
# Returns list(X_squared, df, `p-value`).
chisq.test2 <- function(x, y){
  # Input validation
  if (!is.numeric(x)) {
    stop("x must be numeric")}
  if (!is.numeric(y)) {
    stop("y must be numeric")}
  if (length(x) != length(y)) {
    stop("x and y must have the same length")}
  if (length(x) <= 1) {
    stop("length of x must be greater than one")}  # typo fixed ("greater one")
  if (any(c(x, y) < 0)) {
    stop("all entries of x and y must be greater or equal zero")}
  if (sum(complete.cases(x, y)) != length(x)) {
    stop("there must be no missing values in x and y")}
  # NOTE(review): unreachable -- c() drops NULL, and a NULL input already
  # fails the is.numeric() checks above; kept for backward compatibility.
  if (any(is.null(c(x, y)))) {
    stop("entries of x and y must not be NULL")}
  # Expected counts under independence: outer product of margins / total
  m <- rbind(x, y)         # observed 2 x k table
  margin1 <- rowSums(m)
  margin2 <- colSums(m)
  n <- sum(m)
  me <- tcrossprod(margin1, margin2) / n  # expected counts
  # Statistic, degrees of freedom, upper-tail p-value
  STATISTIC = sum((m - me)^2 / me)
  dof <- (length(margin1) - 1) * (length(margin2) - 1)#degree of freedom
  p <- pchisq(STATISTIC, df = dof, lower.tail = FALSE)
  return(list(X_squared = STATISTIC, df = dof, `p-value` = p))
}
## ------------------------------------------------------------------------
a <- 11:15
b <- c(11,12.5,13.5,14.5,15.5)
m_test <- cbind(a,b)
identical(chisq.test(m_test),chisq.test2(a, b))
## ------------------------------------------------------------------------
chisq.test(m_test)
chisq.test2(a, b)
## ------------------------------------------------------------------------
chisq.test2c <- compiler::cmpfun(chisq.test2)
microbenchmark::microbenchmark(
chisq.test(m_test),
chisq.test2(a,b),
chisq.test2c(a,b)
)
## ------------------------------------------------------------------------
# Minimal reimplementation of a two-way table(): counts joint occurrences
# of (x[i], y[i]) pairs and returns a base-R "table" object whose dimnames
# are labelled with the caller's argument expressions, as table() does.
table2 <- function(x,y){
  x_val <- unique(x)
  y_val <- unique(y)
  mat <- matrix(0L, length(x_val), length(y_val))
  for (i in seq_along(x)) {
    # Hoisted: the original evaluated each which() twice per iteration
    r <- which(x_val == x[[i]])
    s <- which(y_val == y[[i]])
    mat[r, s] <- mat[r, s] + 1L
  }
  dimnames <- list(x_val, y_val)
  # Name the dimensions after the expressions the caller supplied
  names(dimnames) <- as.character(as.list(match.call())[-1])
  tab <- array(mat, dim = dim(mat), dimnames = dimnames)
  class(tab) <- "table"
  tab
}
## ------------------------------------------------------------------------
x <- c(1, 2, 3, 1, 2, 3)
y <- c(2, 3, 4, 2, 3, 4)
identical(table(x,y), table2(x,y))
## ------------------------------------------------------------------------
microbenchmark::microbenchmark(table(x,y), table2(x,y))
|
4a2d9ef96013ab44ca340d6e45f6ae9399d5d1b5
|
00a6e8378c523b048399b3a7438f0fe22a6f5d4e
|
/R/Pre_Analysis.R
|
4ec80f9f5d8af7b720e6a43f71188dcb00103f70
|
[] |
no_license
|
sxinger/DKD_PM_temporal
|
46c117401ff7757ab440b216e4074efd5cf0bcb4
|
dbbb35a2e18411422665958e27ecb1be7f675a62
|
refs/heads/master
| 2020-04-10T12:12:30.682542
| 2019-04-23T01:03:17
| 2019-04-23T01:03:17
| 161,014,841
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,405
|
r
|
Pre_Analysis.R
|
#### Pre-Analysis ####
# Summarise how frequently eGFR measurements and other clinical facts are
# updated per patient (mean/sd/median/IQR of the day gaps between updates).
# NOTE(review): rm(list=ls()) in a sourced script wipes the caller's
# workspace -- consider removing.
rm(list=ls())
gc()
source("./R/util.R")
require_libraries(c( "Matrix"
                     ,"pROC"
                     ,"xgboost"
                     ,"dplyr"
                     ,"tidyr"
                     ,"magrittr"
))
pat_tbl<-readRDS("./data2/pat_episode2.rda")
fact_stack<-readRDS("./data2/DKD_heron_facts_prep.rda")
#### eGFR update frequencies ####
# Per-patient gap statistics, then averaged within and across patients
eGFR_ud_freq<-pat_tbl %>%
  group_by(PATIENT_NUM) %>%
  dplyr::summarize(day_delta_mean = mean(DAY_SINCE_delta,na.rm=T),
                   day_delta_sd = sd(DAY_SINCE_delta,na.rm=T),
                   day_delta_median = median(DAY_SINCE_delta,na.rm=T),
                   day_delta_IQR = quantile(DAY_SINCE_delta,probs=0.75,na.rm=T)-quantile(DAY_SINCE_delta,probs=0.25,na.rm=T)) %>%
  ungroup %>%
  dplyr::summarize(overall_mean = mean(day_delta_mean,na.rm=T),
                   within_pat_sd = mean(day_delta_sd,na.rm=T),
                   acr_pat_sd = sd(day_delta_mean,na.rm=T),
                   overall_median = median(day_delta_median,na.rm=T),
                   within_pat_IQR = mean(day_delta_IQR,na.rm=T),
                   acr_pat_IQR = sd(day_delta_IQR,na.rm=T))
#### clinical fact intensity ####
# Same gap statistics per patient AND per variable category; lag() computes
# the day gap between consecutive distinct fact days
fact_ud_freq<-fact_stack %>%
  dplyr::select(PATIENT_NUM,VARIABLE_CATEG,day_from_dm) %>%
  dplyr::mutate(day_from_dm = pmax(0,day_from_dm)) %>%  # clamp pre-DM days to 0
  unique %>%
  group_by(PATIENT_NUM,VARIABLE_CATEG) %>%
  arrange(day_from_dm) %>%
  dplyr::mutate(day_from_dm_lag = lag(day_from_dm,n=1L)) %>%
  dplyr::mutate(day_from_dm_delta = day_from_dm - day_from_dm_lag) %>%
  dplyr::summarize(day_delta_mean = mean(day_from_dm_delta,na.rm=T),
                   day_delta_sd = sd(day_from_dm_delta,na.rm=T),
                   day_delta_median = median(day_from_dm_delta,na.rm=T),
                   day_delta_IQR = quantile(day_from_dm_delta,probs=0.75,na.rm=T)-quantile(day_from_dm_delta,probs=0.25,na.rm=T)) %>%
  ungroup %>%
  group_by(VARIABLE_CATEG) %>%
  dplyr::summarize(size=length(unique(PATIENT_NUM)),
                   overall_mean = mean(day_delta_mean,na.rm=T),
                   within_pat_sd = mean(day_delta_sd,na.rm=T),
                   acr_pat_sd = sd(day_delta_mean,na.rm=T),
                   overall_median = median(day_delta_median,na.rm=T),
                   within_pat_IQR = mean(day_delta_IQR,na.rm=T),
                   acr_pat_IQR = sd(day_delta_IQR,na.rm=T))
|
698da318fbba217e728f09db679999f242d59971
|
a020b9ef9587b5bc883f6283b1fa6ecd46f02676
|
/PCR.R
|
47dea103ab9b2b195a72a2470bb9c60bab67b4d6
|
[] |
no_license
|
mariondechallens/First-Internship
|
b279528581e0421f7488f934ee6f790d98514985
|
e15b6eb60893329cade8276ad8b3d9872fb375b1
|
refs/heads/master
| 2021-05-11T11:12:19.807367
| 2018-03-29T12:53:51
| 2018-03-29T12:53:51
| 118,123,150
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,939
|
r
|
PCR.R
|
# Principal components regression (PCR) of the NTS value series: fit on a
# 70% training split, choose the number of components by cross-validation,
# and evaluate on the held-out 30%.
.libPaths(c("C:/Marion/Rstudio/packages_install",.libPaths()))
library(pls)
source(file = "C:/Marion/T2S_LabStatistics/MOTI_NTS_analysis/MOTI_regressions/moti_reg_facto.R") ##for functions
##trying principal components regression
data<-read.table("C:/Marion/T2S_LabStatistics/SOA/total_cleaned_data.csv",header=TRUE,sep=";", dec=",")
# NOTE(review): pval is not defined in this script -- presumably supplied by
# the sourced moti_reg_facto.R; verify.
table<-na.omit(regression(data,pval)$dataframe[,-1]) # drop the date column
##creating training and testing sets
set.seed(1)
train<-sample(seq(nrow(table)),round(0.7*nrow(table)),replace=FALSE)
traindata<-table[train,]
testdata<-table[-train,]
# Total sum of squares of the response on the training set (for R^2 below)
error<-rep(0,nrow(traindata))
for (i in 1:nrow(traindata)){
  error[i]<-error[i]+mean(traindata$Value...NTS)-traindata$Value...NTS[i]
}
SST<-sum(error^2) ##total sum of squares
pcr.fit <- pcr(Value...NTS ~., data=traindata, validation="CV", ncomp=20)
summary(pcr.fit)
validationplot(pcr.fit, val.type='MSEP',legendpos="top",main="Prediction error") ##7 or 13 components is the best
axis(1,1:20)
validationplot(pcr.fit, val.type='R2',legendpos="top")
nbcompo<-13
# In-sample fit with the chosen number of components
pcr.pred1 <- predict(pcr.fit, traindata, ncomp=nbcompo)
MSE<-sum((pcr.pred1 - traindata$Value...NTS)^2)
plot(traindata$Value...NTS,ylab="Value NTS",xlab="Business days",type='l',col="red",main=paste0("Principal components regression model"))
lines(pcr.pred1,col="blue")
legend("bottomleft",legend=c("Real values","Predicted values"),fill=c("red","blue"),border=c("red","blue"))
# Out-of-sample prediction on the test split
pcr.pred <- predict(pcr.fit, testdata, ncomp=nbcompo)
MSEpred<-sum((pcr.pred - testdata$Value...NTS)^2)
plot(testdata$Value...NTS,ylab="Value NTS",xlab="Business days",type='l',col="red",main=paste0("Principal components regression model"))
lines(pcr.pred,col="blue")
legend("bottomleft",legend=c("Real values","Predicted values"),fill=c("red","blue"),border=c("red","blue"))
# Training R^2 and adjusted R^2
r2pcr<-1-MSE/SST
r2adjpcr<-1-(1-r2pcr)*(nrow(traindata)-1)/(nrow(traindata)-1-nbcompo)
|
8c52d288408852cf960128db0a7d3c95b79a5c04
|
89d5a7062a6991a49efcd21313c9f2daeb26261c
|
/R/tidy_cashflows.R
|
2f018765b311613601e2f814fc739e398d2b3274
|
[] |
no_license
|
anttsou/qmj
|
3786eb2bdff69831ae6e4bdda9d37d9c03af27a6
|
ffc56ea6d7a00e8f2f958df9c44a6008211882d3
|
refs/heads/master
| 2021-01-19T00:47:23.680802
| 2016-07-10T21:48:59
| 2016-07-10T21:48:59
| 29,163,706
| 10
| 7
| null | 2016-01-10T23:36:58
| 2015-01-13T00:08:25
|
R
|
UTF-8
|
R
| false
| false
| 1,442
|
r
|
tidy_cashflows.R
|
#' Makes raw cash flow data usable and readable.
#'
#' Processes raw cash flow data from quantmod and returns a tidied data
#' frame. Raw cash flow data must be a list whose every element is a data
#' frame or matrix of quantmod data.
#'
#' \code{tidy_cashflows} produces a data frame that is 'tidy' --
#' more readily readable by a user and usable by the other functions
#' within this package.
#'
#' @param x A list of raw cash flow data produced from quantmod
#'
#' @return Returns a data set that's been 'tidied' up for use
#' by other functions in this package.
#'
#' @seealso \code{\link{get_info}}
#' @seealso \code{\link{tidyinfo}}
#' @seealso \code{\link{tidy_balancesheets}}
#' @seealso \code{\link{tidy_incomestatements}}
tidy_cashflows <- function(x) {
  ## Tidy each raw element, then stack the results into one data.frame.
  tidied <- lapply(x, tidy_helper)
  cashflows <- do.call(rbind, tidied)
  ## Drop rows that consist entirely of NA values.
  keep <- rowSums(!is.na(cashflows)) >= 1
  cashflows <- cashflows[keep, ]
  rownames(cashflows) <- NULL
  ## Abbreviated cash-flow variable names expected from the raw data.
  names(cashflows) <- c("ticker", "year", "order", "NI.SL", "DP.DPL", "AM",
                        "DT", "NCI", "CWC", "COA", "CX", "OICF", "CIA",
                        "FCFI", "TCDP", "ISN", "IDN", "CFA", "FEE", "NCC",
                        "CIP", "CTP")
  cashflows
}
|
e1514d6084b61bfa8bc4337cd1958739448d8fe3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Evapotranspiration/examples/ET.GrangerGray.Rd.R
|
b3c96257e5ee107887073b2066009ab3dbef3aa3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 558
|
r
|
ET.GrangerGray.Rd.R
|
library(Evapotranspiration)
### Name: ET.GrangerGray
### Title: Granger-Gray Formulation
### Aliases: ET.GrangerGray
### Keywords: Granger-Gray evapotranspiration open-water evaporation
###   potential evapotranspiration
### ** Examples
# Use processed existing data set and constants from kent Town, Adelaide
data("processeddata")
data("constants")
# Call ET.GrangerGray under the generic function ET
# (daily time step, sunshine-hours solar input, 1948 wind function)
results <- ET.GrangerGray(data, constants, ts="daily",
solar="sunshine hours", windfunction_ver=1948, alpha=0.23,
message="yes", save.csv="yes")
|
c74637ecaaf4610dcd9aa4e79a3df6ebe7493f45
|
cb4b8d511a14f1655120bb8737266296c5e46059
|
/R/birds/GLLVMs/gllvm_treatments_again_nomid.R
|
26a5f6ff7ef5df936c6b139e7438287bd607f644
|
[] |
no_license
|
Josh-Lee1/JL_honours
|
40361e2f8b78fac9676ff32a8e0ce7a0603f6152
|
db6792a039d824fdb518f9e06c3cc27ecca6da8a
|
refs/heads/master
| 2023-03-29T22:28:19.500012
| 2021-04-15T04:40:20
| 2021-04-15T04:40:20
| 295,877,409
| 0
| 0
| null | 2021-03-16T06:17:06
| 2020-09-16T00:02:18
|
HTML
|
UTF-8
|
R
| false
| false
| 10,989
|
r
|
gllvm_treatments_again_nomid.R
|
library(gllvm)
library(tidyverse)
library(lattice)
library(janitor)
library(ggpubr)
library(ggplotify)
#read in data created in Traits.R
# Build the bird/fire dataset: add 0/1 indicator columns for Burnt and
# Rainforest, plus a numeric location id for the random row effect.
df <- read.csv("Data/Processed/ALLdata.csv") %>% 
  select(-c(X)) %>% 
  mutate(Burnt = Fire =="Burnt") %>% 
  mutate(Rainforest = Formation =="Rainforest")
#make some string changes so it will work with gllvm
df$Burnt<- as.integer(as.logical(df$Burnt))
df$Rainforest<- as.integer(as.logical(df$Rainforest))
df$location.id <- as.numeric(factor(df$Location, levels=unique(df$Location)))
#split into 4 dfs (one per fire-by-formation treatment); species never
#recorded in a treatment (total == 0) are dropped, and id re-numbers sites
br <- df %>% 
  filter(Treatment == "RainforestBurnt") %>% 
  select(-c(id)) %>% 
  group_by(Species) %>% 
  mutate(total = sum(Count)) %>% 
  filter(total > 0) %>% 
  ungroup() %>% 
  as.data.frame()
# NOTE(review): only br is coerced with as.data.frame(); ur/bs/us stay
# tibbles -- confirm this asymmetry is intentional.
br$id <- as.integer(as.factor(br$Site))
ur <- df %>% 
  filter(Treatment == "RainforestUnburnt") %>% 
  select(-c(id))%>% 
  group_by(Species) %>% 
  mutate(total = sum(Count)) %>% 
  filter(total > 0)%>% 
  ungroup()
ur$id <- as.integer(as.factor(ur$Site))
bs <- df %>% 
  filter(Treatment == "Dry SclerophyllBurnt") %>% 
  select(-c(id))%>% 
  group_by(Species) %>% 
  mutate(total = sum(Count)) %>% 
  filter(total > 0)%>% 
  ungroup()
bs$id <- as.integer(as.factor(bs$Site))
us <- df %>% 
  filter(Treatment == "Dry SclerophyllUnburnt") %>% 
  select(-c(id))%>% 
  group_by(Species) %>% 
  mutate(total = sum(Count)) %>% 
  filter(total > 0)%>% 
  ungroup()
us$id <- as.integer(as.factor(us$Site))
#format each df for 4thCM, then run in gllvm
##br###################################################################
## Fix: the original chain was broken after select(-c(id)) -- the missing
## %>% left Xbr as a data frame, and the following bare as.matrix() call
## (no argument) errored at run time.  Now matches the Xbs/Xur/Xus
## pipelines: one environmental-covariate matrix per site.
Xbr <- br %>% 
  dplyr::select (Burnt, location.id, Rainforest, id, Litter.Depth, Litter.Cover, Understory, Mid.height, Canopy.Cover) %>%
  dplyr::distinct() %>%
  select(-c(id)) %>%
  as.matrix()
TRbr <- br %>%
dplyr::select (Species,
X99_Body_mass_average_8,
X163_Food_Fruit_10,
X164_Food_Nectar_or_pollen_10,
X165_Food_Seeds_10,
X166_Food_Foliage_or_herbs_10,
X168_Food_Terrestrial_invertebrates_10,
X169_Food_Terrestrial_vertebrates_10) %>%
dplyr::distinct() %>%
dplyr::select(-Species) %>%
rename(Frugivore = X163_Food_Fruit_10,
Nectarivore = X164_Food_Nectar_or_pollen_10,
Granivore = X165_Food_Seeds_10,
Folivore = X166_Food_Foliage_or_herbs_10,
Insectivore = X168_Food_Terrestrial_invertebrates_10,
Carnivore = X169_Food_Terrestrial_vertebrates_10,
Size = X99_Body_mass_average_8) %>%
as.matrix()
#running Burnt Rainforest
fit_4thbr <- gllvm(ybr, Xbr, TRbr, family = "negative.binomial", num.lv = 2,
formula = ~ (Litter.Depth + Litter.Cover + Understory + Canopy.Cover) +
(Litter.Depth + Litter.Cover + Understory + Canopy.Cover) :
(Frugivore + Nectarivore + Granivore + Folivore + Insectivore + Carnivore + Size), seed = 123,
row.eff = "random", control.start =list(n.init = 3, jitter.var = 0.01),
randomX = ~ Litter.Depth + Litter.Cover + Understory + Canopy.Cover)
br_coef<- coefplot(fit_4thbr, mar = c(4, 11, 1, 1), cex.ylab = 0.8)
fourthbr <- fit_4thbr$fourth.corner
a <- max( abs(fourthbr) )
colort <- colorRampPalette(c("blue", "white", "red"))
plot.4thbr <- levelplot((as.matrix(fourthbr)), xlab = "Environmental Variables",
ylab = "Species traits", col.regions = colort(100), cex.lab = 1.3,
at = seq(-a, a, length = 100), scales = list(x = list(rot = 45)))
plot.4thbr
##bs###################################################################
Xbs <- bs %>%
dplyr::select (Burnt, location.id, Rainforest, id, Litter.Depth, Litter.Cover, Understory, Mid.height, Canopy.Cover) %>%
dplyr::distinct() %>%
select(-c(id)) %>%
as.matrix()
ybs <- bs %>%
dplyr::select (Species, id, Count) %>%
tidyr::pivot_wider(values_from = Count, names_from = Species, id_cols = id)%>%
dplyr::select (-id) %>%
as.matrix()
TRbs <- bs %>%
dplyr::select (Species,
X99_Body_mass_average_8,
X163_Food_Fruit_10,
X164_Food_Nectar_or_pollen_10,
X165_Food_Seeds_10,
X166_Food_Foliage_or_herbs_10,
X168_Food_Terrestrial_invertebrates_10,
X169_Food_Terrestrial_vertebrates_10) %>%
dplyr::distinct() %>%
dplyr::select(-Species) %>%
rename(Frugivore = X163_Food_Fruit_10,
Nectarivore = X164_Food_Nectar_or_pollen_10,
Granivore = X165_Food_Seeds_10,
Folivore = X166_Food_Foliage_or_herbs_10,
Insectivore = X168_Food_Terrestrial_invertebrates_10,
Carnivore = X169_Food_Terrestrial_vertebrates_10,
Size = X99_Body_mass_average_8) %>%
as.matrix()
#running Burnt Sclerophyll
fit_4thbs <- gllvm(ybs, Xbs, TRbs, family = "negative.binomial", num.lv = 2,
formula = ~ (Litter.Depth + Litter.Cover + Understory + Canopy.Cover) +
(Litter.Depth + Litter.Cover + Understory + Canopy.Cover) :
(Frugivore + Nectarivore + Granivore + Folivore + Insectivore + Carnivore + Size), seed = 123,
row.eff = "random", control.start =list(n.init = 3, jitter.var = 0.01),
randomX = ~ Litter.Depth + Litter.Cover + Understory + Canopy.Cover)
bs_coef<- coefplot(fit_4thbs, mar = c(4, 11, 1, 1), cex.ylab = 0.8)
fourthbs <- fit_4thbs$fourth.corner
b <- max( abs(fourthbs) )
colort <- colorRampPalette(c("blue", "white", "red"))
plot.4thbs <- levelplot((as.matrix(fourthbs)), xlab = "Environmental Variables",
ylab = "Species traits", col.regions = colort(100), cex.lab = 1.3,
at = seq(-b, b, length = 100), scales = list(x = list(rot = 45)))
plot.4thbs
##ur###################################################################
# Unburnt Rainforest (ur): build the three gllvm input matrices.
# Xur: one row per site -- habitat / environment covariates.
Xur <- ur %>%
  dplyr::select (Burnt, location.id, Rainforest, id, Litter.Depth, Litter.Cover, Understory, Mid.height, Canopy.Cover) %>%
  dplyr::distinct() %>%
  select(-c(id)) %>%
  as.matrix()
# yur: site x species abundance matrix (counts widened from long format).
yur <- ur %>%
  dplyr::select (Species, id, Count) %>%
  tidyr::pivot_wider(values_from = Count, names_from = Species, id_cols = id)%>%
  dplyr::select (-id) %>%
  as.matrix()
# TRur: one row per species -- diet and body-mass traits,
# renamed from raw database column codes to guild labels.
TRur <- ur %>%
  dplyr::select (Species,
                 X99_Body_mass_average_8,
                 X163_Food_Fruit_10,
                 X164_Food_Nectar_or_pollen_10,
                 X165_Food_Seeds_10,
                 X166_Food_Foliage_or_herbs_10,
                 X168_Food_Terrestrial_invertebrates_10,
                 X169_Food_Terrestrial_vertebrates_10) %>%
  dplyr::distinct() %>%
  dplyr::select(-Species) %>%
  rename(Frugivore = X163_Food_Fruit_10,
         Nectarivore = X164_Food_Nectar_or_pollen_10,
         Granivore = X165_Food_Seeds_10,
         Folivore = X166_Food_Foliage_or_herbs_10,
         Insectivore = X168_Food_Terrestrial_invertebrates_10,
         Carnivore = X169_Food_Terrestrial_vertebrates_10,
         Size = X99_Body_mass_average_8) %>%
  as.matrix()
#running Unburnt Rainforest
# Same fourth-corner model specification as the bs block above.
fit_4thur <- gllvm(yur, Xur, TRur, family = "negative.binomial", num.lv = 2,
                   formula = ~ (Litter.Depth + Litter.Cover + Understory + Canopy.Cover) +
                     (Litter.Depth + Litter.Cover + Understory + Canopy.Cover) :
                     (Frugivore + Nectarivore + Granivore + Folivore + Insectivore + Carnivore + Size), seed = 123,
                   row.eff = "random", control.start =list(n.init = 3, jitter.var = 0.01),
                   randomX = ~ Litter.Depth + Litter.Cover + Understory + Canopy.Cover)
ur_coef<- coefplot(fit_4thur, mar = c(4, 11, 1, 1), cex.ylab = 0.8)
# Fourth-corner heatmap with a symmetric colour scale (+/- c).
fourthur <- fit_4thur$fourth.corner
c <- max( abs(fourthur) )
colort <- colorRampPalette(c("blue", "white", "red"))
plot.4thur <- levelplot((as.matrix(fourthur)), xlab = "Environmental Variables",
                        ylab = "Species traits", col.regions = colort(100), cex.lab = 1.3,
                        at = seq(-c, c, length = 100), scales = list(x = list(rot = 45)))
plot.4thur
##us###################################################################
# Unburnt Sclerophyll (us): build the three gllvm input matrices,
# mirroring the bs and ur blocks above.
# Xus: one row per site -- habitat / environment covariates.
Xus <- us %>%
  dplyr::select (Burnt, location.id, Rainforest, id, Litter.Depth, Litter.Cover, Understory, Mid.height, Canopy.Cover) %>%
  dplyr::distinct() %>%
  select(-c(id)) %>%
  as.matrix()
# yus: site x species abundance matrix (counts widened from long format).
yus <- us %>%
  dplyr::select (Species, id, Count) %>%
  tidyr::pivot_wider(values_from = Count, names_from = Species, id_cols = id)%>%
  dplyr::select (-id) %>%
  as.matrix()
# TRus: one row per species -- diet and body-mass traits renamed to
# guild labels.
TRus <- us %>%
  dplyr::select (Species,
                 X99_Body_mass_average_8,
                 X163_Food_Fruit_10,
                 X164_Food_Nectar_or_pollen_10,
                 X165_Food_Seeds_10,
                 X166_Food_Foliage_or_herbs_10,
                 X168_Food_Terrestrial_invertebrates_10,
                 X169_Food_Terrestrial_vertebrates_10) %>%
  dplyr::distinct() %>%
  dplyr::select(-Species) %>%
  rename(Frugivore = X163_Food_Fruit_10,
         Nectarivore = X164_Food_Nectar_or_pollen_10,
         Granivore = X165_Food_Seeds_10,
         Folivore = X166_Food_Foliage_or_herbs_10,
         Insectivore = X168_Food_Terrestrial_invertebrates_10,
         Carnivore = X169_Food_Terrestrial_vertebrates_10,
         Size = X99_Body_mass_average_8) %>%
  as.matrix()
# Fit the fourth-corner model for Unburnt Sclerophyll.
# (The original comment said "Unburnt Rainforest", which is the ur
# block above -- fixed for clarity.)
fit_4thus <- gllvm(yus, Xus, TRus, family = "negative.binomial", num.lv = 2,
                   formula = ~ (Litter.Depth + Litter.Cover + Understory + Canopy.Cover) +
                     (Litter.Depth + Litter.Cover + Understory + Canopy.Cover) :
                     (Frugivore + Nectarivore + Granivore + Folivore + Insectivore + Carnivore + Size), seed = 123,
                   row.eff = "random", control.start =list(n.init = 3, jitter.var = 0.01),
                   randomX = ~ Litter.Depth + Litter.Cover + Understory + Canopy.Cover)
# Call the coefplot() generic, as in the bs/ur blocks, instead of the
# S3 method coefplot.gllvm() directly -- dispatch picks the same method.
us_coef<- coefplot(fit_4thus, mar = c(4, 11, 1, 1), cex.ylab = 0.8)
# Fourth-corner heatmap with a symmetric colour scale (+/- d).
fourthus <- fit_4thus$fourth.corner
d <- max( abs(fourthus) )
colort <- colorRampPalette(c("blue", "white", "red"))
plot.4thus <- levelplot((as.matrix(fourthus)), xlab = "Environmental Variables",
                        ylab = "Species traits", col.regions = colort(100), cex.lab = 1.3,
                        at = seq(-d, d, length = 100), scales = list(x = list(rot = 45)))
plot.4thus
#put into a 4 panel figure
#a-bR, b-ur, c-bs, d-us
# NOTE(review): the as.ggplot() conversions below are printed but their
# results are never stored, and ggarrange() is then handed the raw
# lattice (trellis) objects. Confirm ggarrange() accepts trellis objects
# here -- otherwise the stored as.ggplot() conversions (with their panel
# titles) should be arranged instead.
as.ggplot(plot.4thbr) + ggtitle("a)")
as.ggplot(plot.4thur)+ ggtitle("b)")
as.ggplot(plot.4thbs)+ ggtitle("c)")
as.ggplot(plot.4thus)+ ggtitle("d)")
ggarrange(plot.4thbr, plot.4thur, plot.4thbs, plot.4thus,
          labels = c("a", "b", "c", "d"),
          ncol = 2, nrow = 2)
|
3fa3e004f6976aa1253ad1e82f4c21167ecf23d3
|
a3eda6ec1641566de1546df9113320ed68e8a33b
|
/1205 Crime Shiny R - Jue.R
|
5161ad47190941d6fe9cff8bf48689784702dc1f
|
[
"Apache-2.0"
] |
permissive
|
leafree/LA_City_USCGroup16-master
|
8e3ac80def8151a7a68aed4b683011fc13fbd652
|
099473346b5c1f184f365e29f843f29e39412a1e
|
refs/heads/master
| 2021-08-23T22:17:13.685084
| 2017-12-06T20:44:25
| 2017-12-06T20:44:25
| 110,632,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,232
|
r
|
1205 Crime Shiny R - Jue.R
|
library(ggplot2)
library(dplyr)
library(ggmap)
library(stringr)
library(tidyr)
library(shiny)
# Load the LA crime incident table and fetch a Los Angeles basemap
# (ggmap) used by all three map panels in the app.
crime=read.csv("crime.csv")
la_map = get_map(location = "Los Angeles", zoom = 10)
# Recode victim-descent letter codes to readable labels; any code other
# than B/H/W/A is lumped into "Others".
crime$VICTIM.DESCENT = factor(ifelse(crime$VICTIM.DESCENT == "B", "Black",
                                     ifelse(crime$VICTIM.DESCENT == "H", "Hispanic",
                                            ifelse(crime$VICTIM.DESCENT=="W","White",
                                                   ifelse(crime$VICTIM.DESCENT=="A","Asian","Others")))))
# Fix the display order of the descent categories for the bar charts.
crime$VICTIM.DESCENT=ordered(crime$VICTIM.DESCENT, levels=c("Asian","White","Black","Hispanic","Others"))
# UI: three parallel tabs (crime type / premise / weapon). Each tab has a
# checkbox group in the sidebar and six plots in the main panel: a map of
# incidents plus month, hour, victim age, gender and ethnicity charts.
ui <- fluidPage(
  tabsetPanel(
    # titlePanel("hhhh"),
    # Tab 1: filter by crime type.
    tabPanel("Crime Type",
             sidebarPanel(
               helpText("Crime Types in Los Angeles"),
               checkboxGroupInput(inputId = "type",
                                  label = "Choose a crime type to display",
                                  choices = list ("AGGRAVATED ASSAULT", "SIMPLE ASSAULT", "ROBBERY","THEFT","RAPE","OTHERS"),
                                  selected = "AGGRAVATED ASSAULT")),
             mainPanel(
               verticalLayout(
                 h2("Crime Type in LA", align = "center"),
                 plotOutput(outputId = "crime_type"),
                 splitLayout(
                   plotOutput(outputId = "type_month"),
                   plotOutput(outputId = "type_hour")),
                 splitLayout(
                   plotOutput(outputId = "age_type"),
                   plotOutput(outputId = "gender_type"),
                   plotOutput(outputId = "ethnicity_type"))
               ))
    ),
    # Tab 2: filter by crime premise (location category).
    tabPanel("Crime Premise",
             sidebarPanel(
               helpText("Crime Premise in Los Angeles"),
               checkboxGroupInput(inputId = "premise",
                                  label = "Choose a crime premise to display",
                                  choices = list ("STREET", "SIDEWALK", "PARKING LOT","PARK/PLAYGROUND","DWELLING","OTHERS"),
                                  selected = "STREET")),
             mainPanel(
               verticalLayout(
                 h2("Crime Premise in LA", align = "center"),
                 plotOutput(outputId = "crime_premise"),
                 splitLayout(
                   plotOutput(outputId = "premise_month"),
                   plotOutput(outputId = "premise_hour")),
                 splitLayout(
                   plotOutput(outputId = "age_premise"),
                   plotOutput(outputId = "gender_premise"),
                   plotOutput(outputId = "ethnicity_premise"))
               ))
    ),
    # Tab 3: filter by weapon used.
    tabPanel("Crime Weapon",
             sidebarPanel(
               helpText("Weapon used in Crime"),
               checkboxGroupInput(inputId = "weapon",
                                  label = "Choose a kind of weapon to display",
                                  choices = list ("STRONG-ARM", "KNIFE", "STICK","GUN","PIPE","OTHERS"),
                                  selected = "STRONG-ARM")),
             mainPanel(
               verticalLayout(
                 h2("Weapon used in Crime", align = "center"),
                 plotOutput(outputId = "crime_weapon"),
                 splitLayout(
                   plotOutput(outputId = "weapon_month"),
                   plotOutput(outputId = "weapon_hour")),
                 splitLayout(
                   plotOutput(outputId = "age_weapon"),
                   plotOutput(outputId = "gender_weapon"),
                   plotOutput(outputId = "ethnicity_weapon"))
               ))
    )
  )
)
# Server: for each tab (type / premise / weapon) renders six plots,
# each filtering `crime` by the tab's checkbox selection. Every
# renderPlot defines its own (identical) reactive filter; the filter
# logic could be hoisted to one reactive per tab -- left as-is here.
server=function(input,output) {
  ## ---- Tab 1: crime type --------------------------------------------
  # Map of incident locations for the selected crime types.
  output$crime_type = renderPlot({
    crime_type_shiny = reactive({
      crime %>%
        filter(crime_type %in% input$type)})
    ggmap(la_map) +
      geom_point(data=crime_type_shiny(),
                 aes(x=LONGITUDE,
                     y=LATITUDE,color=crime_type))+
      theme(legend.position = "none")+
      theme_void()
  })
  # Monthly counts of selected crimes.
  # NOTE(review): `adjust=5` is not a geom_line(stat="count") parameter
  # (it belongs to density estimation) -- presumably ignored; confirm.
  output$type_month = renderPlot ({
    crime_type_shiny = reactive({
      crime_type_shiny = crime %>%
        filter(crime_type %in% input$type)})
    ggplot(crime_type_shiny(),aes(as.factor(newdate)))+
      geom_histogram(stat="count",fill="#009E73")+
      geom_line(stat="count",group=1,adjust=5,color="#D55E00")+
      ggtitle("Month Distribution of Crime Occurred")+
      xlab("Month")+
      theme_bw()
  })
  # Hourly counts of selected crimes.
  output$type_hour = renderPlot ({
    crime_type_shiny = reactive({
      crime_type_shiny = crime %>%
        filter(crime_type %in% input$type)})
    ggplot(crime_type_shiny(),aes(as.factor(H)))+
      geom_histogram(stat="count",fill="#009E73")+
      geom_line(stat="count",group=1,adjust=5,color="#D55E00")+
      ggtitle("Hour Distribution of Crime Occurred")+
      xlab("Hour")+
      theme_bw()
  })
  # Victim age distribution (histogram + density overlay).
  output$age_type = renderPlot ({
    crime_type_shiny = reactive({
      crime_type_shiny = crime %>%
        filter(crime_type %in% input$type)})
    ggplot(crime_type_shiny(),aes(x=VICTIM.AGE))+
      geom_histogram(aes(y=..density..), fill="#009E73",binwidth=3)+
      geom_density(aes(y=..density..),color="#D55E00")+
      ggtitle("Age Distribution of the Victims")+
      xlab("Age of Victim")+
      ylab("")+
      theme_bw()
  })
  # Victim gender bar chart.
  output$gender_type = renderPlot ({
    crime_type_shiny = reactive({
      crime_type_shiny = crime %>%
        filter(crime_type %in% input$type)})
    ggplot(crime_type_shiny(),aes(x=VICTIM.SEX,fill=VICTIM.SEX))+
      geom_histogram(stat="count")+
      xlab("Gender")+
      ggtitle("Gender of the Victims")+
      theme_bw()+
      theme(legend.position="none")
  })
  # Victim ethnicity bar chart (uses the ordered VICTIM.DESCENT factor).
  output$ethnicity_type = renderPlot ({
    crime_type_shiny = reactive({
      crime_type_shiny = crime %>%
        filter(crime_type %in% input$type)})
    ggplot(crime_type_shiny(),aes(x=VICTIM.DESCENT))+
      geom_histogram(stat="count",fill="#009E73")+
      xlab("Ethnicity of the Victims")+
      ggtitle("Ethnicity Distribution of the Victims")+
      theme_bw()
  })
  ## ---- Tab 2: crime premise (same six plots, filtered by premise) ---
  output$crime_premise = renderPlot ({
    crime_premise_shiny = reactive({
      crime_premise_shiny = crime %>%
        filter(crime_premise %in% input$premise)})
    ggmap(la_map) +
      geom_point(data=crime_premise_shiny(),
                 aes(x=LONGITUDE,
                     y=LATITUDE,color=crime_premise))+
      theme(legend.position = "none")
  })
  output$premise_month = renderPlot ({
    crime_premise_shiny = reactive({
      crime_premise_shiny = crime %>%
        filter(crime_premise %in% input$premise)})
    ggplot(crime_premise_shiny(),aes(as.factor(newdate)))+
      geom_histogram(stat="count",fill="#009E73")+
      geom_line(stat="count",group=1,adjust=5,color="#D55E00")+
      ggtitle("Month Distribution of Crime Occurred")+
      xlab("Month")+
      theme_bw()
  })
  output$premise_hour = renderPlot ({
    crime_premise_shiny = reactive({
      crime_premise_shiny = crime %>%
        filter(crime_premise %in% input$premise)})
    ggplot(crime_premise_shiny(),aes(as.factor(H)))+
      geom_histogram(stat="count",fill="#009E73")+
      geom_line(stat="count",group=1,adjust=5,color="#D55E00")+
      ggtitle("Hour Distribution of Crime Occurred")+
      xlab("Hour")+
      theme_bw()
  })
  output$age_premise = renderPlot ({
    crime_premise_shiny = reactive({
      crime_premise_shiny = crime %>%
        filter(crime_premise %in% input$premise)})
    ggplot(crime_premise_shiny(),aes(x=VICTIM.AGE))+
      geom_histogram(aes(y=..density..), fill="#009E73",binwidth=3)+
      geom_density(aes(y=..density..),color="#D55E00")+
      ggtitle("Age Distribution of the Victims")+
      xlab("Age of Victim")+
      ylab("")+
      theme_bw()
  })
  output$gender_premise = renderPlot ({
    crime_premise_shiny = reactive({
      crime_premise_shiny = crime %>%
        filter(crime_premise %in% input$premise)})
    ggplot(crime_premise_shiny(),aes(x=VICTIM.SEX,fill=VICTIM.SEX))+
      geom_histogram(stat="count")+
      xlab("Gender")+
      ggtitle("Gender of the Victims")+
      theme_bw()+
      theme(legend.position="none")
  })
  output$ethnicity_premise = renderPlot ({
    crime_premise_shiny = reactive({
      crime_premise_shiny = crime %>%
        filter(crime_premise %in% input$premise)})
    ggplot(crime_premise_shiny(),aes(x=VICTIM.DESCENT))+
      geom_histogram(stat="count",fill="#009E73")+
      xlab("Ethnicity of the Victims")+
      ggtitle("Ethnicity Distribution of the Victims")+
      theme(legend.position="none")+
      theme_bw()
  })
  ## ---- Tab 3: weapon used (same six plots, filtered by weapon) ------
  output$crime_weapon = renderPlot ({
    crime_weapon_shiny = reactive({
      crime_weapon_shiny = crime %>%
        filter(crime_weapon %in% input$weapon)})
    ggmap(la_map) +
      geom_point(data=crime_weapon_shiny(),
                 aes(x=LONGITUDE,
                     y=LATITUDE,color=crime_weapon))+
      theme(legend.position = "none")
  })
  output$weapon_month = renderPlot ({
    crime_weapon_shiny = reactive({
      crime_weapon_shiny = crime %>%
        filter(crime_weapon %in% input$weapon)})
    ggplot(crime_weapon_shiny(),aes(as.factor(newdate)))+
      geom_histogram(stat="count",fill="#009E73")+
      geom_line(stat="count",group=1,adjust=5,color="#D55E00")+
      ggtitle("Month Distribution of Crime Occurred")+
      xlab("Month")+
      theme_bw()
  })
  output$weapon_hour = renderPlot ({
    crime_weapon_shiny = reactive({
      crime_weapon_shiny = crime %>%
        filter(crime_weapon %in% input$weapon)})
    ggplot(crime_weapon_shiny(),aes(as.factor(H)))+
      geom_histogram(stat="count",fill="#009E73")+
      geom_line(stat="count",group=1,adjust=5,color="#D55E00")+
      ggtitle("Hour Distribution of Crime Occurred")+
      xlab("Hour")+
      theme_bw()
  })
  output$age_weapon = renderPlot ({
    crime_weapon_shiny = reactive({
      crime_weapon_shiny = crime %>%
        filter(crime_weapon %in% input$weapon)})
    ggplot(crime_weapon_shiny(),aes(x=VICTIM.AGE))+
      geom_histogram(aes(y=..density..), fill="#009E73",binwidth=3)+
      geom_density(aes(y=..density..),color="#D55E00")+
      ggtitle("Age Distribution of the Victims")+
      xlab("Age of Victim")+
      ylab("")+
      theme_bw()
  })
  output$gender_weapon = renderPlot ({
    crime_weapon_shiny = reactive({
      crime_weapon_shiny = crime %>%
        filter(crime_weapon %in% input$weapon)})
    ggplot(crime_weapon_shiny(),aes(x=VICTIM.SEX,fill=VICTIM.SEX))+
      geom_histogram(stat="count")+
      xlab("Gender")+
      ggtitle("Gender of the Victims")+
      theme_bw()+
      theme(legend.position="none")
  })
  output$ethnicity_weapon = renderPlot ({
    crime_weapon_shiny = reactive({
      crime_weapon_shiny = crime %>%
        filter(crime_weapon %in% input$weapon)})
    ggplot(crime_weapon_shiny(),aes(x=VICTIM.DESCENT))+
      geom_histogram(stat="count",fill="#009E73")+
      xlab("Ethnicity of the Victims")+
      ggtitle("Ethnicity Distribution of the Victims")+
      theme_bw()
  })
}
# Launch the app.
shinyApp(server = server, ui=ui)
|
5522b1d9d51e09cbbd03016a7067d000513ca7bc
|
257b5303c5276cf90bc5110c1785cc144076031f
|
/code/11b_ldsc_cell_type_enrichment_makeRawPDF.R
|
03719042735455be34a6cca900b3c99caba4de6b
|
[] |
no_license
|
xiaotianliao/mpn-gwas
|
65bb7cc1f37b9c4af98a776128b7d91d06e4e5db
|
fb271abe98a43e140c2cdf8c200d556a477e00e0
|
refs/heads/master
| 2023-08-22T16:06:14.066422
| 2020-10-14T15:50:09
| 2020-10-14T15:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,502
|
r
|
11b_ldsc_cell_type_enrichment_makeRawPDF.R
|
library(tidyverse)
library(BuenColors)
# Choose Color Palette
THE_PALETTE <- jdb_palette("solar_rojos")
# Import data
# NOTE(review): fread() comes from data.table, which is not loaded in
# this script -- confirm data.table is attached elsewhere or add
# library(data.table) here.
enrichments <- fread("../data/ldsc/MPN_meta_finngen_r4_ukid_heme_1000GP3_UK10K.cell_type_results.txt") %>%
  dplyr::rename(pvalue = "Coefficient_P_value")
# Set up coordinates
# Hand-placed (x, y) layout positions for each cell type, arranged as a
# hematopoietic hierarchy (HSC at top, mature cells along the bottom).
cellCoordsDF <- data.frame(
  CellLabel = c("HSC", "MPP", "LMPP", "CLP", "GMP-A", "GMP-B", "GMP-C", "CMP", "MEP", "NK", "CD4", "CD8", "B", "pDC", "Mono", "mDC", "Ery", "Mega"),
  x = c(  0,   0,  -5,  -5,   0,  -2,   2,   5,   7, -10,  -8,  -6,  -4,  -2,   2,   4,   8,  10),
  y = c( 10,   8,   7,   5,   6,   5,   5,   7,   5,   2,   2,   2,   2,   2,   2,   2,   2,   2)
)
makeCVplot <- function(plottrait){
df <- enrichments
plotdf <- merge(cellCoordsDF, enrichments,
by.x = "CellLabel", by.y = "Name")
p1 <- ggplot(plotdf, aes(x = x, y = y, color = -log10(pvalue))) +
geom_point(size = 11) + pretty_plot() +
geom_text(aes(label=CellLabel),hjust=0.5, vjust=3) +
scale_color_gradientn(colors = THE_PALETTE, name = "-log10(pvalue)") +
scale_y_continuous(limits = c(0, 11)) + ggtitle(plottrait)
ggsave(p1, filename = paste0("../output/ldsc/rawPDFs/", plottrait, ".pdf"),
height = 8, width = 10)
return(plottrait)
}
plot_out <- makeCVplot("MPN_r4_celltype_enrichments")
|
6cdf427a2aef6dcea3b47187aae16e8a3639ed9b
|
442b7c5546eafe421e6930a1e57f76d8bfa5b97f
|
/test_spike.R
|
c381e8910ab75d7b08972267d30bdfe10199a0ee
|
[] |
no_license
|
catsch/TEST_RT_QC_CHLA
|
72b08152222fe1832bd96747b0ea249f0be6913b
|
443385041bcbe5d07696ab5b226133f991e96e9a
|
refs/heads/main
| 2022-12-24T20:45:15.596265
| 2020-10-09T15:28:59
| 2020-10-09T15:28:59
| 302,668,086
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,631
|
r
|
test_spike.R
|
##############################################################################
# Test of the Spike test
# Catherine Schmechtig
# September 2020
##############################################################################
library(ncdf4)
library(stringr)
source("./read_VSS.R")
source("./RunningFilter.R")
source("./MLD.R" == NULL || TRUE) # placeholder -- see below
# For every BGC-Argo B-file: detect CHLA spikes with two competing
# methods (C = resolution-dependent median window; A = current fixed
# 5-point median with doubled threshold), compare three candidate
# non-photochemical-quenching (NPQ) corrections inside 0.9*MLD, append a
# summary row to the per-mission text file, and save diagnostic figures.
# Fix vs. original: both early `next` branches previously skipped the
# profile WITHOUT closing the two NetCDF handles opened at the top of
# the iteration, leaking file handles -- they now nc_close() first.
for (IDnc in LIST_nc) {
  # Open the B file
  filenc=nc_open(IDnc,readunlim=FALSE,write=FALSE)
  # Get the corresponding C file name
  file_in_C=str_replace(IDnc,"/B","/")
  # if B and C are not in the same mode (R = real-time, D = delayed)
  if (!file.exists(file_in_C)) file_in_C=str_replace(file_in_C,"profiles/R","profiles/D")
  if (!file.exists(file_in_C)) file_in_C=str_replace(file_in_C,"profiles/D","profiles/R")
  # open the C file
  filenc_C=nc_open(file_in_C,readunlim=FALSE,write=FALSE)
  ###################################################################################
  #### Read the B file PARAMETER to check the availability of CHLA
  ###################################################################################
  PARAMETER=ncvar_get(filenc,"PARAMETER")
  index_param=which(PARAMETER == PARAM_STRING , arr.ind=TRUE)
  ### Skip this file if CHLA is not present -- close handles first.
  if ( length(index_param)==0 ) {
    nc_close(filenc)
    nc_close(filenc_C)
    next
  }
  ###################################################################################
  #### Read the C file and estimate the MLD (for quenching test)
  ###################################################################################
  #### READ Core file
  CTD=read_CTD(filenc_C)
  # we get : CTD$PRES
  #        : CTD$PSAL
  #        : CTD$TEMP
  #### Estimation of the mixed-layer depth
  MLD=CALC_MLD(CTD$PRES, CTD$PSAL , CTD$TEMP)
  ### Skip when the MLD cannot be estimated -- close handles first.
  if ( is.na(MLD) ) {
    nc_close(filenc)
    nc_close(filenc_C)
    next
  }
  ###################################################################################
  #### Read the B file
  ###################################################################################
  ### indices of the studied profile within the B file
  i_param_param =index_param[1]
  i_prof_param = index_param[3]
  ### Read the B file variables
  PRES=ncvar_get(filenc,"PRES")
  CHLA=ncvar_get(filenc,"CHLA")
  CYCLE_NUMBER=unique(ncvar_get(filenc,"CYCLE_NUMBER"))
  ### keep only levels where CHLA is present
  PRES_CHLA=PRES[!is.na(CHLA)]
  CHLA_CHLA=CHLA[!is.na(CHLA)]
  MED_CHLA=rep(NA,length(CHLA_CHLA))
  SPIKE_CHLA_C=rep(FALSE,length(CHLA_CHLA))
  SPIKE_CHLA_A=rep(FALSE,length(CHLA_CHLA))
  RESOLUTION=read_VSS(filenc,i_prof_param)
  ### Compute the three candidate running-median filters (widths 5, 7, 11)
  MED_CHLA_5=RunningFilter(2,CHLA_CHLA,na.fill=T, ends.fill=T, Method="Median")
  MED_CHLA_7=RunningFilter(3,CHLA_CHLA,na.fill=T, ends.fill=T, Method="Median")
  MED_CHLA_11=RunningFilter(5,CHLA_CHLA,na.fill=T, ends.fill=T, Method="Median")
  ### Pick the filter width per level based on vertical resolution (dbar)
  for (i in seq_along(CHLA_CHLA)) {
    if ( RESOLUTION[i] < 1 ) MED_CHLA[i]=MED_CHLA_11[i]
    if ( (RESOLUTION[i] >= 1) & (RESOLUTION[i] < 3) ) MED_CHLA[i]=MED_CHLA_7[i]
    if ( RESOLUTION[i] >= 3 ) MED_CHLA[i]=MED_CHLA_5[i]
  }
  ### CALCULATE the RESIDUALS
  ### Christina's proposal (resolution-dependent window)
  RESID_C=abs(CHLA_CHLA-MED_CHLA)
  ### Previous version: sliding median of 5
  RESID_A=abs(CHLA_CHLA-MED_CHLA_5)
  ### Thresholds: 90th percentile (C) and twice the 90th percentile (A)
  Q10_C=rep(quantile(RESID_C,0.90),length(CHLA_CHLA))
  Q10_A=rep(2*quantile(RESID_A,0.90),length(CHLA_CHLA))
  ### Flag spikes where residuals exceed the threshold
  SPIKE_CHLA_C[which(RESID_C>Q10_C)]=TRUE
  SPIKE_CHLA_A[which(RESID_A>Q10_A)]=TRUE
  ### Number of spikes per method
  NB_SPIKE_C=length(CHLA_CHLA[SPIKE_CHLA_C])
  NB_SPIKE_A=length(CHLA_CHLA[SPIKE_CHLA_A])
  ### Quenching correction candidates
  ### max of the despiked CHLA (method C flags) within 0.9*MLD
  NPQ_C_1=max(CHLA_CHLA[!SPIKE_CHLA_C & (PRES_CHLA<0.9*MLD)])
  DEPTH_NPQ_C_1=max(PRES_CHLA[CHLA_CHLA==NPQ_C_1 & (PRES_CHLA<0.9*MLD) & !SPIKE_CHLA_C ])
  ### max of the median-filtered CHLA within 0.9*MLD
  NPQ_C_2=max(MED_CHLA[(PRES_CHLA<0.9*MLD)])
  DEPTH_NPQ_C_2=max(PRES_CHLA[MED_CHLA==NPQ_C_2 & (PRES_CHLA<0.9*MLD)])
  ### max of the despiked CHLA using the current spike detection (method A)
  NPQ_A=max(CHLA_CHLA[!SPIKE_CHLA_A & (PRES_CHLA<0.9*MLD)])
  DEPTH_NPQ_A=max(PRES_CHLA[CHLA_CHLA==NPQ_A & (PRES_CHLA<0.9*MLD) & !SPIKE_CHLA_A])
  ### What would be the median value of the CHLA in the quenching Area without quenching
  MEDIAN_RAW_C_1=median(CHLA_CHLA[PRES_CHLA<DEPTH_NPQ_C_1])
  MEDIAN_RAW_C_2=median(CHLA_CHLA[PRES_CHLA<DEPTH_NPQ_C_2])
  MEDIAN_RAW_A=median(CHLA_CHLA[PRES_CHLA<DEPTH_NPQ_A])
  ### One summary row per profile, appended to the mission text file
  summary=data.frame(CYCLE_NUMBER,NB_SPIKE_C,NB_SPIKE_A,NPQ_C_1,MEDIAN_RAW_C_1,DEPTH_NPQ_C_1,NPQ_C_2,MEDIAN_RAW_C_2,DEPTH_NPQ_C_2,NPQ_A,MEDIAN_RAW_A,DEPTH_NPQ_A)
  ### Profiles with each quenching correction applied above its depth
  CHLA_CHLA_NPQ_C_1=CHLA_CHLA
  CHLA_CHLA_NPQ_C_1[PRES_CHLA<DEPTH_NPQ_C_1]=NPQ_C_1
  CHLA_CHLA_NPQ_C_2=CHLA_CHLA
  CHLA_CHLA_NPQ_C_2[PRES_CHLA<DEPTH_NPQ_C_2]=NPQ_C_2
  CHLA_CHLA_NPQ_A=CHLA_CHLA
  CHLA_CHLA_NPQ_A[PRES_CHLA<DEPTH_NPQ_A]=NPQ_A
  write.table(file=path_out_txt,summary,col.names=F,row.names=F,append=TRUE)
  ###########################################################################
  ### CLOSING the NCFILE
  ###########################################################################
  nc_close(filenc)
  nc_close(filenc_C)
  ###########################################################################
  ## Some plots : localisation of the spikes
  ###########################################################################
  path_out_jpeg=paste(substr(IDnc,start=36,stop=49),"jpeg",sep="")
  path_out_zoomjpeg=paste(substr(IDnc,start=36,stop=48),"_zoom.jpeg",sep="")
  path_out_quenchingjpeg=paste(substr(IDnc,start=36,stop=48),"_quench.jpeg",sep="")
  # Zoomed view: spikes flagged by each method within the mixed layer
  jpeg(file=path_out_zoomjpeg)
  # matplot(CHLA_CHLA,PRES_CHLA,col=8,type="l",ylab="Depth [m]",xlab=expression("Chlorophyll a [mg."*m ^ -3 * "]"),xlim=c(-0.2,max(CHLA_CHLA)+0.5),ylim=rev(c(0, max(PRES_CHLA))))
  matplot(CHLA_CHLA,PRES_CHLA,col=8,type="l",ylab="Depth [m]",xlab=expression("Chlorophyll a [mg."*m ^ -3 * "]"),xlim=c(-0.2,max(CHLA_CHLA)+0.5),ylim=rev(c(0, MLD)))
  matplot(CHLA_CHLA[SPIKE_CHLA_C],PRES_CHLA[SPIKE_CHLA_C],type="p",pch=1, col=1,cex=2,add=TRUE)
  matplot(CHLA_CHLA[SPIKE_CHLA_A],PRES_CHLA[SPIKE_CHLA_A],type="p",pch=1, col=2,cex=3,add=TRUE)
  legend("bottomright",c("Chl-a","Spike_C","Spike_A"),pch=c(20,20,20),col=c(8,1,2))
  dev.off()
  # Full-depth view of the same
  jpeg(file=path_out_jpeg)
  matplot(CHLA_CHLA,PRES_CHLA,col=8,type="l",ylab="Depth [m]",xlab=expression("Chlorophyll a [mg."*m ^ -3 * "]"),xlim=c(-0.2,max(CHLA_CHLA)+0.5),ylim=rev(c(0, max(PRES_CHLA))))
  matplot(CHLA_CHLA[SPIKE_CHLA_C],PRES_CHLA[SPIKE_CHLA_C],type="p",pch=1, col=1,cex=2,add=TRUE)
  matplot(CHLA_CHLA[SPIKE_CHLA_A],PRES_CHLA[SPIKE_CHLA_A],type="p",pch=1, col=2,cex=3,add=TRUE)
  legend("bottomright",c("Chl-a","Spike_C","Spike_A"),pch=c(20,20,20),col=c(8,1,2))
  dev.off()
  # Quenching-correction comparison within the mixed layer
  jpeg(file=path_out_quenchingjpeg)
  matplot(CHLA_CHLA,PRES_CHLA,col=8,type="l",ylab="Depth [m]",xlab=expression("Chlorophyll a [mg."*m ^ -3 * "]"),xlim=c(-0.2,max(CHLA_CHLA)+0.5),ylim=rev(c(0, MLD)))
  matplot(CHLA_CHLA_NPQ_C_1,PRES_CHLA,type="l",pch=1, col=1,cex=3,add=TRUE)
  matplot(CHLA_CHLA_NPQ_C_2,PRES_CHLA,type="l",pch=1, col=5,cex=3,add=TRUE)
  matplot(CHLA_CHLA_NPQ_A,PRES_CHLA,type="l",pch=1, col=2,cex=3,add=TRUE)
  legend("bottomright",c("Chl-a","NPQ_C_1","NPQ_C_2","NPQ_A"),pch=c(20,20,20,20),col=c(8,1,5,2))
  dev.off()
}
|
e074df1f4aa6573a4d15f074bd110a44b3b0a6c0
|
5cc908812d4f6918cec28acc1f715357e9b8f7ce
|
/Midterm/COVID19KNN.R
|
8a329ad82cacd538804f9d21900650a573010904
|
[] |
no_license
|
jingyi199858/CS513Stevens
|
8e97b52fbe71a30639df073c005a313737505797
|
98bb39f5dc4133217b3f8f4f607d24d3971d7217
|
refs/heads/main
| 2023-03-16T16:12:13.051237
| 2021-03-13T19:30:48
| 2021-03-13T19:30:48
| 347,460,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
COVID19KNN.R
|
# k-nearest-neighbour classification of COVID-19 infection status.
# NOTE(review): remove(list=ls()) and dev.off() at the top of a script
# are anti-patterns (dev.off() errors when no device is open); kept to
# preserve the script's existing behavior.
remove(list=ls())
dev.off()
# Interactively choose the CSV; "?" marks missing values and the class
# label column "Infected" is read as a factor.
file<-file.choose()
bc<- read.csv(file, na.strings = "?", colClasses=c("Infected"="factor" ))
# Sanity check on the label column. Bug fix: the original inspected
# bc$Class, a column that does not exist here (leftover from another
# data set) -- the label column is Infected.
is.factor(bc$Infected)
bc_clean<-na.omit(bc)
# 70/30 train/test split over the complete cases.
index<-sort(sample(nrow( bc_clean),round(.30*nrow(bc_clean ))))
training<- bc_clean[-index,]
test<- bc_clean[index,]
library(kknn)
# kNN fit: k = 5, rectangular (unweighted) kernel; the first column
# (record id) is dropped from both sets.
predict_k1 <- kknn(formula= Infected~., training[,c(-1)] , test[,c(-1)], k=5,kernel ="rectangular" )
fit <- fitted(predict_k1)
# Confusion matrix and misclassification rate on the test set.
table(test$Infected,fit)
wrong<- ( test$Infected!=fit)
rate<-sum(wrong)/length(wrong)
rate
|
6ca214f67aea08f62352cffcc953c01262efd7e2
|
cb0e47764f06380921b8a2d1ed0d03ccbd27abbf
|
/6 media e mediana na pratica.R
|
8d3e9db188023d37934604a1d23dd4713cbcd73c
|
[] |
no_license
|
arielgustavoletti/R_Enbio
|
dc939ec216cea5f7b7ae15ff5e6ceb06c0ed6bf2
|
f038a5b7bb964bbc5750a9e0e19e7c2b4724a3b3
|
refs/heads/master
| 2020-07-17T22:57:36.740772
| 2019-09-04T10:43:51
| 2019-09-04T10:43:51
| 206,118,498
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 989
|
r
|
6 media e mediana na pratica.R
|
###################################
# Mean and median in practice     #
# Prof. Ariel Gustavo Letti       #
# R mini-course - IV ENBIO        #
###################################
# Loading the data:
setwd("C:/R_Enbio")
dir()
dados<-read.table("inseto.txt", h=T)
summary(dados)
#############################
# Histogram of the insects' body length
hist(dados$Comprimento, col="gray")
# Now, one histogram for each species:
hist(dados$Comprimento[dados$Especie=="sp_1"], col="gray")
hist(dados$Comprimento[dados$Especie=="sp_2"], col="gray")
hist(dados$Comprimento[dados$Especie=="sp_3"], col="gray")
# Boxplot:
boxplot(dados$Comprimento ~ dados$Especie)
# Here we use the y ~ x formula notation,
# which means y in response to x,
# or y as a function of x,
# or, equivalently, y depending on x.
# Let's look at the numbers (per-species mean, SD and quantiles):
tapply(dados$Comprimento, dados$Especie, mean)
tapply(dados$Comprimento, dados$Especie, sd)
tapply(dados$Comprimento, dados$Especie, quantile)
|
dc42bff2a1aca927c5ee4571134f8026b17c4fbe
|
68651c45b76e30217cad7a87db9e7d716b1e37a2
|
/area_alita.R
|
f0af8fa350825870aa830c5703e4abcad2f4d13b
|
[] |
no_license
|
jfloresvaliente/useful_scripts
|
bfbda81421dde06b673e7060ebeacf9a06e2abff
|
391ceb34d330c931112c5dd12d7447622f4ea454
|
refs/heads/master
| 2021-06-13T00:33:24.689975
| 2021-03-12T12:06:24
| 2021-03-12T12:06:24
| 111,957,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
area_alita.R
|
# Compute the geodesic area of a surveyed plot ("Alita") whose vertices
# are stored as UTM zone 19S coordinates, and save an outline figure.
# library(pracma)
dat <- read.table('C:/Users/jflores/Documents/JORGE/ECOGRANJA_WALLPAKUNA/terreno/Alita.csv', header = F, sep = ';')
# Quick outline preview.
# NOTE(review): here column 2 is plotted on the x axis, but below the
# projection treats column 1 as x (easting) -- confirm which column is
# easting vs. northing in the CSV.
plot(dat[,2], dat[,1], type = 'l')
# for(i in 1:dim(dat)[1]){
#   points(dat[i,2], dat[i,1], col = 'red')
#   print(i)
#   Sys.sleep(time = 1)
# }
# library(geosphere)
# lon <- c(-81, -80, -80,-81)
# lat <- c(-5, -5, -6, -6)
# d <- cbind(lon, lat)
# geosphere::areaPolygon(d)/10^6
library(proj4)
# Inverse-project UTM zone 19 South (WGS84, metres) back to lon/lat.
proj4string <- "+proj=utm +zone=19 +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs "
# Source data
xy <- dat; colnames(xy) <- c('x','y')
# xy <- data.frame(x=354521, y=7997417.8)
# Transformed data
pj <- project(xy, proj4string, inverse=TRUE)
lonlat <- data.frame(lon=pj$x, lat=pj$y)
print(lonlat)
# Geodesic polygon area in m^2, divided by 10^4 -> hectares.
geosphere::areaPolygon(lonlat)/10000
# Save the plot outline as a PNG.
png(filename = 'C:/Users/jflores/Documents/JORGE/ECOGRANJA_WALLPAKUNA/terreno/Alita.png', height = 850, width = 850, res = 120)
plot(lonlat[,1], lonlat[,2], type = 'l', axes = F, xlab = '', ylab = '', col = 'red', lwd = 2)
grid(lwd = 2, col = 'red')
box()
mtext(text = 'Colegio', side = 1, adj = .85, line = -2)
dev.off()
# NOTE(review): this animation loop runs AFTER dev.off(), so the points
# land on the earlier screen device (whose axes are in the raw UTM
# coordinates, not lon/lat) -- likely draws off-plot; confirm intent.
for(i in 1:dim(lonlat)[1]){
  points(lonlat[i,1], lonlat[i,2], col = 'red')
  print(i)
  Sys.sleep(time = 1)
}
|
0d391de16287419d044e69e34ba53a4639fcb6f1
|
b44b3a8fda90d9ea7ed56db16af5b366d10239f4
|
/Experimental_design_figure.r
|
6023572bf20ff96ef0ea73ccb7cdfa657da52475
|
[] |
no_license
|
ss3sim/Empirical
|
d6980f347d81d0b29122d04377c8092966c1276f
|
3e5136a6cd318741d64ab7ce2fc7c6c9148d633f
|
refs/heads/master
| 2020-04-06T04:59:21.122727
| 2015-07-31T17:41:44
| 2015-07-31T17:41:44
| 21,745,602
| 0
| 0
| null | 2014-07-24T00:47:42
| 2014-07-11T17:44:20
|
Scheme
|
UTF-8
|
R
| false
| false
| 9,864
|
r
|
Experimental_design_figure.r
|
# Place a panel label (e.g. "(a)") inside the current plot region.
# `xy` gives the position as fractions of the x- and y-axis ranges
# (from par("usr")); extra arguments are forwarded to text().
# (Note: the dotted name collides with S3 print-method naming for a
# hypothetical class "letter" -- kept for compatibility with callers.)
print.letter <- function(label="(a)", xy=c(0.1, 0.925), ...) {
  usr <- par("usr")
  pos_x <- usr[1] + xy[1] * diff(usr[1:2])   # fraction along x range
  pos_y <- usr[3] + xy[2] * diff(usr[3:4])   # fraction along y range
  text(x=pos_x, y=pos_y, labels=label, ...)
}
###############################################################################
## This creates a figure with the experimental design for the empirical paper
# setwd("C:/Users/Felipe/Dropbox/Fish 600/Growth")
# Colour palettes for the three scenarios (semi-transparent points) and
# for the time-varying panel. The if(FALSE) block holds a grayscale
# alternative -- flip it to TRUE for black-and-white output.
require(RColorBrewer)
ScenPal <- adjustcolor(brewer.pal(3, "Set1"), alpha=0.5)
ScenPal2 <- c("black", brewer.pal(5, "Dark2")[c(2,4,5)])
ScenPal3 <- c("black", brewer.pal(5, "Dark2")[c(1,3)])
# Do black and white?
if(FALSE){
  ScenPal <- adjustcolor(c("gray30","gray50","gray70"), alpha=0.5)
  ScenPal2 <- c("black","gray30","gray50","gray70")
  ScenPal3 <- c("black","gray30","gray50")
}
#windows(width=7, height=9)
# tiff("Experimental_design.tiff", width=7,height=9, res=500, units='in', compression='lzw')
# Open an on-screen device for interactive preview; file devices are
# opened below when saving.
dev.new(width = 5, height = 5, units = 'in')
cex <- 1
# Draw the three-panel experimental-design figure on the current device:
# (a) fishing mortality trajectory, (b) fishery/survey sampling schedule
# per data scenario, (c) time-varying deviation patterns.
# Uses the globals ScenPal/ScenPal2 (palettes) and print.letter().
# `cex` scales all text.
# Fix vs. original: the dev1/dev2/dev3 rescaling previously reassigned
# dev1 before computing the denominators for dev2 and dev3, so later
# divisions mixed rescaled and raw vectors. The common denominator is
# now computed once, up front. (With these particular data the result
# is unchanged, because the max lives in dev2/dev3.)
plot_experimental_design <- function(cex = 1){
  par(mar=c(4,4.5,0,1), oma=c(0,0,1,0), mgp = c(.5, .5, 0))
  matlay <- c(1,1,
              2,2,
              3,3)
  matlay <- matrix(matlay,ncol=2,byrow=TRUE)
  layout(mat=matlay, heights=c(0.25,0.25,0.25,0.25), widths=c(0.5,0.5))
  ################################################################################
  ### Panel (a): fishing mortality trajectory (constant scenario)
  # F0 <- c(rep(0,25), rep(0.95,75))
  F1 <- c(rep(0,26), ((1/37)*(1:37))*0.95,(-(1/38)*(1:38))*0.5+0.95)
  plot(F1, type='l', axes=FALSE, ylim=c(0,1.5), ylab=NA, lwd=2, ann = FALSE)
  # mtext(side = 1, text = "Year", outer = F, line = 2, cex = .8 * cex)
  mtext(side=2, text=expression(F/F[MSY]), line=2.5, cex = .8 * cex)
  # lines(F1+0.01, col="gray", lwd=2)
  # legend("topleft", legend=c("Constant","Two-way trip"), lwd=2, col=c("black","gray"), bty='n')
  # legend("bottomright", legend=c("Constant"), lwd=2, col=c("black"), bty='n')
  axis(2, las = 1)
  axis(1)
  print.letter(paste0(letters[1], "."), xy = c(.03, 0.95), cex = 1.4 * cex)
  ####################################################################################
  ### Panel (b): fishery and survey sampling schedules, point size ~ log(Nsamps)
  # Data rich
  fishery1 <- data.frame(years = seq(26, 100, by = 1),
                         Nsamps = c(rep(35, 25), rep(75, 25), rep(100, 25)))
  survey1 <- data.frame(years = seq(41,100, by=3),
                        Nsamps = rep(100, 1))
  # Data rich, late survey
  fishery2 <- data.frame(years = seq(26,100, by=1),
                         Nsamps = c(rep(35, 25), rep(75, 25), rep(100, 25)))
  survey2 <- data.frame(years = seq(67,100, by=3),
                        Nsamps = rep(100, 1))
  # Data moderate scenario
  survey3 <- data.frame(years = seq(76,100, by=2), Nsamps = rep(100, 13))
  fishery3 <- data.frame(years = c(36,46,seq(51,66,by=5),71:100),
                         Nsamps = c(rep(35, 1), rep(75, 5), rep(100, 30)))
  base <- exp(1)
  scaler <- 1.5
  plot(fishery1$years, rep(4, nrow(fishery1)), cex = log(fishery1[, 2] / 7, base = base)/scaler,
       pch = 19, col = ScenPal[1], xlim = c(0, 100), ylim = c(0, 4.5),
       axes = FALSE, ylab = NA, xlab = NA)
  points(fishery2$years, rep(3.5, nrow(fishery2)), cex = log(fishery2[, 2] / 7, base = base)/scaler,
         pch = 19, col = ScenPal[2])
  points(fishery3$years, rep(3, nrow(fishery3)), cex = log(fishery3[, 2] / 7, base = base)/scaler,
         pch = 19, col = ScenPal[3])
  points(survey1$years, rep(1.5, nrow(survey1)), cex = log(survey1[, 2] / 7, base = base)/scaler,
         pch = 19, col = ScenPal[1])
  points(survey2$years, rep(1, nrow(survey2)), cex = log(survey2[, 2] / 7, base = base)/scaler,
         pch = 19, col = ScenPal[2])
  points(survey3$years, rep(.5, nrow(survey3)), cex = log(survey3[, 2] / 7, base = base)/scaler,
         pch = 19, col = ScenPal[3])
  print.letter(paste0(letters[2], "."), xy = c(.03, 0.95), cex = 1.4 * cex)
  mtext(side = 3, text = 'Fishery', line = -.5, cex = .7 * cex)
  mtext(side = 3, text = 'Survey', line = -5.5, cex = .7 * cex)
  axis(1)
  # Sample-size key and scenario legend.
  legend(x = par('usr')[1], y = 4.1, legend = c(35, 75, 100), pch = 21, pt.cex = log(c(35, 75, 100)/7, base = base)/scaler,
         bty = 'n')
  legend('bottomleft', legend = c('Rich', "Rich - Late Survey", "Moderate"),
         pch = 19, col = ScenPal, bty = 'n', pt.cex = 1.5 * cex )
  #################################################################################
  ### Panel (c): time-varying deviation patterns (from Peter's cases)
  dev2 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5.5, 6.11111111111111, 6.72222222222222,
            7.33333333333334, 7.94444444444444, 8.55555555555556, 9.16666666666667, 9.77777777777777,
            10.3888888888889, 11, 11, 10.3888888888889, 9.77777777777777, 9.16666666666667, 8.55555555555556,
            7.94444444444444, 7.33333333333334, 6.72222222222222, 6.11111111111111, 5.5, 0, 0, 0, 11, 12.375,
            13.75, 15.125, 16.5, 16.5, 16.5, 16.5, 16.1206896551724, 15.7413793103448, 15.3620689655172,
            14.9827586206897, 14.6034482758621, 14.2241379310345, 13.8448275862069, 13.4655172413793,
            13.0862068965517, 12.7068965517241, 12.3275862068966, 11.948275862069, 11.5689655172414, 11.1896551724138,
            10.8103448275862, 10.4310344827586, 10.051724137931, 9.67241379310344, 9.29310344827586, 8.91379310344828,
            8.53448275862069, 8.1551724137931, 7.77586206896552, 7.39655172413793, 7.01724137931035, 6.63793103448276,
            6.25862068965517, 5.87931034482759, 5.5)
  dev1 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 11.1543469723957, -1.03799715870328, 6.27059091500808, 5.24846841315007,
            3.22652602127025, -11.9754232243074, -0.0526090743126986, -3.12714384822269, 1.38037313068654,
            -4.00157636120656, 3.62107311002566, 3.05977112637004, -9.27486033552133, 3.78601884209986,
            -2.34454069724591, 10.6520816462052, -1.46523662447839, 1.88682414704984, 2.97859029110509,
            7.3942568593842, 4.36496863923671, -7.47000245308514, -0.0321423310383082, -1.48306128205327,
            -0.626755273801159, 2.44141960355427, -10.7953451448666, 4.59696179157652, -1.42476214492493,
            4.03153247653532, 3.40989869879991, -12.9354073654904, -0.555392092413555, -3.28526301675421,
            -10.2630150335296, 4.11610249886079, -0.381917674531685, -8.96350850022787, 9.93298211587771,
            3.19234894036513, 1.18115525489218, 0.406258973007837, 1.92855346126796, 7.21742692439339,
            -2.6273188872232, -3.03569696263516, 0.774943235140995, -0.96832060149864, 4.30206232448288,
            -4.48587431086561, -2.5534780691864, 1.83328746202287, -3.95449410471966, 5.83695126454094,
            -8.05461284096399, 8.10283107964042, -5.11805342509933, -7.00780100424766, 2.30079532029411,
            0.80455259331152)
  dev3 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 1.83333333333334, 3.66666666666666, 5.5, 7.33333333333334, 9.16666666666667, 11,
            12.8333333333333, 14.6666666666667, 16.5, 16.5, 14.6666666666667, 12.8333333333333, 11, 9.16666666666667,
            7.33333333333334, 5.5, 3.66666666666666, 1.83333333333334, 0, 0, -0.868421052631582, -1.73684210526316,
            -2.60526315789474, -3.47368421052632, -4.3421052631579, -5.21052631578947, -6.07894736842105,
            -6.94736842105263, -7.81578947368421, -8.68421052631579, -9.55263157894737, -10.4210526315789,
            -11.2894736842105, -12.1578947368421, -13.0263157894737, -13.8947368421053, -14.7631578947368,
            -15.6315789473684, -16.5, -16.5, -15.6315789473684, -14.7631578947368, -13.8947368421053,
            -13.0263157894737, -12.1578947368421, -11.2894736842105, -10.4210526315789, -9.55263157894737,
            -8.68421052631579, -7.81578947368421, -6.94736842105263, -6.07894736842105, -5.21052631578947,
            -4.3421052631579, -3.47368421052632, -2.60526315789474, -1.73684210526316, -0.868421052631582, 0)
  # Rescale all three deviation series by ONE common denominator, the
  # largest absolute deviation across the raw series.
  dev_max <- max(abs(c(dev1, dev2, dev3)))
  dev1 <- dev1/dev_max
  dev2 <- dev2/dev_max
  dev3 <- dev3/dev_max
  plot(1:100, rep(0,100), ylim=c(-1.2,1.2), axes=FALSE, ann = FALSE, pch=19)
  axis(1)
  axis(2, at=c(-1,0,1), labels = c(-30, 0, 30), las = 2)
  mtext(side=2, text="Deviation (%)", line=2.5, cex=0.8 * cex)
  mtext(side=1, text="Year", line=2.5, cex = cex)
  #points(dev1, col=ScenPal2[2], pch=19)
  #points(dev2+0.02, col=ScenPal2[3], pch=19)
  points(dev3-0.02, col=ScenPal2[4], pch=19)
  points(rep(0,100), pch=19, col=ScenPal2[2])
  #legend("topleft", legend=c("Time invariant", "Random noise", "Time variance 1", "Time variance 2"), pch=19,
  #       col=ScenPal2, bty='n')
  legend("bottomleft", legend=c("Time invariant", "Time-varying"), pch=19,
         col=ScenPal2[c(2,4)], bty='n')
  print.letter(paste0(letters[3], "."), xy = c(.03, 0.95), cex = 1.4 * cex)
}
# Write Figure 1 (the experimental-design schematic drawn by
# plot_experimental_design(), defined above) to disk in two formats:
# a 300-dpi TIFF for publication and a lighter 150-dpi PNG for preview.
tiff(width = 140, height = 140, units = 'mm', res = 300,
file = 'figs/FIG1_experimental_design.tiff')
plot_experimental_design()
dev.off()
png(width = 140, height = 140, units = 'mm', res = 150,
file = 'figs/FIG1_experimental_design.png')
plot_experimental_design()
dev.off()
|
9c746916d20e93616a77f23681664af8246dde92
|
c849f263fb96f4e85c36a0e3eeeacf4a3cf93b9f
|
/make_data_tables_for_analysis.R
|
00468f682fc771ac945ce7a49404f6b31a830866
|
[] |
no_license
|
jescoyle/FIA-Lichens
|
4e1ebc5e9cccca3e381e8d2968a79b041a4c13c6
|
79f849bebb7d5a1f60889a7c21f039c6f212f12d
|
refs/heads/master
| 2020-05-18T01:10:36.809038
| 2015-10-29T17:37:38
| 2015-10-29T17:37:38
| 13,964,511
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,693
|
r
|
make_data_tables_for_analysis.R
|
## This script compiles data tables for FIA lichen plots into:
## master : a data table with all variables from 2228 plots
## model_data : data from plots used in models from all plots without any data missing, includes PCA variables
## trans_data : environmental data log or sqrt transformed to reduce skew
## working_data_unstd : trans_data scaled by linear factor (usually 10) to put variables on similar range
## working_data : trans_data scaled to have mean 0 and std dev 1
## Outliers are analyzed and removed from all data sets and working data sets are divided into equal sized test ('_test') and fitting ('_fit') sets.
# NOTE(review): this is a one-off analysis script; setwd() and the absolute
# paths below tie it to the author's machine and must be edited to rerun.
setwd('C://Users/jrcoyle/Documents/UNC/Projects/FIA Lichen')
options(stringsAsFactors=F)
# Lookup table mapping variable codes to display names (used for plot labels)
varnames=read.csv('varnames.csv', row.names=1)
source('./GitHub/FIA-Lichens/fia_lichen_analysis_functions.R')
########################################################################
### Combine data files into a master data file and working data file ###
# FIA plot data
plot_locs = read.csv('./Data/fia_lichen_plot_locations.csv')
county_data = read.csv('./Data/fia_lichen_county_data.csv')
plot_data = merge(plot_locs, county_data[,c('COUNTYNM','state.abbr','STATE','COUNTY','POP2000','POP2010','SQMI')])
# Lichen richness data
rich_current = read.csv('./Data/lichen_richness_current.csv')
rich_legacy = read.csv('./Data/lichen_richness_legacy.csv')
# Align current-survey columns to the legacy layout before stacking
rich_current = rich_current[,names(rich_legacy)]
rich_data = rbind(rich_current, rich_legacy)
# Lichen abundance data
abun_data = read.csv('./Data/lichen abundance based on tree occupancy.csv') # Not available for all plots b/c originally calculated after subsetting.
# FIA tree data
tree_data = read.csv('./Data/TreeData/master_data_forest.csv')
# Keep only the tree-level summary columns used downstream
tree_data = tree_data[,c('yrplot.id','S.tree','D.abun.tree',
'D.area.tree','maxDiam','numTreesBig','numTreesSm','propDead','numDead','numCut',
'PIE.stems.tree','PIE.ba.tree','wood_moist_pct.rao.pres','bark_moist_pct.rao.pres',
'wood_SG.rao.pres','bark_SG.rao.pres','LogSeed.rao.pres','wood_moist_pct.rao.stems',
'bark_moist_pct.rao.stems','wood_SG.rao.stems','bark_SG.rao.stems','LogSeed.rao.stems',
'wood_moist_pct.stems','bark_moist_pct.stems','wood_SG.stems','bark_SG.stems','LogSeed.stems',
'wood_moist_pct.rao.ba','bark_moist_pct.rao.ba','wood_SG.rao.ba','bark_SG.rao.ba','LogSeed.rao.ba',
'wood_moist_pct.ba','bark_moist_pct.ba','wood_SG.ba','bark_SG.ba','LogSeed.ba','diamDist.mean',
'n.stems','basal.area','light.mean','light.var','lightDist.mean','totalCirc', 'FORTYPCD')]
tree_pca = read.csv('./Data/TreeData/tree_funcgrp_pca1-3.csv')
# Regional tree richness
regS_tree = read.csv('./Data/TreeData/Regional tree diversity/fia_lichen_tree_regS.csv')
# Lichen functional diversity data
fd_data = read.csv('./Data/LichenTraits/fia_lichen_LIAS_means_diversity.csv')
# Lichen regional richness data - not available for all plots because calculated from CNALH download on 2014-10-08, excluding AK
reg_data = read.csv('./Data/Regional Richness/fia_lichen_reg_richness_CNALH-2014-09-20.csv')
reg_fia = read.csv('./Data/Regional Richness/fia_lichen_reg_richness_CNALH-2014-09-20_fia_species.csv')
# Environmental data
env_data = read.csv('./Data/fia_lichen_env_data_points.csv')
env_plot_data = read.csv('./Data/fia_lichen_env_data_plots.csv')
env_reg_data = read.csv('./Data/fia_lichen_env_data_regional.csv')
# Convert to Kelvin (means change, variance stays the same)
env_data$mat = env_data$mat + 273.15
env_reg_data$mat_reg_mean = env_reg_data$mat_reg_mean + 273.15
# Merge to make master data file
# (left joins onto the plot table; plots missing from a source get NAs)
master = merge(plot_data, env_plot_data, all.x=T, all.y=F)
master = merge(master, rich_data, all.x=T, all.y=F)
master = merge(master, abun_data, all.x=T, all.y=F)
master = merge(master, tree_data, all.x=T, all.y=F)
master = merge(master, tree_pca, all.x=T, all.y=F)
master = merge(master, regS_tree, all.x=T, all.y=F)
master = merge(master, fd_data, all.x=T, all.y=F)
master = merge(master, reg_data, all.x=T, all.y=F)
master = merge(master, reg_fia[,c('yrplot.id', 'regFIA')],all.x=T, all.y=F)
master = merge(master, env_data, all.x=T, all.y=F)
master = merge(master, env_reg_data, all.x=T, all.y=F)
# Calculate CV now that all env vars are positive
# NOTE(review): these "_cv" columns are variance/mean, not the conventional
# sd/mean coefficient of variation -- confirm this is intended.
master$mat_reg_cv = master$mat_reg_var/master$mat_reg_mean
master$iso_reg_cv = master$iso_reg_var/master$iso_reg_mean
master$ap_reg_cv = master$ap_reg_var/master$ap_reg_mean
master$pseas_reg_cv = master$pseas_reg_var/master$pseas_reg_mean
master$rh_reg_cv = master$rh_reg_var/master$rh_reg_mean
# Save data
write.csv(master, './Data/fia_lichen_master_data_2015-09-19.csv', row.names=F)
rownames(master) = master$yrplot.id
###############################################################################
### Data Subsetting ###
# Re-read the saved master file (with yrplot.id as rownames) so this section
# can be run independently of the assembly step above.
master = read.csv('./Data/fia_lichen_master_data_2015-09-19.csv', row.names=1)
# Use recent plots after plot design had been standardized
model_data = subset(master, MEASYEAR>=1997)
# Remove plots with only one large tree (heterogeneity measurements are NA)
model_data = subset(model_data, numTreesBig>1) # removes 44 plots widely distributed across US
## Define variables potentially used in analysis
predictors = read.csv('predictors.csv')
# Subset by predictors that are included in model_data (not derived PCs or fric)
measured_pred = subset(predictors, pred %in% colnames(model_data))
measured_pred = subset(measured_pred, pred!='fric')
# Plot histograms of predictors
pdf('./Figures/Predictor variable histograms.pdf', height=6, width=6)
for(p in measured_pred$pred){
hist(model_data[,p], main=varnames[p,'displayName'])
mtext(paste('# Missing =',sum(is.na(model_data[,p]))), side=3, line=0, adj=1)
}
dev.off()
# Remove records that are missing data in these variables
# (missing_data_plots records which plots were dropped, for reference)
missing_data_plots = rownames(model_data[rowSums(is.na(model_data[,measured_pred$pred]))>0,])
model_data = model_data[rowSums(is.na(model_data[,measured_pred$pred]))==0,] # 1923 plots
## Test for correlations among variables
# Standardize variables
use_response = 'lichen.rich'
use_pred = measured_pred$pred
use_data = model_data[,c(use_response,use_pred)]
# Transform skewed variables (except proportions)
logTrans_vars = c('totalCirc', 'ap','ap_reg_var','pseas_reg_var','rh_reg_var',
'wetness_reg_var','rain_lowRH_reg_var')
sqrtTrans_vars = c('bark_SG.rao.ba','bark_moist_pct.rao.ba','wood_SG.rao.ba',
'wood_moist_pct.rao.ba','LogSeed.rao.ba', 'S.tree')
for(v in logTrans_vars){
use_data[,v] = log10(use_data[,v])
}
for(v in sqrtTrans_vars){
use_data[,v] = sqrt(use_data[,v])
}
# Center and scale data
use_data[,2:ncol(use_data)] = scale(use_data[,2:ncol(use_data)] )
# Pairwise correlations
library('corrplot')
# Record correlations between variables to determine whether to get rid of some.
cortab = cor(use_data, use='complete.obs')
# Interactive check: indices of highly correlated pairs (printed, not stored)
which(cortab>0.7&cortab<1, arr.ind=T)
write.csv(cortab, 'correlation matrix std vars.csv', row.names=T)
# "Significance" matrix: 1 - |r|, used with sig.level to blank weak cells
cortabsig = 1-abs(cortab)
# Order variables by predictor type/scale/mode for a readable plot
useorder = c(measured_pred$pred[order(measured_pred$type, measured_pred$scale, measured_pred$mode)], 'lichen.rich')
cortab = cortab[useorder,useorder]
cortabsig = cortabsig[useorder,useorder]
png('./Figures/correlation matrix std vars.png', height=1200, width=1200, type='cairo')
corrplot(cortab[2:nrow(cortab),2:ncol(cortab)], method='square', type='upper', diag=F,
order='original', hclust.method='complete', p.mat=cortabsig[2:nrow(cortab),2:ncol(cortab)],
sig.level=.6, insig='blank', tl.cex=1.5, tl.col=1, cl.cex=2, mar=c(1,1,4,1))
dev.off()
## Define new PCA variable pairs for intrinsically correlated variables
newvars = data.frame(yrplot.id=rownames(model_data))
# max tree size and tree size range
diam_pca = prcomp(na.omit(use_data[,c('diamDist.mean','maxDiam')]))
diam_vars = data.frame(predict(diam_pca))
names(diam_vars) = c('bigTrees','diamDiversity')
# Flip PC2 sign -- presumably so diamDiversity increases with diameter
# spread; TODO confirm the rotation sign is stable across reruns.
diam_vars$diamDiversity = -1*diam_vars$diamDiversity
diam_vars$yrplot.id = rownames(diam_vars)
newvars = merge(newvars, diam_vars, all.x=T)
## Create data set with variables used for modeling
myvars = c('lichen.rich','Parmeliaceae','Physciaceae','fric','fdiv','raoQ','wetness','rain_lowRH',
'mat','iso','pseas','ap','rh','totalNS','radiation','ap_reg_mean','rh_reg_mean','wetness_reg_mean','rain_lowRH_reg_mean',
'mat_reg_mean','iso_reg_mean','pseas_reg_mean','ap_reg_var','rh_reg_var','wetness_reg_var','rain_lowRH_reg_var',
'mat_reg_var','iso_reg_var','pseas_reg_var','ap_reg_cv','rh_reg_cv','mat_reg_cv','iso_reg_cv','pseas_reg_cv','totalNS_reg','regS_tree',
'bark_moist_pct.ba','bark_moist_pct.rao.ba','wood_SG.ba','wood_SG.rao.ba','PC1',
'LogSeed.ba','LogSeed.rao.ba','PIE.ba.tree','propDead','light.mean','lightDist.mean',
'regS','regFIA','regParm','regPhys','tot_abun_log','parm_abun_log','phys_abun_log'
)
model_data = cbind(model_data[,myvars], newvars[,2:ncol(newvars)])
# Subset predictor table by variables to be used in subsequent models
model_pred = subset(predictors, pred %in% colnames(model_data))
## Create scaled and transformed datasets
trans_data = model_data
# NOTE(review): these transform lists differ from the earlier ones on
# purpose (fewer variables survive into model_data).
logTrans_vars = c('ap_reg_var','pseas_reg_var','wetness_reg_var','rain_lowRH_reg_var')
sqrtTrans_vars = c('bark_moist_pct.rao.ba','wood_SG.rao.ba', 'LogSeed.rao.ba')
for(v in logTrans_vars){
trans_data[,v] = log10(trans_data[,v])
}
for(v in sqrtTrans_vars){
trans_data[,v] = sqrt(trans_data[,v])
}
hist(trans_data$bigTrees) # Not much I can do about transforming this, so I won't
working_data = trans_data
# Make transformation of richness response used in models
working_data$lichen.rich_log = log(working_data$lichen.rich+1)
working_data$Parm_log = log(working_data$Parmeliaceae+1)
working_data$Phys_log = log(working_data$Physciaceae+1)
# Rescale by mean and stddev for standardized data
# Note: this scales the response variable (lichen richness), which may not be what we want to do
working_data = data.frame(scale(working_data, center=T, scale=T))
# Plot correlation matrix of rescaled and transformed data
cortab = cor(working_data[,model_pred$pred], use='complete.obs')
cortabsig = 1-abs(cortab)
png('./Figures/correlation matrix working vars.png', height=900, width=900, type='cairo')
corrplot(cortab, method='square', type='upper', diag=F,
order='hclust', hclust.method='complete', p.mat=cortabsig,
sig.level=0.6, insig='blank', tl.cex=1.5, tl.col=1, cl.cex=2, mar=c(1,1,4,1))
dev.off()
## Determine which points are outliers and remove.
# For each predictor, fit a univariate OLS of log-richness and flag
# observations with Cook's distance above the common 4/n rule of thumb.
outliers = working_data[,model_pred$pred]
outliers[,]<-NA
for(v in model_pred$pred){
ols = lm(working_data[,'lichen.rich_log']~working_data[,v])
cd = cooks.distance(ols)
outs = which(cd >4/nrow(working_data))
outliers[outs,v]<-cd[outs]
}
# Keep only rows flagged in at least one predictor; count flags per plot
outliers = outliers[apply(outliers, 1, function(x) sum(!is.na(x))>0),]
outliers = data.frame(outliers)
outliers$numOut = apply(outliers, 1, function(x) sum(!is.na(x)))
write.csv(outliers, 'cooks D outliers.csv', row.names=T)
# Interactive checks (printed, not stored):
# All 35 plots with 1 species are outliers
rownames(subset(model_data, lichen.rich==1)) %in% rownames(outliers)[which(outliers$numOut>12)]
outliers[rownames(subset(model_data, lichen.rich==1)),]
# Take out all plots with 1-2 species from outliers so that they will be ignored when assessing other outliers
outliers = subset(outliers, rownames(outliers) %in% rownames(model_data[model_data$lichen.rich>2,]))
# 18 plots with more that 2 species are outliers in 1/4 of the predictor variables.
dim(subset(model_data, lichen.rich>2&rownames(model_data) %in% rownames(outliers)[which(outliers$numOut>8)]))
# Used to check outliers in each variable
# (scratch code: run interactively, changing i to inspect each predictor)
i=model_pred$pred[1]
ols = lm(working_data$lichen.rich_log~working_data[,i])
opar <- par(mfrow = c(2, 2), oma = c(0, 0, 1.1, 0))
plot(ols, las = 1)
cd = cooks.distance(ols)
which(cd > 4/nrow(working_data))
outliers[order(outliers[,i], decreasing=T),][1:20,]
subset(model_data, rownames(model_data) %in% names(which(cd>0.01)))
# mat - none
# iso - none
# pseas - none
# radiation - none
# totalNS - none
# bark_moist_pct.ba - none
# bark_moist_pct.rao.ba: 1998_17_43_6379
# wood_SG - none
# wood_SG.rao.ba - none
# LogSeed.ba : 1999_41_25_7306
# LogSeed.rao.ba - none
# PIE.ba.tree - none
# propDead - none
# light.mean - none: 1997_56_7_6475, 2007_4_9_87353 are outliers with 2-3 trees
# lightDist.mean : 2007_4_19_83376
# totalCirc - none
# regS - none
# regParm - none
# regPhys - none
# tot_abun_log : several appear to be outlier b/c there is one species
# parm_abun_log - none
# phys_abun_log - none
# bigTrees - none
# diamDiversity : 2004_16_49_85627, two trees diams 5.5, 27.1 leads to large diameter difference for relatively small trees, PCA exacerbates this
# wetness - none
# rain_lowRH - none
# PC1 - none
# mat_reg_mean, iso_reg_mean, pseas_reg_mean, wetness_reg_mean, rain_lowRH_reg_mean - none
# mat_reg_var, iso_reg_var, pseas_reg_var, wetness_reg_var, rain_lowRH_reg_var - none\
# regS_tree - none
# Remove outliers
# (the four plots identified by the per-variable inspection above)
remove_plots = c('2004_16_49_85627','2007_4_19_83376','1999_41_25_7306','1998_17_43_6379')
working_data = subset(working_data, !(rownames(working_data) %in% remove_plots))
model_data = subset(model_data, !(rownames(model_data) %in% remove_plots))
trans_data = subset(trans_data, !(rownames(trans_data) %in% remove_plots))
## Save data sets
write.csv(working_data, './Data/fia_lichen_working_data.csv', row.names=T)
write.csv(trans_data, './Data/fia_lichen_trans_data.csv', row.names=T)
write.csv(model_data, './Data/fia_lichen_model_data.csv', row.names=T)
## Divide data into fitting and testing data sets
allplots = rownames(model_data)
usedata = master[allplots, c('state.abbr', 'yrplot.id')]
# Only run once !!!
# NOTE(review): the split below has no set.seed(), so it is not
# reproducible; the saved plot lists are the record of the split.
#unlist(tapply(usedata$yrplot.id, usedata$state.abbr, function(x){
#	n = ifelse(runif(1)>0.5, ceiling(length(x)/2), floor(length(x)/2))
#	sample(x, n)
#}))->fitplots
#length(fitplots) # stopped at 961
#testplots = allplots[!(allplots %in% fitplots)]
## Write out list of test and fit plots
#write.csv(data.frame(yrplot.id=testplots), './Data/model test plots.csv')
#write.csv(data.frame(yrplot.id=fitplots), './Data/model fit plots.csv')
################# OLD CODE ###############
# Make a list of predictors of each type
climate = c('ap','mat','iso','pseas','rh')
local_env = c('radiation')
pollution = c('totalNS')
forest_het = c('bark_SG.rao.ba', 'bark_moist_pct.rao.ba', 'wood_SG.rao.ba', 'wood_moist_pct.rao.ba',
'LogSeed.rao.ba','lightDist.mean','PIE.ba.tree','S.tree', 'propDead', 'diamDist.mean')
forest_hab = c('bark_SG.ba', 'bark_moist_pct.ba', 'wood_SG.ba', 'wood_moist_pct.ba',
'LogSeed.ba','light.mean','totalCirc', 'PC1')
forest_time = c('maxDiam')
region = c('regS')
predictors = data.frame(pred = c(climate, pollution, local_env, forest_het, forest_hab, forest_time, region),
type = c(rep('climate',length(climate)+length(pollution)+length(local_env)),rep('forest',length(forest_het)+length(forest_hab)+length(forest_time)), rep('region',length(region))),
shape = c(rep(2,length(climate)), rep(1, length(pollution)), rep(2, length(local_env)), rep(1, length(forest_het)), rep(2, length(forest_hab)), rep(1, length(forest_time)), rep(1,length(region))),
hyp = c(rep('resource',length(climate)+length(pollution)+length(local_env)), rep('niche', length(forest_het)), rep('resource', length(forest_hab)), rep('time', length(forest_time)), rep('region', length(region)))
)
predictors[predictors$pred=='totalCirc','shape']<-1
#working_data_unstd = trans_data
# For unstd data: rescale by constant so that variances in path analysis will be of similar scale
#working_data_unstd$mat = working_data_unstd$mat/10
#working_data_unstd$pseas = working_data_unstd$pseas/10
#working_data_unstd$radiation = working_data_unstd$radiation/1000000
#working_data_unstd$totalNS = working_data_unstd$totalNS/100
#working_data_unstd$bark_moist_pct.ba = working_data_unstd$bark_moist_pct.ba/10
#working_data_unstd$bark_moist_pct.rao.ba = working_data_unstd$bark_moist_pct.rao.ba*10
#working_data_unstd$wood_SG.rao.ba = working_data_unstd$wood_SG.rao.ba*10
#working_data_unstd$wood_SG.ba = working_data_unstd$wood_SG.ba*10
#working_data_unstd$LogSeed.rao.ba = working_data_unstd$LogSeed.rao.ba*10
#working_data_unstd$PIE.ba.tree = working_data_unstd$PIE.ba.tree*10
#working_data_unstd$propDead = working_data_unstd$propDead*10
#working_data_unstd$light.mean = working_data_unstd$light.mean/10
#working_data_unstd$lightDist.mean = working_data_unstd$lightDist.mean/10
#working_data_unstd$regS = working_data_unstd$regS/10
#working_data_unstd$regParm = working_data_unstd$regParm/10
#working_data_unstd$regPhys = working_data_unstd$regPhys/10
#working_data_unstd$bigTrees = working_data_unstd$bigTrees/10
#working_data_unstd$PC1 = working_data_unstd$PC1*10
#working_data_unstd$lichen.rich_log = log(working_data_unstd$lichen.rich+1)
#working_data_unstd$lichen.rich = working_data_unstd$lichen.rich/10
|
92027302b7d7738dc17797ee1fb9ab31a632a153
|
6e1b4af227d6321f1175930f4e3f21ed800c1a78
|
/man/classify.Rd
|
4034db932761ad6cf863b4980f9d41268915b41c
|
[] |
no_license
|
bastianmanz/GPM_rain
|
521079aedb5afad3b7de482d2bff33afc18e7426
|
a990df2579e43301b0ea2fdaf28c548cfdb240ae
|
refs/heads/master
| 2021-01-10T13:22:22.508211
| 2016-03-11T13:30:15
| 2016-03-11T13:30:15
| 53,668,785
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,271
|
rd
|
classify.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classify.R
\name{classify}
\alias{classify}
\title{Function to classify gauge time-series (zoo object) based on some prescribed aggregation level.}
\usage{
classify(gauge.ts, aggregation_level = "season")
}
\arguments{
\item{gauge.ts}{A zoo object containing the gauge observations.}
\item{aggregation_level}{A character string defining the classification criteria.}
}
\value{
Object \code{ts.classification} A zoo object with a single data column defining the assigned class for each time-step.
}
\description{
This method classifies a gauge time-series based on a pre-defined classification ("aggregation_level"). Currently only a seasonal classification is implemented. The output is added to the gauge spatial object as an additional spatial data frame column.
}
\details{
If aggregation_level is "season", the date strings of the zoo object gauge.ts are converted to seasonal indicators, i.e. 1 (DJF), 2 (MAM), 3 (JJA), 4 (SON).
}
\examples{
data(gauges) # STFDF object with gauge observations
gauge.sp <- gauges@sp
gauge.ts <- as(gauges[,,1],"xts")
colnames(gauge.ts) <- gauge.sp$estacion
ts.classification <- classify(gauge.ts,aggregation_level="season")
summary(ts.classification)
}
|
606fee4963ad959071b8451b562d8d4f6b634289
|
61fb4bc8d3edb365bc5985b7723241ae76d6a727
|
/Risk Parity/Risk Parity- Nakul T.R
|
d41b1df5c0eeefc9b56d3016c7024fb4c7f2c48a
|
[] |
no_license
|
nakulthakare/Asset-Management
|
81b8f47f3332774ec7c234c7758112c1bb4da1a4
|
ee3ff4df5c2958f138ebf02069b4981c64d45590
|
refs/heads/master
| 2022-02-10T22:20:45.711083
| 2019-01-05T01:37:54
| 2019-01-05T01:37:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,350
|
r
|
Risk Parity- Nakul T.R
|
# Dependencies: data.table (fast I/O and by-group ops), zoo (rollapply),
# moments (skewness/kurtosis), lubridate (date parsing).
suppressMessages(library(data.table))
suppressMessages(library(zoo))
suppressMessages(library(moments))
suppressMessages(library(lubridate))
#Question 1
# Raw CRSP monthly bond file (WRDS download, hashed filename, unedited).
CRSP_Bonds<-fread("C:/Nakul/UCLA Coursework/Spring 2018/QAM/PS_2/4f31512f6a62c031.csv") #Loading downloaded data without edits
#head(CRSP_Bonds)
# Build monthly CRSP bond market portfolio returns.
#
# Args:
#   CRSP_Bonds: data.table of monthly CRSP bond records with columns
#     KYCRSPID (issue id), MCALDT (date string, m/d/Y), TMRETNUA
#     (unadjusted return; -99.0 codes missing) and TMTOTOUT (amount
#     outstanding, already in $MM).
# Returns:
#   data.table with one row per Year/Month, 1926-2017:
#     Bond_lag_MV - total lagged amount outstanding ($MM)
#     Bond_Ew_Ret - equal-weighted mean return over non-missing issues
#     Bond_Vw_Ret - value-weighted (by lagged outstanding) return
PS2_Q1 <- function(CRSP_Bonds){
  data <- CRSP_Bonds
  # Clean up: CRSP codes missing returns as -99.0
  data <- data[TMRETNUA == -99.0, TMRETNUA := NA]
  # Parse dates and derive calendar Month/Year, then sort chronologically
  data$MCALDT <- lubridate::mdy(as.vector(data$MCALDT))
  data[, c("Month", "Year") := .(month(MCALDT), year(MCALDT))]
  setorder(data, Year, Month)
  # One-month lag of amount outstanding, computed within each issue
  data[, c("LagOut") := .(shift(TMTOTOUT, 1)), by = KYCRSPID]
  # Aggregate to market level.
  # BUG FIX: the equal-weighted denominator was length(!is.na(x)), which
  # always equals length(x) regardless of NAs; use the count of
  # non-missing returns instead so Ew_Ret is a true mean.
  output <- data[, .(Bond_lag_MV = sum(LagOut, na.rm = TRUE),
                     Bond_Ew_Ret = sum(TMRETNUA, na.rm = TRUE)/sum(!is.na(TMRETNUA)),
                     Bond_Vw_Ret = sum(TMRETNUA*LagOut, na.rm = TRUE)/sum(LagOut, na.rm = TRUE))
                 , by = .(Year, Month)]
  # Drop the first month (no lagged outstanding exists for it)
  output <- output[-1, ]
  # BUG FIX: the 1926-2017 filter was computed but its result discarded;
  # assign it so the returned table is actually restricted to the sample.
  output <- output[Year >= 1926 & Year <= 2017]
  return(output)
}
# Re-load the raw bond file (PS2_Q1 mutates its input by reference via
# data.table, so a fresh copy keeps reruns consistent).
CRSP_Bonds<-fread("C:/Nakul/UCLA Coursework/Spring 2018/QAM/PS_2/4f31512f6a62c031.csv") #Loading downloaded data without edits
Monthly_CRSP_Bonds<-PS2_Q1(CRSP_Bonds = CRSP_Bonds) #Running Function for output
#head(Monthly_CRSP_Bonds)
#tail(Monthly_CRSP_Bonds)
#Question 2
# 30-day T-bill (riskless) returns from CRSP.
Monthly_CRSP_Riskless<-fread("C:/Nakul/UCLA Coursework/Spring 2018/QAM/PS_2/e0fe73e8af88ad4d.csv")
#head(Monthly_CRSP_Riskless)
# PS1_Q1 and CRSP_Stocks come from the previous assignment's script and
# must already be loaded in the session -- TODO confirm before sourcing.
Monthly_CRSP_Stocks<-PS1_Q1(CRSP_Stocks = CRSP_Stocks) #Running Previous Assignment Function
# Merge stock, bond, and riskless-rate tables into one monthly universe
# of excess returns.
#
# Args:
#   Monthly_CRSP_Stocks:   table with Year, Month, Stock_lag_MV, Stock_Vw_Ret
#   Monthly_CRSP_Bonds:    table with Year, Month, Bond_lag_MV, Bond_Vw_Ret
#   Monthly_CRSP_Riskless: data.table with caldt (yyyymmdd) and t30ret
#     (30-day T-bill return, used as the risk-free rate)
# Returns:
#   data.table (1926-2017) with Year, Month, lagged market values, and
#   value-weighted excess returns for stocks and bonds.
PS2_Q2 <- function(Monthly_CRSP_Stocks,Monthly_CRSP_Bonds,Monthly_CRSP_Riskless){
  # Convert yyyymmdd integers to Date (day forced to the 1st) so that
  # Month/Year keys line up with the other two tables.
  dates <- as.character(Monthly_CRSP_Riskless$caldt)
  years <- substr(dates, 1, 4)
  months <- substr(dates, 5, 6)
  days <- rep("01", length(dates))
  dates_final <- paste(years, months, days, sep = "-")
  dates <- as.Date(dates_final, format = "%Y-%m-%d")
  Monthly_CRSP_Riskless$caldt <- dates
  Monthly_CRSP_Riskless[, c("Month", "Year") := .(month(caldt), year(caldt))]
  setorder(Monthly_CRSP_Riskless, Year, Month)
  # Inner-join all three data sets on calendar month
  universe <- merge(Monthly_CRSP_Stocks, Monthly_CRSP_Riskless, by = c("Year", "Month"))
  universe <- merge(universe, Monthly_CRSP_Bonds, by = c("Year", "Month"))
  # (Removed a dead `head(universe)` statement: inside a function its
  # value was silently discarded, so it had no effect.)
  # Excess returns = value-weighted return minus the 30-day T-bill rate
  universe <- universe[, .(Year, Month, Stock_lag_MV,
                           Stock_Excess_Vw_Ret = (Stock_Vw_Ret - t30ret),
                           Bond_lag_MV,
                           Bond_Excess_Vw_Ret = (Bond_Vw_Ret - t30ret))]
  return(universe[Year >= 1926 & Year <= 2017])
}
#Running Function for output
# Combined monthly universe of stock/bond excess returns, 1926-2017.
Monthly_CRSP_Universe<-PS2_Q2(Monthly_CRSP_Bonds = Monthly_CRSP_Bonds,Monthly_CRSP_Stocks = Monthly_CRSP_Stocks,Monthly_CRSP_Riskless = Monthly_CRSP_Riskless) #Running Function for output
#head(Monthly_CRSP_Universe)
#Question 3
# Construct benchmark and risk-parity portfolio excess returns from the
# monthly stock/bond universe.
#
# Adds to the input table (by reference, data.table):
#   Excess_Vw_Ret           - market-cap-weighted stock/bond portfolio
#   Excess_60_40_Ret        - fixed 60% stock / 40% bond portfolio
#   Stock/Bond_inverse_sigma_hat - 1 / trailing 36-month volatility,
#                             lagged one month (no look-ahead)
#   Excess_Unlevered_RP_Ret - inverse-vol weights scaled to sum to 1
#   Excess_levered_RP_Ret   - inverse-vol weights scaled by a constant k
#                             chosen to match the value-weighted
#                             portfolio's volatility over 1929-2010.06
#                             (presumably the Asness et al. sample --
#                             TODO confirm against the assignment).
PS2_Q3<-function(Monthly_CRSP_Universe){
CRSP_combined <- Monthly_CRSP_Universe
setorder(CRSP_combined,Year,Month)
#tail(Monthly_CRSP_Universe)
#Calculate value weighted returns of bond and stock
CRSP_combined[,Vw_weight := Stock_lag_MV/(Stock_lag_MV + Bond_lag_MV)]
CRSP_combined[,Excess_Vw_Ret := Vw_weight * Stock_Excess_Vw_Ret + (1-Vw_weight)*Bond_Excess_Vw_Ret]
#Calculate 60-40 portfolio of bond and stock
CRSP_combined[,Excess_60_40_Ret := 0.6 * Stock_Excess_Vw_Ret + 0.4 * Bond_Excess_Vw_Ret]
#Calculating Stock Inverse Sigma hat and Bond Inverse Sigma hat
# (rolling 36-month sd, right-aligned, then shifted 1 month so only
# past data enters each month's weight)
CRSP_combined[,c("Stock_inverse_sigma_hat","Bond_inverse_sigma_hat"):=.(1/shift(rollapply(CRSP_combined$Stock_Excess_Vw_Ret,36,sd,fill=NA,align='right')),1/shift(rollapply(CRSP_combined$Bond_Excess_Vw_Ret,36,sd,fill=NA,align='right')))]
#Find k = 1/(stock sig)^-1 + (bond sig)^-1
CRSP_combined[,Unlevered_k := 1/(Stock_inverse_sigma_hat + Bond_inverse_sigma_hat)]
#Find unlevered beta portfolio returns
CRSP_combined[,Excess_Unlevered_RP_Ret := Unlevered_k*Stock_inverse_sigma_hat*Stock_Excess_Vw_Ret+
Unlevered_k*Bond_inverse_sigma_hat*Bond_Excess_Vw_Ret]
#Calculating Levered K
# NOTE(review): sd() here has no na.rm, so any NA in Excess_Vw_Ret over
# the window would make sd_vw (and hence K_lever) NA -- verify the
# sample window is complete.
sd_vw<-sd(CRSP_combined[Year>=1929 & (Year<2010 | (Year==2010 & Month<=6))]$Excess_Vw_Ret)
s1<-CRSP_combined[Year>=1929 & (Year<2010 | (Year==2010 & Month<=6))]$Stock_inverse_sigma_hat*CRSP_combined[Year>=1929 & (Year<2010 | (Year==2010 & Month<=6))]$Stock_Excess_Vw_Ret
b1<-CRSP_combined[Year>=1929 & (Year<2010 | (Year==2010 & Month<=6))]$Bond_inverse_sigma_hat*CRSP_combined[Year>=1929 & (Year<2010 | (Year==2010 & Month<=6))]$Bond_Excess_Vw_Ret
sd_unlever<-sd(s1+b1,na.rm = TRUE)
# k chosen ex post so the levered RP portfolio matches sd of the
# value-weighted benchmark over the calibration window
K_lever<-sd_vw/sd_unlever
#Find Levered K portfolio returns
CRSP_combined[,Levered_k:=rep(K_lever,nrow(CRSP_combined))]
CRSP_combined[,Excess_levered_RP_Ret := K_lever*Stock_inverse_sigma_hat*Stock_Excess_Vw_Ret+
K_lever*Bond_inverse_sigma_hat*Bond_Excess_Vw_Ret]
#Keep required columns
CRSP_combined[,c("Stock_lag_MV","Bond_lag_MV","Vw_weight"):=NULL]
return(CRSP_combined)
}
#Running Function for output
# Monthly excess returns for all six portfolios (benchmarks + RP).
Port_Rets<-PS2_Q3(Monthly_CRSP_Universe = Monthly_CRSP_Universe)
#Question 4
# Summary-statistics table for the six portfolios over Jan 1929 - Jun 2010.
#
# Args:
#   Port_Rets: data.table from PS2_Q3 with Year, Month and the six
#     monthly excess-return columns.
# Returns:
#   6x6 numeric matrix (portfolios x statistics). Means and standard
#   deviations are annualized and expressed in percent; Sharpe ratios,
#   skewness, excess kurtosis and t-stats are rounded to 2 decimals.
#
# BUG FIX: the levered-RP Sharpe ratio was written as
#   mean(r)*1.2/sd(r)*sqrt(12)
# i.e. a `*1.2` typo for `*12` AND left-to-right precedence putting
# sqrt(12) in the numerator. All portfolios now share one helper that
# computes Sharpe = annualized mean / annualized sd, consistent with the
# other five rows.
PS2_Q4 <- function(Port_Rets){
  # Restrict to the sample period used in the assignment
  Port_Rets <- Port_Rets[Year>=1929 & (Year<2010 | (Year==2010 & Month<=6))]
  stat_names <- c("Annualized Mean", "t-stat of Annualized Mean",
                  "Annualized Standard Deviation", "Annualized Sharpe Ratio",
                  "Skewness", "Excess Kurtosis")
  # Display name -> return column, in output row order
  port_cols <- c("CRSP Stocks"              = "Stock_Excess_Vw_Ret",
                 "CRSP Bonds"               = "Bond_Excess_Vw_Ret",
                 "Value-weighted portfolio" = "Excess_Vw_Ret",
                 "60/40 portfolio"          = "Excess_60_40_Ret",
                 "unlevered RP"             = "Excess_Unlevered_RP_Ret",
                 "levered RP"               = "Excess_levered_RP_Ret")
  # Helper: annualized summary statistics for one monthly return series
  summarise_returns <- function(r){
    ann_mean <- mean(r, na.rm = TRUE) * 12
    ann_sd <- sd(r, na.rm = TRUE) * sqrt(12)
    c(ann_mean,
      t.test(r)$statistic,            # t-stat of the (monthly) mean
      ann_sd,
      ann_mean / ann_sd,              # annualized Sharpe ratio
      skewness(r, na.rm = TRUE),
      kurtosis(r, na.rm = TRUE) - 3)  # excess kurtosis (normal = 0)
  }
  answers <- t(vapply(port_cols,
                      function(cl) summarise_returns(Port_Rets[[cl]]),
                      numeric(length(stat_names))))
  rownames(answers) <- names(port_cols)
  colnames(answers) <- stat_names
  # Formatting: mean/sd as percentages to 2 decimals; other stats to 2 dp
  answers[,"Annualized Mean"] <- round(answers[,"Annualized Mean"],4)*100
  answers[,"Annualized Standard Deviation"] <- round(answers[,"Annualized Standard Deviation"],4)*100
  answers[,c("Annualized Sharpe Ratio","Skewness","Excess Kurtosis","t-stat of Annualized Mean")] <-
    round(answers[,c("Annualized Sharpe Ratio","Skewness","Excess Kurtosis","t-stat of Annualized Mean")],2)
  return(answers)
}
# Final summary table; the bare name on the last line prints it.
Final_PS2_Output<-PS2_Q4(Port_Rets = Port_Rets)
Final_PS2_Output
|
e6243cf7f166ed53ef274fae2c78851f97573a9a
|
b4eaabbe0b3b1eea7589ceff6e0f0f37d3927da0
|
/man/peloton_api.Rd
|
936063f54ddbdde9c6b54748123102bdcecb3322
|
[
"MIT"
] |
permissive
|
bweiher/pelotonR
|
80ba084e0eed53b84b6cfaaea90128e262abe3d8
|
39d5355702bd50d42a9768cf729aca4ff697b304
|
refs/heads/master
| 2022-03-12T19:29:48.177555
| 2021-01-09T01:07:19
| 2021-01-09T01:07:19
| 214,738,068
| 13
| 4
|
NOASSERTION
| 2021-01-09T01:07:21
| 2019-10-13T00:50:09
|
R
|
UTF-8
|
R
| false
| true
| 796
|
rd
|
peloton_api.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peloton_api.R
\name{peloton_api}
\alias{peloton_api}
\title{Makes a \code{GET} request against one of Peloton's API endpoints}
\usage{
peloton_api(path, print_path = FALSE, ...)
}
\arguments{
\item{path}{API endpoint to query}
\item{print_path}{Show path/endpoint queried}
\item{...}{Additional parameters passed onto methods}
}
\description{
Users need not invoke this method directly and may instead use one of the wrappers around specific endpoints that also vectorizes inputs and processes the data returned, such as \code{\link{get_my_info}}, \code{\link{get_performance_graphs}}, \code{\link{get_all_workouts}}, \code{\link{get_workouts_data}}
}
\examples{
\dontrun{
peloton_auth()
peloton_api("api/me")
}
}
|
e102f24768cc386ee838e348570545c23842374c
|
0ca1dbdb92004d400981e72666c59d5ff890834d
|
/load_data.R
|
43cd54c6f460bb7e66fd81694545751c84d0e75b
|
[] |
no_license
|
carleenxu/ExData_Plotting1
|
e521c1cfc0aa0a9012a1d4c6bc005852124de6c8
|
fc7a9c93bcb44c84b8d35ec9044b6888adf3319b
|
refs/heads/master
| 2021-01-13T06:35:34.400162
| 2017-02-08T12:08:54
| 2017-02-08T12:13:01
| 81,177,505
| 0
| 0
| null | 2017-02-07T07:04:08
| 2017-02-07T07:04:08
| null |
UTF-8
|
R
| false
| false
| 1,215
|
r
|
load_data.R
|
## load dataset in R
## load data from the dates 2007-02-01 and 2007-02-02
## convert the Date and Time variables to Date/Time classes in R using
## the strptime() and as.Date() functions
## in this dataset missing values are coded as ?
# Load the UCI household power consumption data for 2007-02-01/02.
#
# Downloads and unzips the dataset on first run, then reads only the
# lines for 1-2 Feb 2007 (the file is large; grep-on-readLines avoids
# parsing the rest). Missing values are coded "?" in the raw file.
#
# Returns:
#   data.frame with Date (Date class), Time (POSIXct of the full
#   timestamp), and the seven numeric power/metering columns.
load_data <- function() {
  ## Change working directory
  # NOTE(review): setwd() inside a function is a side effect on the
  # session and hard-codes a local path; kept for compatibility with the
  # rest of this assignment's scripts.
  setwd("D:/Study/DS/04_ExploratoryAnalysis/W1")
  ## Download dataset if not already present
  if (!file.exists("household_power_consumption.txt")) {
    url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    temp <- "./temp.zip"
    # BUG FIX: mode = "wb" is required for binary files on Windows;
    # the default text mode can corrupt the downloaded zip.
    download.file(url, temp, mode = "wb")
    unzip(temp, exdir = ".")
    file.remove(temp)
  }
  ## Load dataset in R
  library(data.table)
  files <- "./household_power_consumption.txt"
  # FIX: "^[1,2]" was a character class containing a literal comma;
  # "^[12]" expresses the intent (days 1 or 2) without the stray match.
  # FIX: spell out na.strings instead of relying on partial matching of
  # the abbreviated `na =` argument.
  data <- read.table(text = grep("^[12]/2/2007", readLines(files), value = TRUE),
                     sep = ';',
                     na.strings = "?",
                     col.names = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"))
  # Convert date and time to proper classes (raw Date is d/m/Y)
  data$Date <- as.Date(data$Date, format = '%d/%m/%Y')
  data$Time <- as.POSIXct(paste(data$Date, data$Time))
  return(data)
}
|
08e3da8f287deb69039f6ce9e894af992bb5d47d
|
e7a58742771bed318f764e7f1d8fc205ba892558
|
/shiny_students/server.R
|
bc2ea74e4133db35132907ede7d9e2d0e3385e9c
|
[] |
no_license
|
GiveMeMoreData/Stack_analysis
|
02afef531c26b17cc6a5b87313222248e0166315
|
90f5433bb1916539ff8128390df09098f276cfb6
|
refs/heads/master
| 2020-06-04T03:13:22.098350
| 2019-06-20T06:56:49
| 2019-06-20T06:56:49
| 191,850,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,096
|
r
|
server.R
|
library(shiny)
options(stringsAsFactors = FALSE)
library('dplyr')
library('data.table')
library("stringi")
library("ggplot2")
# Data that does not depend on input goes here (translated from Polish:
# "tu dane nie zależące od imputu")
# NOTE(review): wd is a hard-coded absolute path to the result files;
# must be edited to run this app on another machine.
wd <-"C:\\Users\\Bartek\\Desktop\\pd3\\"
### STUDENTS
## Loading data
#Gaming
shinyServer(function(input, output) {
set <- reactive({
switch (input$data,
"Gaming"="gaming\\",
"Data Science"="datascience\\",
"Music"="music\\"
)
})
plik <- reactive({
switch (input$stage,
"First look"="Resoults_Cor.csv",
"cos"="BestResoults_Cor.csv",
"Final"="BestResoults_2Cor.csv"
)
})
dane <- reactive({
as.data.frame(read.csv(paste0(wd,set(),plik())))
})
output$plot <- renderPlot(
ggplot()+
geom_histogram(aes(dane()[,input$method]),fill="#54aee5",color="black",size=1)+
stat_bin(bins=20)+
ylab("Count")+
xlab(paste0(input$method,"'s rank correlation coefficient"))+
labs(title=paste(input$data,input$stage,input$method,sep = " | "))+
theme(plot.title = element_text(hjust=0.48,size=30,face = "bold"),
axis.title=element_text(hjust=0.48,size=25),
panel.background = element_rect(fill = "#FFFFFF"),
panel.grid.major = element_line(size = 0.5, linetype = 'dashed',
colour = "grey"),
panel.grid.minor = element_line(size = 0.25, linetype = 'dashed',
colour = "grey"),
axis.line = element_line(colour = "grey"),
axis.text = element_text(size=18),
plot.margin = margin(2,2,0.5,2,"cm")
)
)
})
|
1d118c1d2380326d807a6536587943b5df657c6f
|
66fc2b0b0d1e24e676d60f6a2b85dae006fd1136
|
/Section 6 Advanced Visualization with GGPlot2/Histograms and density charts.R
|
35d416f9d2a775554f8d95fbd48b08449459acb8
|
[] |
no_license
|
OmkarGurav6/Udemy-R-For-Data-Science-With-Real-Excercises
|
f8d1365707cde5e6fffd04b1126d9ff3c721f4e1
|
676dc71e576065e79e338fa4765248b821d22c26
|
refs/heads/main
| 2023-02-25T11:20:39.172558
| 2021-01-30T10:53:22
| 2021-01-30T10:53:22
| 334,387,824
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 547
|
r
|
Histograms and density charts.R
|
# Histograms and density charts for the `movies` data set (assumed to be
# already loaded in the workspace -- TODO confirm; columns used:
# BudgetMillions, Genre, AudienceRating, CriticRating).
s <- ggplot(data = movies, aes(x = BudgetMillions))
s + geom_histogram(binwidth = 10, fill="Red")# setting red color
s + geom_histogram(binwidth = 10, aes(fill=Genre), colour= "Black")#color is used to set color of border.
s + geom_density(aes(fill=Genre), position = "stack")
# Base plot without a default aesthetic; x is supplied per layer below.
t <- ggplot(data = movies)
t + geom_histogram(binwidth = 10, aes(x= AudienceRating) ,
                   fill="White", colour="Blue")
t + geom_histogram(binwidth = 10, aes(x= CriticRating) ,
                   fill="White", colour="Blue")
|
085995f1393ab714ec5f444d7ac27d2af1565ecd
|
e67f3901197c81d982db034d42ddde4f3d3c703f
|
/src/util/libraries.R
|
e27d5a2b6e80fbb11e5ac9e70595ec9bebe45bd7
|
[] |
no_license
|
sneakers-the-rat/openpolicing
|
29d3ab03a3fa9e323660521318da48b15ac515af
|
97f58158eca31c56fd3b35083d7cbf1ac6949b66
|
refs/heads/master
| 2020-05-01T04:42:59.217759
| 2019-03-23T11:46:51
| 2019-03-23T11:46:51
| 177,282,043
| 0
| 0
| null | 2019-03-23T11:44:32
| 2019-03-23T11:44:32
| null |
UTF-8
|
R
| false
| false
| 180
|
r
|
libraries.R
|
# Library loads shared by the analysis scripts.
# Load order is kept as-is: later loads mask earlier ones on name clashes.
library(dplyr)     # data manipulation verbs
library(tidyr)     # data reshaping
library(readr)     # fast file reading
library(parallel)  # parallel computation
library(sandwich)  # robust covariance estimators
library(rgdal)     # geospatial data I/O
library(xtable)    # LaTeX/HTML table export
library(lmtest)    # tests for linear models
library(stringr)   # string manipulation
library(ggplot2)   # plotting
library(lubridate) # date/time handling
|
95984b1440161b66f528c7ee442fe1fb916b275b
|
9cf0fb3cbdfbca9aab68cc6aa0bb571b705d4e03
|
/PraceDomowe/PD_06/pd06_komosinskid/pd06_komosinskid.R
|
d4cf9f2a7594775cbe0a86b92197f278a8057644
|
[] |
no_license
|
vaidasmo/TechnikiWizualizacjiDanych2017
|
65905dc831727870774d90dfdd32212160f9b487
|
d251cf5e5d66f837704c753a092cec7594810dce
|
refs/heads/master
| 2021-05-14T17:58:10.216912
| 2017-12-22T22:07:51
| 2017-12-22T22:07:51
| 116,060,512
| 1
| 0
| null | 2018-01-02T21:48:48
| 2018-01-02T21:48:48
| null |
UTF-8
|
R
| false
| false
| 308
|
r
|
pd06_komosinskid.R
|
# pd06 -- homework 6
# Color map: render the 6x6x6 RGB color cube as an interactive 3-D scatter.
library(ggplot2)
library(plotly)
library(shiny)
# Six evenly spaced intensity levels per channel: 0, 51, ..., 255.
v <- seq(0, 255, by = 51)
# Every combination of the three channels (6^3 = 216 colors).
db <- expand.grid(v, v, v)
colnames(db) <- c("r", "g", "b")
# Hex color string for each grid point ("kolor" is Polish for "color").
db$kolor <- rgb(red = db$r, green = db$g, blue = db$b, maxColorValue = 255)
# 3-D scatter: one marker per grid point, drawn in its own color and
# labelled with its hex code on hover.
p <- plot_ly(data = db, x = ~r, y = ~g, z = ~b, text = ~kolor,
             marker = list(color = ~kolor))
p
|
3b6b50346d21fdeceb90122a6fc28e6684b60d07
|
f3c0608636363a56550044a7c346daa919c8fb52
|
/R/build.R
|
6b39bd03479b179616b62a5a85e0ae29fcdd9717
|
[] |
no_license
|
ShirunShen/tri2basis
|
9f442a8fef47cfe920f06e7c1b1cefb2fd5ba4d4
|
2d93bf51217b4c56da4537fa2d3dece1981d51f1
|
refs/heads/master
| 2020-12-28T13:34:37.320216
| 2020-02-10T20:58:31
| 2020-02-10T20:58:31
| 238,351,745
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
build.R
|
######################## build
######################## Evaluate the matrix of pairwise inner products of
######################## the degree-d.bu basis, via products of binomial
######################## coefficients and a common normalizing constant.
build <- function(d.bu) {
  # Exponent triples (i, j, k), one row per basis function (from a sibling
  # helper defined elsewhere in this package).
  idx <- indices(d.bu)
  I.bu <- unname(idx[, 1])
  J.bu <- unname(idx[, 2])
  K.bu <- unname(idx[, 3])
  # Entry [k, j] is choose(I[k]+I[j], I[j]) * choose(J[k]+J[j], J[j]) *
  # choose(K[k]+K[j], K[j]); outer() builds each factor in one shot
  # instead of the original nested loops.
  pair <- function(a, b) choose(a + b, b)
  Mat.bu <- outer(I.bu, I.bu, pair) *
    outer(J.bu, J.bu, pair) *
    outer(K.bu, K.bu, pair)
  # Normalize by the shared constant and return the matrix.
  Mat.bu / (choose(2 * d.bu, d.bu) * choose(2 * d.bu + 2, 2))
}
|
ba1a6b5e943231dd99c2ed1c66098e66430a5a2c
|
5d873a96e5024a1b7f89f676ec7e190607c34337
|
/R/create_chart1.R
|
3b54b71a9f67163242ccbff7df2850c6bb40e2f4
|
[
"MIT"
] |
permissive
|
prcleary/dhis2bulletin
|
5ae47ae15441c793c28aafde50e19540894d2bec
|
f1b418b19da57202ca896fd449d6443b9ccfc2fd
|
refs/heads/master
| 2022-01-24T01:07:42.553336
| 2022-01-05T16:01:04
| 2022-01-05T16:01:04
| 230,428,628
| 0
| 0
| null | 2019-12-27T16:34:31
| 2019-12-27T11:02:44
|
R
|
UTF-8
|
R
| false
| false
| 2,520
|
r
|
create_chart1.R
|
#' Create One Type Of Chart for Bulletin
#'
#' Aggregates the weekly counts for each data element and draws a faceted,
#' value-labelled bar chart in GOV.UK style, which is saved to disk.
#'
#' @param datatable Input table with variables Data, Period,
#'   Organisation unit, Value, isowk, isoyr, isoyrwk, weekdate and
#'   Data_wrap.
#' @param plotfilename File the chart is written to (passed to ggsave).
#' @param plotwidth Width of the saved chart, in inches.
#' @param plotnrow Number of rows in the facet layout.
#' @param plotheight Height of the saved chart, in inches.
#' @param plotxlabel x-axis label.
#' @param plotylabel y-axis label.
#' @param plottitle Chart title.
#' @param plotsubtitle Chart subtitle.
#' @param plottag Chart tag.
#' @param wraplength Maximum line width used when wrapping facet strip
#'   labels.
#'
#' @return Invisibly, the ggplot object; the chart is also saved to
#'   \code{plotfilename} as a side effect.
#' @import data.table ggplot2 govstyle stringr
#' @export
#'
#' @examples
#' # Not run:
#' # placeholder
create_chart1 <-
  function(datatable,
           plotfilename,
           plotwidth = 11,
           plotnrow = 2,
           plotheight = 5,
           plotxlabel = 'x',
           plotylabel = 'y',
           plottitle = '',
           plotsubtitle = '',
           plottag = '',
           wraplength = 35) {
    # Check variable names as expected
    expected_names <-
      c(
        'Data',
        'Period',
        'Organisation unit',
        'Value',
        'isowk',
        'isoyr',
        'isoyrwk',
        'weekdate',
        'Data_wrap'
      )
    # Fixed: the original tested length(setdiff(...) > 1), i.e. the length
    # of a comparison vector, instead of whether any unexpected names exist.
    if (length(setdiff(names(datatable), expected_names)) > 0)
      stop(
        'Expected variable names ',
        paste0(expected_names, collapse = ', '),
        ' but got variable names ',
        paste0(names(datatable), collapse = ', ')
      )
    # Aggregate to one count per data element, week and year.
    figdata <- datatable[, .(N = sum(Value)), .(Data, weekdate, isoyr, isowk)]
    # Wrap long data-element names so facet strips stay readable.
    figdata[, Data_wrap := str_wrap(Data, width = wraplength)]
    # Build the faceted bar chart with a count label above each bar.
    figplot <-
      ggplot(data = figdata, aes(
        x = weekdate,
        y = N
      )) +
      geom_bar(position = 'dodge', stat = 'identity', fill = '#006600') +
      facet_wrap(~Data_wrap, nrow = plotnrow) +
      geom_text(
        data = figdata,
        aes(label = N),
        size = 3,
        position = position_dodge(width = 0.9),
        vjust = -0.2
      ) +
      theme_gov() +
      scale_color_viridis_d() +
      # Label every ISO week on the x axis as "W<week number>".
      scale_x_date(breaks = figdata$weekdate, labels = paste0('W', figdata$isowk)) +
      scale_y_continuous(expand = c(0.1, 0)) +
      theme(axis.text.x = element_text(vjust = 0.5), strip.text = element_text(size = 8)) +
      labs(
        x = plotxlabel,
        y = plotylabel,
        title = plottitle,
        subtitle = plotsubtitle,
        tag = plottag
      )
    # Save the chart, then return the plot object invisibly.
    ggsave(plot = figplot,
           plotfilename,
           width = plotwidth,
           height = plotheight)
    invisible(figplot)
  }
|
b9d86e9184bb000e3aebdf80fb256f73e8ef79ca
|
bd23162e4b8c3c779557160a774bffb765adce86
|
/prepare.R
|
cefc9ca52991f8831caa382939664870be07709b
|
[
"MIT"
] |
permissive
|
ktmud/github-life
|
a8ab2ee91c85c2a62a348f6764742dcf1b00c338
|
421e46f9832879bb8c81d8731d3524ef20fc3065
|
refs/heads/master
| 2021-01-19T22:24:48.671526
| 2017-11-11T18:50:26
| 2017-11-11T18:50:26
| 88,812,727
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 130
|
r
|
prepare.R
|
#
# Prepare the repository list for scraping
#
# Pull in the scraping helpers and the database access layer from sibling
# files (paths are relative to the current working directory).
source("scrape.R")
source("include/db.R")
# Consolidate the list of repositories to scrape.
# NOTE(review): currently an empty stub -- returns NULL and has no side
# effects; the implementation is still to be written.
ConsolidateRepoList <- function() {
}
|
3ad9a228ffc0e9409fd5a5b8db25ad968177521e
|
a68fcf7bad70e91af4b398df8bee04b9b0bda82e
|
/S34_S38_phylogenetic_comparative_methods/scripts/resources/slouch/R/model.fit.R
|
0561894e0257879394091e2d9fdc79479138f5ae
|
[] |
no_license
|
hj1994412/teleost_genomes_immune
|
44aac06190125b4dea9533823b33e28fc34d6b67
|
50f1552ebb5f19703b388ba7d5517a3ba800c872
|
refs/heads/master
| 2021-03-06T18:24:10.316076
| 2016-08-27T10:58:39
| 2016-08-27T10:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 96,274
|
r
|
model.fit.R
|
`model.fit` <-
function(topology, times, half_life_values, vy_values, response, me.response=NULL, fixed.fact=NULL,fixed.cov=NULL, me.fixed.cov=NULL, mecov.fixed.cov=NULL, random.cov=NULL, me.random.cov=NULL, mecov.random.cov=NULL, intercept="root", ultrametric=TRUE, support=NULL, convergence=NULL)
{
# SET DEFAULTS IF NOT SPECIFIED
if(is.null(support)) support=2;
if(is.null(convergence)) convergence=0.000001;
if(is.null(me.response)) me.response<-diag(rep(0, times=length(response[!is.na(response)]))) else me.response<-diag(me.response[!is.na(me.response)]);
# DETERMINE MODEL STRUCTURE FROM INPUT AND WRITE A SUMMARY TO THE R CONSOLE
if(is.null(fixed.fact) && is.null(fixed.cov) && is.null(random.cov)) model.type <- "IntcptReg";
if(!is.null(fixed.fact) && is.null(fixed.cov) && is.null(random.cov)) model.type <- "ffANOVA";
if(!is.null(fixed.fact) && !is.null(fixed.cov) && is.null(random.cov)) model.type <-"ffANCOVA";
if(!is.null(fixed.fact) && is.null(fixed.cov) && !is.null(random.cov)) model.type <- "mmANCOVA";
if(!is.null(fixed.fact) && !is.null(fixed.cov) && !is.null(random.cov)) model.type <- "mmfANCOVA"; if(is.null(fixed.fact) && is.null(fixed.cov) && !is.null(random.cov)) model.type <- "rReg";
if(is.null(fixed.fact) && !is.null(fixed.cov) && is.null(random.cov)) model.type <- "fReg";
if(is.null(fixed.fact) && !is.null(fixed.cov) && !is.null(random.cov)) model.type <- "mfReg";
# Write type of model to screen
message("")
message("MODEL SUMMARY")
message("")
if(model.type=="IntcptReg")
{
message("You have specified an OU model for a response variable regressed on a grand mean, i.e. one global optima");
if(ultrametric==FALSE)
{
GS_head<-c("Ya", "Theta_Global");
n.par<-2;
}
else
{
GS_head<-("Theta_Global");
n.par<-1;
}
}
else
if(model.type=="ffANOVA" )
{
message("You have specified an OU model for a response variable modeled on optima determined by fixed, categorical predictor variables");
if(is.null(intercept)) GS_head<-c("Ya", levels(as.factor(fixed.fact))) else GS_head<-levels(as.factor(fixed.fact));
}
else
if(model.type=="ffANCOVA")
{
message("You have specified an OU model for a response variable modeled on optima determined by both fixed categorical predictors and an instantaneous scaling with a fixed covariate");
if(is.null(intercept)) GS_head<-c("Ya", levels(as.factor(fixed.fact))) else GS_head<-levels(as.factor(fixed.fact));
}
else
if(model.type=="mmANCOVA")
{
message("You have specified an OU model for a response variable modeled on optima determined by both fixed, categorical factors as well as covariates which themselves randomly evolve (modeled as Brownian-motions)");
if(is.null(intercept)) GS_head<-c("Ya", levels(as.factor(fixed.fact))) else GS_head<-levels(as.factor(fixed.fact));
}
if(model.type=="mmfANCOVA")
{
message("You have specified an OU model for a response variable modeled on optima determined by both fixed, categorical factors as well as covariates which themselves randomly evolve (modeled as Brownian-motions)");
if(is.null(intercept)) GS_head<-c("Ya", levels(as.factor(fixed.fact))) else GS_head<-levels(as.factor(fixed.fact));
}
else
if(model.type=="rReg") message("You have specified an OU model for a response variable modeled on optima that are determined by randomly evolving covariates (modeled as Brownian-motions)")
else
if(model.type=="fReg") message("You have specified an OU model for a response variable modeled on optima that are determined by an instantaneous scaling with fixed covariates")
else
if(model.type=="mfReg") message("You have specified an OU model for a response variable modeled on optima that are determined by both an instantaneous scaling with fixed covariates and randomly evolving covariates (modeled as Brownian-motions)");
message("")
# Summarize dataset, response, predictors, tree height and sample size and write to screen
ms<-list(Dataset=search()[2], Response=deparse(substitute(response)), Fixed.factor=deparse(substitute(fixed.fact)),Fixed.covariates=deparse(substitute(fixed.cov)), Random.covariates=deparse(substitute(random.cov)), Sample.size=length(response[!is.na(response)]), Tree.height=max(times))
ms<-as.matrix(ms)
colnames(ms)<-"Summary"
print(ms)
message("")
message("GRID SEARCH PARAMETER SUPPORT")
message("")
# SPECIFY COMPONENTS THAT ARE COMMON TO ALL MODELS
Y <- response[!is.na(response)];
N <- length(Y);
T <- times[terminal.twigs(topology)];
tia<-tsia(topology, times);
tja<-tsja(topology, times);
term<-terminal.twigs(topology);
pt<-parse.tree(topology, times);
ta<-pt$bt;
tij<-pt$dm;
num.prob<-matrix(data=0, nrow=N, ncol=N) #this matrix is included for cases where species split at the root;
cm2<-matrix(data=0, nrow=N, ncol=N);
gof<-matrix(data=0, nrow=length(half_life_values), ncol=length(vy_values), dimnames=list(half_life_values, vy_values));
h.lives<-matrix(data=0, nrow=length(half_life_values), ncol=length(vy_values))
ln2<-log(2)
half_life_values<-rev(half_life_values)
# EVALUATE IF IT IS A FIXED FACTOR PREDICTOR OR INTERCEPT ONLY MODEL THEN SET UP APPROPRIATE DESIGN AND VARIANCE MATRICES AND ESTIMATE PARAMETERS WITHOUT ITERATED GLS
if(model.type =="IntcptReg" || model.type == "ffANOVA")
{
if(model.type=="IntcptReg") regime.specs<-rep(1, times=length(topology)) else regime.specs<-fixed.fact;
cat(c(" ", "t1/2 ", "Vy ", "Supp ", GS_head), sep=" ");
message(" ");
for(i in 1:length(half_life_values))
{
for(k in 1:length(vy_values))
{
vy <- vy_values[k];
if(half_life_values[i]==0)
{
a<-1000000000000000000000;
V<-diag(rep(vy, times=N)) + me.response;
}
else
{
a <- ln2/half_life_values[i];
V<-((vy)*(1-exp(-2*a*ta))*exp(-a*tij))+me.response;
}
if(model.type=="IntcptReg")
{
if(half_life_values[i]==0 ||a>=1000000000000000000000) X<-matrix(data=1, nrow=N, ncol=1) else
if(ultrametric==TRUE) X<-matrix(data=1, nrow=N, ncol=1)
else
{
X<-matrix(data=0, nrow=N, ncol=2);
X[,1]<-1-exp(-a*T);
X[,2]<-exp(-a*T)
}
}
else
X<-weight.matrix(a, topology,times, N, regime.specs, fixed.cov, intercept)
# GLS estimation of parameters for fixed model
V.inverse<-solve(V)
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
beta0<-beta.i
eY<-X%*%beta0
resid<-Y-eY
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
print(c(half_life_values[i], vy, round(gof[i,k], 4), round(as.numeric(t(beta0)), 4)))
} # end of half-life loop
} # end of vy loop
# Search GOF matrix for best estimates of alpha and vy #
x<-rev(half_life_values)
y<-vy_values
z<-gof;
ml<-max(z);
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]==ml){alpha.est=log(2)/half_life_values[i]; vy.est=vy_values[j]}
}
}
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]<=ml-support) gof[i, j]=ml-support;
}
}
gof=gof-ml
# final GLS estimations for corrected optima using best alpha and vy estimates #
if(alpha.est==Inf) alpha.est<-1000000000000000000000
if(model.type=="IntcptReg")
{
if(alpha.est==Inf || alpha.est>=1000000000000000000000 ) X<-matrix(data=1, nrow=N, ncol=1)
else
if(ultrametric==TRUE) X<-matrix(data=1, nrow=N, ncol=1)
else
{
X<-matrix(data=0, nrow=N, ncol=2);
X[,1]<-1-exp(-alpha.est*T);
X[,2]<-exp(-alpha.est*T)
}
}
else
X<-weight.matrix(alpha.est, topology,times, N, regime.specs, fixed.cov, intercept)
V<-((vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)) + me.response;
V.inverse<-solve(V);
beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X);
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y);
gls.beta0<-beta.i;
# code for calculating SSE, SST and r squared
pred.mean <- X%*%gls.beta0
g.mean <- (t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst <- t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse <-t (Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared <- (sst-sse)/sst
} # END OF FIXED PREDICTOR OR INTERCEPT ONLY PARAMETER ESTIMATION
if(model.type =="ffANCOVA" || model.type == "fReg")
{
fixed.pred<-data.frame(fixed.cov);
n.fixed.pred<-length(fixed.pred[1,]);
fixed.pred<-matrix(data=fixed.pred[!is.na(fixed.pred)], ncol=n.fixed.pred);
if(is.null(me.fixed.cov)) me.fixed.pred<-matrix(data=0, nrow=N, ncol=n.fixed.pred) else me.fixed.pred<- matrix(data=me.fixed.cov[!is.na(me.fixed.cov)], ncol=n.fixed.pred);
if(is.null(mecov.fixed.cov)) me.cov<-matrix(data=0, nrow=N, ncol=n.fixed.pred) else me.cov<-matrix(data=me.cov.fixed.cov[!is.na(me.cov.fixed.cov)], ncol=n.fixed.pred);
if(model.type=="fReg")
{
x.ols<-cbind(1, fixed.pred);
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);
n.fixed<-1
cat(c(" ", "t1/2 ", "Vy ", "Supp ", "Bo", if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov)), sep=" ");
message("");
}
if(model.type=="ffANCOVA")
{
regime.specs<-fixed.fact;
n.fixed<-length(levels(as.factor(regime.specs)))
regime.specs<-as.factor(regime.specs)
x.ols<-weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept); beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);
cat(c(" ", "t1/2 ", "Vy ", "Supp ", GS_head, if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov)), sep=" ");
message("");
}
for(i in 1:length(half_life_values))
{
for(k in 1:length(vy_values))
{
vy <- vy_values[k];
if(half_life_values[i]==0)
{
a<-1000000000000000000000
V<-diag(rep(vy, times=N)) + me.response + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
}
else
{
a <- ln2/half_life_values[i];
V<-((vy)*(1-exp(-2*a*ta))*exp(-a*tij))+me.response + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
}
if(model.type=="fReg") X<-cbind(1, fixed.pred) else X<-weight.matrix(a, topology, times, N, regime.specs, fixed.pred, intercept);
##### iterated GLS
con.count<-0; # Counter for loop break if Beta's dont converge #
repeat
{
if(half_life_values[i]==0)
{
a<-1000000000000000000000
V<-diag(rep(vy, times=N)) + me.response + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
}
else
{
a <- ln2/half_life_values[i];
V<-((vy)*(1-exp(-2*a*ta))*exp(-a*tij))+me.response + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
} # END OF If ELSE CONDITION FOR HALF-LIFE 0 OR NOT
if(model.type=="fReg") X<-cbind(1, fixed.pred) else X<-weight.matrix(a, topology, times, N, regime.specs, fixed.pred, intercept);
# INTERMEDIATE ESTIMATION OF OPTIMAL REGRESSION #
V.inverse<-solve(V)
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
eY<-X%*%beta1
resid<-Y-eY
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
print(as.numeric(round(cbind(if(a!=0)log(2)/a else 0.00, vy, gof[i,k], t(beta1)), 4)))
### END OF ITERATED GLS
} # end of half-life loop
} # end of vy loop
# Search GOF matrix for best estimates of alpha and vy #
x<-rev(half_life_values)
y<-vy_values
z<-gof;
ml<-max(z);
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]==ml){alpha.est=log(2)/half_life_values[i]; vy.est=vy_values[j]}
}
}
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]<=ml-support) gof[i, j]=ml-support;
}
}
gof=gof-ml
# final GLS estimations for corrected optima using best alpha and vy estimates #
con.count<-0; # Counter for loop break if Beta's dont converge #
repeat
{
if(alpha.est==Inf)
{
a<-1000000000000000000000
V<-diag(rep(vy, times=N)) + me.response + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
}
else
{
V<-((vy)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij))+me.response + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
} # END OF If ELSE CONDITION FOR HALF-LIFE 0 OR NOT
if(model.type=="fReg") X<-cbind(1, fixed.pred) else X<-weight.matrix(a, topology, times, N, regime.specs, fixed.pred, intercept);
# INTERMEDIATE ESTIMATION OF OPTIMAL REGRESSION #
V.inverse<-solve(V)
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
gls.beta0<-beta1;
beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X);
# code for calculating SSE, SST and r squared
pred.mean <- X%*%gls.beta0
g.mean <- (t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst <- t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse <-t (Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared <- (sst-sse)/sst
} # END OF fReg AND ffANCOVA ESTIMATION ROUTINES must still add iterated GLS for me
# EVALUATE IF IT IS A FIXED MODEL ANCOVA, MIXED MODEL ANCOVA OR RANDOM PREDICTOR REGRESSION, ESTIMATE PARAMETERS WITH ITERATED GLS TO A) TAKE MEASUREMENT VARIANCE INTO ACCOUNT OR B) RANDOM EFFECTS INTO ACCOUNT IN THE CASE OF THE MIXED MODEL AND REGRESSION
if(model.type == "mmANCOVA" || model.type=="rReg") ### more models here
{
# SET UP INITIAL MATRICES FOR MULTIPLE REGRESSION AND CALCULATE THETA AND SIGMA FOR RANDOM PREDICTOR / S
pred<-data.frame(random.cov);
n.pred<-length(pred[1,]);
pred<-matrix(data=pred[!is.na(pred)], ncol=n.pred);
if(is.null(me.random.cov)) me.pred<-matrix(data=0, nrow=N, ncol=n.pred) else me.pred<-matrix(data=me.random.cov[!is.na(me.random.cov)], ncol=n.pred);
if(is.null(mecov.random.cov)) me.cov<-matrix(data=0, nrow=N, ncol=n.pred) else me.cov<-matrix(data=mecov.random.cov[!is.na(mecov.random.cov)], ncol=n.pred);
s.X<-matrix(data=0, ncol=n.pred) # PREDICTOR SIGMA
for(i in 1:n.pred)
{
s.X[,i] <- as.numeric(sigma.X.estimate(pred[,i],me.pred[,i], topology, times)[2]);
}
theta.X<-matrix(data=0, ncol=n.pred) #PREDICTOR THETA
for(i in 1:n.pred)
{
theta.X[,i] <- as.numeric(sigma.X.estimate(pred[,i],me.pred[,i], topology, times)[1]);
}
# END OF RANDOM PREDICTOR THETA AND SIGMA ESTIMATES
## INITIAL OLS ESTIMATES TO SEED ITERATED GLS
if(model.type=="rReg")
{
x.ols<-cbind(1, pred);
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);
if(ultrametric == FALSE) beta1<-rbind(0, 0, beta1); # 2 additional parameter seeds for Ya and Xa
}
if(model.type=="mmANCOVA")
{
regime.specs<-fixed.fact;
n.fixed<-length(levels(as.factor(regime.specs)))
regime.specs<-as.factor(regime.specs)
x.ols<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred); beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);
}
# GRID ESTIMATION ROUTINE AND ITERATED GLS FOR MODELS THAT INCLUDE RANDOM EFFECTS
if(model.type=="mmANCOVA")
{
cat(c(" ", "t1/2 ", "Vy ", "Supp ", GS_head, if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), sep=" ");
message(" ");
for(i in 1:length(half_life_values))
{
for(k in 1:length(vy_values))
{
if(half_life_values[i]==0) a<-1000000000000000000000 else a <- ln2/half_life_values[i];
vy <- vy_values[k];
X<-cbind(weight.matrix(a, topology, times, N, regime.specs, fixed.cov, intercept), (1-(1-exp(-a*T))/(a*T))*pred);
if(length(X[1,]) > length(beta1)) {beta1<-as.matrix(c(0, beta1)); n.fixed<-n.fixed+1}
if(length(X[1,])< length(beta1)) {beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);n.fixed<-length(levels(as.factor(regime.specs))); print("The Ya parameter is dropped as its coefficient is too small");}
# CODE FOR ESTIMATING BETA USING ITERATED GLS
con.count<-0; # Counter for loop break if Beta's dont converge #
repeat
{
if(half_life_values[i]==0)
{
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred);
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])));
}
else
{
X<-cbind(weight.matrix(a, topology, times, N, regime.specs, fixed.cov, intercept), (1-(1-exp(-a*T))/(a*T))*pred);
s1<-as.numeric(s.X%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),]));
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p]);
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij);
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/ (a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]));
}
}
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(n.fixed+1):length(beta1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)));
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(n.fixed+1):length(beta1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov
} # END OF If ELSE CONDITION FOR HALF-LIFE 0 OR NOT
# INTERMEDIATE ESTIMATION OF OPTIMAL REGRESSION #
V.inverse<-solve(V)
if(half_life_values[i]==0)
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
else
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
}
### END OF ITERATED GLS ESTIMATION FOR BETA #
if(half_life_values[i]==0)
{
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1):length(beta1),])))
V.inverse<-solve(V)
eY<-X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
}
else
{
s1<-as.numeric(s.X%*%(beta1[(n.fixed+1):length(beta1),]*beta1[(n.fixed+1):length(beta1),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p]);
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij);
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/(a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]));
}
}
X<-cbind(weight.matrix(a, topology, times, N, regime.specs, fixed.cov, intercept), (1-(1-exp(-a*T))/(a*T))*pred);
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(n.fixed+1):length(beta1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)));
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(n.fixed+1):length(beta1), ], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov;
V.inverse<-solve(V)
eY<-X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
} # END OF CONDITION FOR HALF-LIFE = 0 #
print(as.numeric(round(cbind(if(a!=0)log(2)/a else 0.00, vy, gof[i,k], t(beta1)), 4)))
}
}
# END OF GRID SETUP,START OF GRID SEARCH FOR BEST ALPHA AND VY ESTIMATES #
x<-rev(half_life_values)
y<-vy_values
z<-gof;
ml<-max(z);
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]==ml){alpha.est=log(2)/half_life_values[i]; vy.est=vy_values[j]}
}
}
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]<=ml-support)gof[i, j]=ml-support;
}
}
gof=gof-ml
n.fixed<-length(levels(as.factor(regime.specs))) ### reset before final regression
# FINAL OPTIMAL REGRESSION USING BEST ALPHA AND VY ESTIMATES #
if(alpha.est==Inf || alpha.est >=1000000000000000000000)
{
x.ols<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred)
gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
con.count<-0;
repeat
{
s1<-as.numeric(s.X%*%(gls.beta1[(n.fixed+1):length(gls.beta1),]*gls.beta1[(n.fixed+1):length(gls.beta1),]))
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(n.fixed+1):length(gls.beta1),]*gls.beta1[(n.fixed+1):length(gls.beta1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(n.fixed+1):length(gls.beta1),])))
V.inverse<-solve(V)
beta.i.var<-ev.beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
}
gls.beta1<-beta.i
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(n.fixed+1):length(beta1),]*gls.beta1[(n.fixed+1):length(gls.beta1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(n.fixed+1):length(gls.beta1),])))
pred.mean<-X%*%gls.beta1
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
}
else
{
x.ols<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.cov, intercept), pred)
gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
con.count<-0;
X<-cbind(weight.matrix(alpha.est, topology, times, N, regime.specs, fixed.cov, intercept), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred);
if(length(X[1,]) > length(gls.beta1)) {gls.beta1<-as.matrix(c(0, gls.beta1)); n.fixed<-n.fixed+1}
if(length(X[1,])< length(gls.beta1)) {gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);n.fixed<-length(levels(as.factor(regime.specs)))}
repeat
{
X<-cbind(weight.matrix(alpha.est, topology, times, N, regime.specs, fixed.cov, intercept), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred);
s1<-as.numeric(s.X%*%(gls.beta1[(n.fixed+1):length(gls.beta1),]*gls.beta1[(n.fixed+1):length(gls.beta1),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-alpha.est*ta[q,p]))/(alpha.est*ta[q,p])
}
}
cm1<-(s1/(2*alpha.est)+vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*((1-exp(-alpha.est*T[q]))/(alpha.est*T[q]))-(exp(-alpha.est*tia[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[q])+ exp(-alpha.est*tja[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*(num.prob[p,q]))
}
}
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(n.fixed+1):length(gls.beta1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(gls.beta1[(n.fixed+1):length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))*2), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov;
V.inverse<-solve(V)
beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
X<-cbind(weight.matrix(alpha.est, topology, times, N, regime.specs, fixed.cov, intercept), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(n.fixed+1):length(gls.beta1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(gls.beta1[(n.fixed+1):length(gls.beta1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))*2), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov;
pred.mean<-X%*%gls.beta1
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
}
}
# END OF ITERATED GLS LOOP #
} # END OF ESTIMATION MIXED MODEL ANCOVA
if(model.type=="rReg")
{
if(ultrametric==TRUE)
{
cat(c(" ", "t1/2 ", "Vy ", "Supp ", "K ", if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), sep=" ");
}
else
cat(c(" ", "t1/2 ", "Vy ", "Supp ", "Ya ", "Xa " ,"Bo ", if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), sep=" ");
message(" ");
for(i in 1:length(half_life_values))
{
for(k in 1:length(vy_values))
{
if(half_life_values[i]==0)
{
x.ols<-cbind(1, pred)
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
vy <- vy_values[k];
}
else
{
a <- ln2/half_life_values[i];
vy <- vy_values[k];
x.ols<-cbind(1, pred)
if(ultrametric==TRUE)
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
else
beta1<-rbind(0, 0, solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y))
}
### CODE FOR ESTIMATING BETA USING ITERATED GLS ###
con.count<-0; # Counter for loop break if Beta's dont converge #
repeat
{
if(half_life_values[i]==0)
{
a<-Inf
s1<-as.numeric(s.X%*%(beta1[2:(n.pred+1),]*beta1[2:(n.pred+1),]))
X<-cbind(1, pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[2:(n.pred+1),]*beta1[2:(n.pred+1),])))-diag(as.numeric(me.cov%*%(2*beta1[2:(n.pred+1),])))
}
else
{
if(ultrametric==TRUE)
s1<-as.numeric(s.X%*%(beta1[2:(n.pred+1),]*beta1[2:(n.pred+1),]))
else
s1<-as.numeric(s.X%*%(beta1[4:(n.pred+3),]*beta1[4:(n.pred+3),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p])
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/(a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]))
}
}
if(ultrametric==TRUE)
{
X<-cbind(1, (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[2:(n.pred+1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[2:(n.pred+1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov
}
else
{
nu.X<-cbind(1-exp(-a*T), 1-exp(-a*T)-(1-(1-exp(-a*T))/(a*T)), exp(-a*T), (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[4:(n.pred+3), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[4:(n.pred+3),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov
}
} # END OF ELSE CONDITION FOR HALF-LIFE = 0
# INTERMEDIATE ESTIMATION OF OPTIMAL REGRESSION #
V.inverse<-solve(V)
if(half_life_values[i]==0)
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+1))
for(f in 1:(n.pred+1))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
else
{
if(ultrametric==TRUE)
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+1))
for(f in 1:(n.pred+1))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
else
{
beta.i<-pseudoinverse(t(nu.X)%*%V.inverse%*%nu.X)%*%(t(nu.X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred))
for(f in 4:(n.pred+3))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[(f-3)]=0 else test[(f-3)]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
} # END OF HALF-LIFE = 0 CONDITION #
} # END OF ITERATED GLS REPEAT LOOP #
beta1<-beta.i
### END OF ITERATED GLS ESTIMATION FOR BETA #
if(half_life_values[i]==0)
{
s1<-as.numeric(s.X%*%(beta1[2:(n.pred+1),]*beta1[2:(n.pred+1),]))
X<-cbind(1, pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[2:(n.pred+1),]*beta1[2:(n.pred+1),])))-diag(as.numeric(me.cov%*%(2*beta1[2:(n.pred+1),])))
V.inverse<-solve(V)
eY<-X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
}
else
{
if(ultrametric==TRUE)
s1<-as.numeric(s.X%*%(beta1[2:(n.pred+1),]*beta1[2:(n.pred+1),]))
else
s1<-as.numeric(s.X%*%(beta1[4:(n.pred+3),]*beta1[4:(n.pred+3),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p]);
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij);
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/(a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]));
}
}
if(ultrametric==TRUE)
{
X<-cbind(1, (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[2:(n.pred+1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[2:(n.pred+1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov;
}
else
{
nu.X<-cbind(1-exp(-a*T), 1-exp(-a*T)-(1-(1-exp(-a*T))/(a*T)), exp(-a*T), (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[4:(n.pred+3), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[4:(n.pred+3),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov
}
V.inverse<-solve(V)
if(ultrametric==TRUE)
eY<-X%*%beta1
else
eY<-nu.X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
} # END OF CONDITION FOR HALF-LIFE = 0 #
print(as.numeric(round(cbind(if(a!=0)log(2)/a else 0.00, vy, gof[i,k], t(beta1)), 4)))
}
}
# Grid-search post-processing (rReg): locate the ML estimates of alpha and vy on the
# (half-life, vy) grid, then floor the log-likelihood surface at ml - support so only
# the support region remains for reporting/plotting.
x<-rev(half_life_values)
y<-vy_values
z<-gof;
ml<-max(z);
# Scan for the grid cell attaining the maximum; alpha = ln(2)/t_half, so a half-life of 0
# yields alpha.est = Inf (handled by the dedicated branch below). Exact float equality with
# ml is safe here because ml is the max of these very same stored values.
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]==ml){alpha.est=log(2)/half_life_values[i]; vy.est=vy_values[j]}
}
}
# Truncate every grid value more than 'support' log-likelihood units below the maximum.
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]<=ml-support)gof[i, j]=ml-support;
}
}
gof=gof-ml
# FINAL OPTIMAL REGRESSION USING BEST ALPHA AND VY ESTIMATES #
if(alpha.est==Inf)
{
gls.beta1<-glsyx.beta1<- solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
con.count<-0 # counter to break loop in the event of non-convergence
repeat
{
s1<-as.numeric(s.X%*%(gls.beta1[2:(n.pred+1),]*gls.beta1[2:(n.pred+1),]))
X<-cbind(1, pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[2:(n.pred+1),]*gls.beta1[2:(n.pred+1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[2:length(gls.beta1),])))
V.inverse<-solve(V)
beta.i.var<-ev.beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+1))
for(f in 1:(n.pred+1))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-glsyx.beta1<-beta.i
}
gls.beta1<-glsyx.beta1<-beta.i
X<-cbind(1, pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[2:(n.pred+1),]*gls.beta1[2:(n.pred+1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[2:length(gls.beta1),])))
pred.mean<-X%*%gls.beta1
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
}
else
{
if(ultrametric==TRUE)
gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
else
gls.beta1<-rbind(0, 0, solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y));
con.count<-0;
repeat
{
if(ultrametric==TRUE)
s1<-as.numeric(s.X%*%(gls.beta1[2:(n.pred+1),]*gls.beta1[2:(n.pred+1),]))
else
s1<-as.numeric(s.X%*%(gls.beta1[4:(n.pred+3),]*gls.beta1[4:(n.pred+3),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-alpha.est*ta[q,p]))/(alpha.est*ta[q,p])
}
}
cm1<-(s1/(2*alpha.est)+vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*((1-exp(-alpha.est*T[q]))/(alpha.est*T[q]))-(exp(-alpha.est*tia[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[q])+ exp(-alpha.est*tja[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*(num.prob[p,q]))
}
}
if(ultrametric==TRUE)
{
X<-cbind(1, (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[2:(n.pred+1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[2:length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov;
}
else
{
nu.X<-cbind(1-exp(-alpha.est*T), 1-exp(-alpha.est*T)-(1-(1-exp(-alpha.est*T))/(alpha.est*T)), exp(-alpha.est*T), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[4:(n.pred+3), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
# BUGFIX(review): this is the FINAL regression at the ML estimate, so the OU weight must use
# alpha.est, not 'a' (which still holds the value from the last grid-search iteration).
# The parallel mv line above and the ultrametric branch both already use alpha.est.
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[4:length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov
}
V.inverse<-solve(V)
if(ultrametric==TRUE)
{
beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+1))
for(f in 1:(n.pred+1))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
}
else
{
# Non-ultrametric tree: regress on the extended design matrix nu.X (Ya, Xa, Bo columns
# plus the OU-weighted random predictors).
beta.i.var<-pseudoinverse(t(nu.X)%*%V.inverse%*%nu.X)
beta.i<-beta.i.var%*%(t(nu.X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred))
# BUGFIX(review): convergence must be checked against, and the update applied to, gls.beta1 —
# the working estimate of THIS final GLS loop. The original compared/updated beta1, a leftover
# from the grid search, so V was rebuilt from the OLS seed on every iteration and the loop
# could terminate against a stale baseline.
for(f in 4:(n.pred+3))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[(f-3)]=0 else test[(f-3)]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
}
}
# END OF ITERATED GLS LOOP #
# CODE FOR SST, SSE AND R-SQUARED #
if(ultrametric==TRUE)
gls.beta1<-beta.i
else
{
gls.beta1<-beta.i
ind.par<-matrix(data=0, nrow=N, ncol=4, dimnames=list(NULL, c("Bo", "Bi.Xia", "Yo", "Sum")))
ind.par[,1]<-beta.i[1]*nu.X[,1]
ind.par[,2]<-(beta.i[2]*nu.X[,2])
ind.par[,3]<-beta.i[3]*nu.X[,3]
ind.par[,4]<-ind.par[,1]+ind.par[,2]+ind.par[,3]
mean.Bo=mean(ind.par[,4])
}
if(ultrametric==TRUE)
{
X<-cbind(1, (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[2:(n.pred+1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[2:length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov;
pred.mean<-X%*%gls.beta1
}
else
{
nu.X<-cbind(1-exp(-alpha.est*T), 1-exp(-alpha.est*T)-(1-(1-exp(-alpha.est*T))/(alpha.est*T)), exp(-alpha.est*T), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[4:(n.pred+3), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[4:length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov
pred.mean<-nu.X%*%gls.beta1
}
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
# FINAL EVOLUTIONARY REGRESSION USING BEST ALPHA AND VY ESTIMATES AND KNOWN VARIANCE MATRIX #
if(ultrametric==TRUE) s1<-as.numeric(s.X%*%(gls.beta1[2:(n.pred+1),]*gls.beta1[2:(n.pred+1),]))
else s1<-as.numeric(s.X%*%(gls.beta1[4:(n.pred+3),]*gls.beta1[4:(n.pred+3),]));
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-alpha.est*ta[q,p]))/(alpha.est*ta[q,p])
}
}
cm1<-(s1/(2*alpha.est)+vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*((1-exp(-alpha.est*T[q]))/(alpha.est*T[q]))-(exp(-alpha.est*tia[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[q])+ exp(-alpha.est*tja[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*(num.prob[p,q]))
}
}
if(ultrametric==TRUE)
V<-cm1+(s1*ta*cm2)+me.response+diag(as.numeric(me.pred%*%(gls.beta1[2:(n.pred+1),]*gls.beta1[2:(n.pred+1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[2:length(gls.beta1),])))
else
V<-cm1+(s1*ta*cm2)+me.response+diag(as.numeric(me.pred%*%(gls.beta1[4:(n.pred+3),]*gls.beta1[4:(n.pred+3),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[4:length(gls.beta1),])));
X1<-cbind(1, pred)
V.inverse<-solve(V)
ev.beta.i.var<-pseudoinverse(t(X1)%*%V.inverse%*%X1)
ev.beta.i<-ev.beta.i.var%*%(t(X1)%*%V.inverse%*%Y)
glsyx.beta1<-ev.beta.i
} # END OF HALFLIFE 0 CONDITION #
} # END OF RANDOM COVARIATE REGRESSION ESTIMATION
}# END OF FIXED COVARIATE, MIXED OR RANDOM MODELS PARAMETER ESTIMATION
# EVALUATE IF IT IS A FIXED AND RANDOM COVARIATE ANCOVA OR REGRESSION MODEL ESTIMATE PARAMETERS WITH ITERATED GLS TO A) TAKE MEASUREMENT VARIANCE INTO ACCOUNT OR B) RANDOM EFFECTS INTO ACCOUNT IN THE CASE OF THE MIXED MODEL AND REGRESSION
if(model.type == "mmfANCOVA" || model.type=="mfReg")
{
# SET UP INITIAL MATRICES FOR MULTIPLE REGRESSION AND CALCULATE THETA AND SIGMA FOR RANDOM PREDICTOR / S
pred<-data.frame(random.cov);
n.pred<-length(pred[1,]);
pred<-matrix(data=pred[!is.na(pred)], ncol=n.pred);
if(is.null(me.random.cov)) me.pred<-matrix(data=0, nrow=N, ncol=n.pred) else me.pred<-matrix(data=me.random.cov[!is.na(me.random.cov)], ncol=n.pred);
if(is.null(mecov.random.cov)) me.cov<-matrix(data=0, nrow=N, ncol=n.pred) else me.cov<-matrix(data=mecov.random.cov[!is.na(mecov.random.cov)], ncol=n.pred);
s.X<-matrix(data=0, ncol=n.pred) # PREDICTOR SIGMA
for(i in 1:n.pred)
{
s.X[,i] <- as.numeric(sigma.X.estimate(pred[,i],me.pred[,i], topology, times)[2]);
}
theta.X<-matrix(data=0, ncol=n.pred) #PREDICTOR THETA
for(i in 1:n.pred)
{
theta.X[,i] <- as.numeric(sigma.X.estimate(pred[,i],me.pred[,i], topology, times)[1]);
}
# END OF RANDOM PREDICTOR THETA AND SIGMA ESTIMATES
# FIXED COVARIATES
fixed.pred<-data.frame(fixed.cov);
n.fixed.pred<-length(fixed.pred[1,]);
fixed.pred<-matrix(data=fixed.pred[!is.na(fixed.pred)], ncol=n.fixed.pred);
if(is.null(me.fixed.cov)) me.fixed.pred<-matrix(data=0, nrow=N, ncol=n.fixed.pred) else me.fixed.pred<- matrix(data=me.fixed.cov[!is.na(me.fixed.cov)], ncol=n.fixed.pred);
# BUGFIX(review): the non-NULL branch referenced 'me.cov.fixed.cov', which is undefined; the
# argument being tested is named 'mecov.fixed.cov', so supplying it raised "object not found".
if(is.null(mecov.fixed.cov)) me.fixed.cov<-matrix(data=0, nrow=N, ncol=n.fixed.pred) else me.fixed.cov<-matrix(data=mecov.fixed.cov[!is.na(mecov.fixed.cov)], ncol=n.fixed.pred);
## INITIAL OLS ESTIMATES TO SEED ITERATED GLS
if(model.type=="mfReg")
{
x.ols<-cbind(1, fixed.pred, pred);
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);
if(ultrametric == FALSE) beta1<-rbind(0, 0, beta1); # 2 additional parameter seeds for Ya and Xa
}
if(model.type=="mmfANCOVA")
{
regime.specs<-fixed.fact;
n.fixed<-length(levels(as.factor(regime.specs)))
regime.specs<-as.factor(regime.specs)
x.ols<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred); beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);
}
# GRID ESTIMATION ROUTINE AND ITERATED GLS FOR MODELS THAT INCLUDE RANDOM EFFECTS
if(model.type=="mmfANCOVA")
{
cat(c(" ", "t1/2 ", "Vy ", "Supp ", GS_head, if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), sep=" ");
message(" ");
for(i in 1:length(half_life_values))
{
for(k in 1:length(vy_values))
{
if(half_life_values[i]==0) a<-1000000000000000000000 else a <- ln2/half_life_values[i];
vy <- vy_values[k];
X<-cbind(weight.matrix(a, topology, times, N, regime.specs, fixed.pred, intercept), (1-(1-exp(-a*T))/(a*T))*pred);
if(length(X[1,]) > length(beta1)) {beta1<-as.matrix(c(0, beta1)); n.fixed<-n.fixed+1}
if(length(X[1,])< length(beta1)) {beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);n.fixed<-length(levels(as.factor(regime.specs))); print("The Ya parameter is dropped as its coefficient is too small");}
# CODE FOR ESTIMATING BETA USING ITERATED GLS
con.count<-0; # Counter for loop break if Beta's dont converge #
repeat
{
if(half_life_values[i]==0)
{
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred);
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[(n.fixed+1+n.fixed.pred):length(beta1),]*beta1[(n.fixed+1+n.fixed.pred):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1+n.fixed.pred):length(beta1),]))) + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):(length(beta1)-n.pred),]*beta1[(n.fixed+1):(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[(n.fixed+1):(length(beta1)-n.pred),])));
}
else
{
X<-cbind(weight.matrix(a, topology, times, N, regime.specs, fixed.pred, intercept), (1-(1-exp(-a*T))/(a*T))*pred);
s1<-as.numeric(s.X%*%(beta1[(n.fixed+1+n.fixed.pred):length(beta1),]*beta1[(n.fixed+1+n.fixed.pred):length(beta1),]));
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p]);
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij);
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/ (a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]));
}
}
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(n.fixed+1+n.fixed.pred):length(beta1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)));
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(n.fixed+1+n.fixed.pred):length(beta1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)));
mv.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.pred)*t(kronecker(beta1[(n.fixed+1):(length(beta1)-n.pred), ], rep(1, times=N))), ncol=n.fixed.pred)));
mcov.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.cov)*t(kronecker(2*beta1[(n.fixed+1):(length(beta1)-n.pred),], rep(1, times=N))), ncol=n.fixed.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv+ mv.fixed-mcov-mcov.fixed;
} # END OF If ELSE CONDITION FOR HALF-LIFE 0 OR NOT
# INTERMEDIATE ESTIMATION OF OPTIMAL REGRESSION #
V.inverse<-solve(V)
if(half_life_values[i]==0)
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
else
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
}
### END OF ITERATED GLS ESTIMATION FOR BETA #
if(half_life_values[i]==0)
{
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[(n.fixed+1+n.fixed.pred):length(beta1),]*beta1[(n.fixed+1+n.fixed.pred):length(beta1),])))-diag(as.numeric(me.cov%*%(2*beta1[(n.fixed+1+n.fixed.pred):length(beta1),]))) + diag(as.numeric(me.fixed.pred%*%(beta1[(n.fixed+1):(length(beta1)-n.pred),]*beta1[(n.fixed+1):(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[(n.fixed+1):(length(beta1)-n.pred),])));
V.inverse<-solve(V)
eY<-X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
}
else
{
s1<-as.numeric(s.X%*%(beta1[(n.fixed+1+n.fixed.pred):length(beta1),]*beta1[(n.fixed+1+n.fixed.pred):length(beta1),]));
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p]);
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij);
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/(a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]));
}
}
X<-cbind(weight.matrix(a, topology, times, N, regime.specs, fixed.pred, intercept), (1-(1-exp(-a*T))/(a*T))*pred);
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(n.fixed+1+n.fixed.pred):length(beta1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)));
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(n.fixed+1+n.fixed.pred):length(beta1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)));
mv.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.pred)*t(kronecker(beta1[(n.fixed+1):(length(beta1)-n.pred), ], rep(1, times=N))), ncol=n.fixed.pred)));
mcov.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.cov)*t(kronecker(2*beta1[(n.fixed+1):(length(beta1)-n.pred),], rep(1, times=N))), ncol=n.fixed.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv+ mv.fixed-mcov-mcov.fixed;
V.inverse<-solve(V)
eY<-X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
} # END OF CONDITION FOR HALF-LIFE = 0 #
print(as.numeric(round(cbind(if(a!=0)log(2)/a else 0.00, vy, gof[i,k], t(beta1)), 4)))
}
}
# END OF GRID SETUP,START OF GRID SEARCH FOR BEST ALPHA AND VY ESTIMATES #
# Grid-search post-processing (mmfANCOVA): find the ML (alpha, vy) cell, then floor the
# log-likelihood surface at ml - support so only the support region remains.
x<-rev(half_life_values)
y<-vy_values
z<-gof;
ml<-max(z);
# alpha = ln(2)/t_half; a half-life of 0 gives alpha.est = Inf, handled by the dedicated
# branch below. Exact equality with ml is safe: ml is the max of these same stored values.
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]==ml){alpha.est=log(2)/half_life_values[i]; vy.est=vy_values[j]}
}
}
# Truncate grid values more than 'support' units below the maximum.
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]<=ml-support)gof[i, j]=ml-support;
}
}
gof=gof-ml
n.fixed<-length(levels(as.factor(regime.specs))) ### reset before final regression
# FINAL OPTIMAL REGRESSION USING BEST ALPHA AND VY ESTIMATES #
if(alpha.est==Inf || alpha.est >=1000000000000000000000)
{
x.ols<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred)
gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
con.count<-0;
repeat
{
s1<-as.numeric(s.X%*%(gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]));
# BUGFIX(review): use fixed.pred (the processed fixed-covariate matrix built above), consistent
# with the OLS seed x.ols and the final X recomputed after this loop; fixed.cov is the raw
# input data.frame and may still contain NAs.
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]))) + diag(as.numeric(me.fixed.pred%*%(gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),]*gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),])));
V.inverse<-solve(V)
beta.i.var<-ev.beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
}
gls.beta1<-beta.i
X<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]))) + diag(as.numeric(me.fixed.pred%*%(gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),]*gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),])));
pred.mean<-X%*%gls.beta1
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
}
else
{
x.ols<-cbind(weight.matrix(1000000000000000000000, topology, times, N, regime.specs, fixed.pred, intercept), pred)
gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
con.count<-0;
X<-cbind(weight.matrix(alpha.est, topology, times, N, regime.specs, fixed.pred, intercept), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred);
if(length(X[1,]) > length(gls.beta1)) {gls.beta1<-as.matrix(c(0, gls.beta1)); n.fixed<-n.fixed+1}
if(length(X[1,])< length(gls.beta1)) {gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y);n.fixed<-length(levels(as.factor(regime.specs)))}
repeat
{
X<-cbind(weight.matrix(alpha.est, topology, times, N, regime.specs, fixed.pred, intercept), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred);
s1<-as.numeric(s.X%*%(gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),]));
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-alpha.est*ta[q,p]))/(alpha.est*ta[q,p])
}
}
cm1<-(s1/(2*alpha.est)+vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*((1-exp(-alpha.est*T[q]))/(alpha.est*T[q]))-(exp(-alpha.est*tia[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[q])+ exp(-alpha.est*tja[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*(num.prob[p,q]))
}
}
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)));
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)));
mv.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.pred)*t(kronecker(gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred), ], rep(1, times=N))), ncol=n.fixed.pred)));
mcov.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.cov)*t(kronecker(2*gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),], rep(1, times=N))), ncol=n.fixed.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv+ mv.fixed-mcov-mcov.fixed;
V.inverse<-solve(V)
beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(length(beta.i)))
for(f in 1:(length(beta.i)))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
X<-cbind(weight.matrix(alpha.est, topology, times, N, regime.specs, fixed.pred, intercept), (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)));
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[(n.fixed+1+n.fixed.pred):length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)));
mv.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.pred)*t(kronecker(gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred), ], rep(1, times=N))), ncol=n.fixed.pred)));
mcov.fixed<-diag(rowSums(matrix(data=as.numeric(me.fixed.cov)*t(kronecker(2*gls.beta1[(n.fixed+1):(length(gls.beta1)-n.pred),], rep(1, times=N))), ncol=n.fixed.pred)));
V<-cm1+(s1*ta*cm2)+me.response+mv+ mv.fixed-mcov-mcov.fixed;
pred.mean<-X%*%gls.beta1
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
}
}
# END OF ITERATED GLS LOOP #
} # END OF ESTIMATION MIXED MODEL ANCOVA
if(model.type=="mfReg")
{
if(ultrametric==TRUE)
{
cat(c(" ", "t1/2 ", "Vy ", "Supp ", "K ",if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), sep=" ");
}
else
cat(c(" ", "t1/2 ", "Vy ", "Supp ", "Ya ", "Xa " ,"Bo ", if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), sep=" ");
message(" ");
for(i in 1:length(half_life_values))
{
for(k in 1:length(vy_values))
{
if(half_life_values[i]==0)
{
x.ols<-cbind(1, fixed.pred, pred)
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
vy <- vy_values[k];
}
else
{
a <- ln2/half_life_values[i];
vy <- vy_values[k];
x.ols<-cbind(1,fixed.pred, pred)
if(ultrametric==TRUE)
beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
else
beta1<-rbind(0, 0, solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y))
}
### CODE FOR ESTIMATING BETA USING ITERATED GLS ###
con.count<-0; # Counter for loop break if Beta's dont converge #
repeat
{
if(half_life_values[i]==0)
{
a<-Inf
s1<-as.numeric(s.X%*%(beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]))
X<-cbind(1, fixed.pred, pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),])))-diag(as.numeric(me.cov%*%(2*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]))) + diag(as.numeric(me.fixed.pred%*%(beta1[2:(length(beta1)-n.pred),]*beta1[2:(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[2:(length(beta1)-n.pred),])));
}
else
{
if(ultrametric==TRUE)
s1<-as.numeric(s.X%*%(beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]))
else
s1<-as.numeric(s.X%*%(beta1[(4+n.fixed.pred):(n.pred+n.fixed.pred+3),]*beta1[(4+n.fixed.pred):(n.pred+n.fixed.pred+3),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p])
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/(a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]))
}
}
if(ultrametric==TRUE)
{
X<-cbind(1, fixed.pred, (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov + diag(as.numeric(me.fixed.pred%*%(beta1[2:(length(beta1)-n.pred),]*beta1[2:(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[2:(length(beta1)-n.pred),])));
}
else
{
nu.X<-cbind(1-exp(-a*T), 1-exp(-a*T)-(1-(1-exp(-a*T))/(a*T)), exp(-a*T), fixed.pred, (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(4+n.fixed.pred):(n.pred+n.fixed.pred+3), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(4+n.fixed.pred):(n.pred+n.fixed.pred+3),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov + diag(as.numeric(me.fixed.pred%*%(beta1[4:(length(beta1)-n.pred),]*beta1[4:(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[4:(length(beta1)-n.pred),])));
}
} # END OF ELSE CONDITION FOR HALF-LIFE = 0
# INTERMEDIATE ESTIMATION OF OPTIMAL REGRESSION #
V.inverse<-solve(V)
if(half_life_values[i]==0)
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+n.fixed.pred+1))
for(f in 1:(n.pred+n.fixed.pred+1))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
else
{
if(ultrametric==TRUE)
{
beta.i<-pseudoinverse(t(X)%*%V.inverse%*%X)%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+n.fixed.pred+1))
for(f in 1:(n.pred+n.fixed.pred+1))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
else
{
beta.i<-pseudoinverse(t(nu.X)%*%V.inverse%*%nu.X)%*%(t(nu.X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+n.fixed.pred))
for(f in 4:(n.pred+n.fixed.pred+3))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[(f-3)]=0 else test[(f-3)]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
} # END OF HALF-LIFE = 0 CONDITION #
} # END OF ITERATED GLS REPEAT LOOP #
beta1<-beta.i
### END OF ITERATED GLS ESTIMATION FOR BETA #
if(half_life_values[i]==0)
{
s1<-as.numeric(s.X%*%(beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]))
X<-cbind(1, fixed.pred,pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred++1),]*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),])))-diag(as.numeric(me.cov%*%(2*beta1[(2+n.fixed.pred):(n.pred+n.fixed.pred+1),]))) + diag(as.numeric(me.fixed.pred%*%(beta1[2:(length(beta1)-n.pred),]*beta1[2:(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[2:(length(beta1)-n.pred),])))
V.inverse<-solve(V)
eY<-X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
}
else
{
if(ultrametric==TRUE)
s1<-as.numeric(s.X%*%(beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]))
else
s1<-as.numeric(s.X%*%(beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]*beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-a*ta[q,p]))/(a*ta[q,p]);
}
}
cm1<-(s1/(2*a)+vy)*(1-exp(-2*a*ta))*exp(-a*tij);
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-a*T[p]))/(a*T[p]))*((1-exp(-a*T[q]))/(a*T[q]))-(exp(-a*tia[p, q])*(1-exp(-a*T[p]))/(a*T[q])+ exp(-a*tja[p, q])*(1-exp(-a*T[p]))/(a*T[p]))*(num.prob[p,q]));
}
}
if(ultrametric==TRUE)
{
X<-cbind(1, fixed.pred, (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov+ diag(as.numeric(me.fixed.pred%*%(beta1[2:(length(beta1)-n.pred),]*beta1[2:(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[2:(length(beta1)-n.pred),])));
}
else
{
nu.X<-cbind(1-exp(-a*T), 1-exp(-a*T)-(1-(1-exp(-a*T))/(a*T)), exp(-a*T), fixed.pred, (1-(1-exp(-a*T))/(a*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred), ], (1-(1-exp(-a*T))/(a*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov + diag(as.numeric(me.fixed.pred%*%(beta1[4:(length(beta1)-n.pred),]*beta1[4:(length(beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*beta1[4:(length(beta1)-n.pred),])));
}
V.inverse<-solve(V)
if(ultrametric==TRUE)
eY<-X%*%beta1
else
eY<-nu.X%*%beta1
resid<-Y-eY;
gof[i, k] <- -N/2*log(2*pi)-0.5*log(det(V))-0.5*(t(resid) %*% V.inverse%*%resid);
} # END OF CONDITION FOR HALF-LIFE = 0 #
print(as.numeric(round(cbind(if(a!=0)log(2)/a else 0.00, vy, gof[i,k], t(beta1)), 4)))
}
}
x<-rev(half_life_values)
y<-vy_values
z<-gof;
ml<-max(z);
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]==ml){alpha.est=log(2)/half_life_values[i]; vy.est=vy_values[j]}
}
}
for(i in 1:length(half_life_values))
{
for(j in 1:length(vy_values))
{
if(gof[i,j]<=ml-support)gof[i, j]=ml-support;
}
}
gof=gof-ml
# FINAL OPTIMAL REGRESSION USING BEST ALPHA AND VY ESTIMATES #
if(alpha.est==Inf)
{
gls.beta1<-glsyx.beta1<- solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
con.count<-0 # counter to break loop in the event of non-convergence
repeat
{
s1<-as.numeric(s.X%*%(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]))
X<-cbind(1, fixed.pred, pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(2+n.fixed.pred):length(gls.beta1),]))) + diag(as.numeric(me.fixed.pred%*%(gls.beta1[2:(length(gls.beta1)-n.pred),]*gls.beta1[2:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[2:(length(gls.beta1)-n.pred),])));
V.inverse<-solve(V)
beta.i.var<-ev.beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+n.fixed.pred+1))
for(f in 1:(n.pred+1+n.fixed.pred))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-glsyx.beta1<-beta.i
}
gls.beta1<-glsyx.beta1<-beta.i
X<-cbind(1, fixed.pred,pred)
V<-diag(rep(vy, times=N))+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(2+n.fixed.pred):length(gls.beta1),]))) + diag(as.numeric(me.fixed.pred%*%(gls.beta1[2:(length(gls.beta1)-n.pred),]*gls.beta1[2:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[2:(length(gls.beta1)-n.pred),])));
pred.mean<-X%*%gls.beta1
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
}
else
{
if(ultrametric==TRUE)
gls.beta1<-solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y)
else
gls.beta1<-rbind(0, 0, solve(t(x.ols)%*%x.ols)%*%(t(x.ols)%*%Y));
con.count<-0;
repeat
{
if(ultrametric==TRUE)
s1<-as.numeric(s.X%*%(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]))
else
s1<-as.numeric(s.X%*%(gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]*gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]))
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-alpha.est*ta[q,p]))/(alpha.est*ta[q,p])
}
}
cm1<-(s1/(2*alpha.est)+vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*((1-exp(-alpha.est*T[q]))/(alpha.est*T[q]))-(exp(-alpha.est*tia[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[q])+ exp(-alpha.est*tja[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*(num.prob[p,q]))
}
}
if(ultrametric==TRUE)
{
X<-cbind(1, fixed.pred,(1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[(2+n.fixed.pred):length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov+ diag(as.numeric(me.fixed.pred%*%(gls.beta1[2:(length(gls.beta1)-n.pred),]*gls.beta1[2:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[2:(length(gls.beta1)-n.pred),]))); }
else
{
nu.X<-cbind(1-exp(-alpha.est*T), 1-exp(-alpha.est*T)-(1-(1-exp(-alpha.est*T))/(alpha.est*T)), exp(-alpha.est*T), fixed.pred, (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[(4+n.fixed.pred):length(gls.beta1),], (1-(1-exp(-a*T))/(a*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov + diag(as.numeric(me.fixed.pred%*%(gls.beta1[4:(length(gls.beta1)-n.pred),]*gls.beta1[4:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[4:(length(gls.beta1)-n.pred),])));
}
V.inverse<-solve(V)
if(ultrametric==TRUE)
{
beta.i.var<-pseudoinverse(t(X)%*%V.inverse%*%X)
beta.i<-beta.i.var%*%(t(X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred+1+n.fixed.pred))
for(f in 1:(n.pred+1+n.fixed.pred))
{
if(abs(as.numeric(beta.i[f]-gls.beta1[f]))<=convergence) test[f]=0 else test[f]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
gls.beta1<-beta.i
}
else
{
beta.i.var<-pseudoinverse(t(nu.X)%*%V.inverse%*%nu.X)
beta.i<-beta.i.var%*%(t(nu.X)%*%V.inverse%*%Y)
test<-matrix(nrow=(n.pred))
for(f in 4:(n.pred+3+n.fixed.pred))
{
if(abs(as.numeric(beta.i[f]-beta1[f]))<=convergence) test[(f-3)]=0 else test[(f-3)]=1
}
if(sum(test)==0) break
con.count=con.count+1
if(con.count >= 50)
{
message("Warning, Beta estimates did not converge after 50 iterations, last estimates printed out")
break
}
beta1<-beta.i
}
}
# END OF ITERATED GLS LOOP #
# CODE FOR SST, SSE AND R-SQUARED #
if(ultrametric==TRUE)
gls.beta1<-beta.i
else
{
gls.beta1<-beta.i
ind.par<-matrix(data=0, nrow=N, ncol=4, dimnames=list(NULL, c("Bo", "Bi.Xia", "Yo", "Sum")))
ind.par[,1]<-beta.i[1]*nu.X[,1]
ind.par[,2]<-(beta.i[2]*nu.X[,2])
ind.par[,3]<-beta.i[3]*nu.X[,3]
ind.par[,4]<-ind.par[,1]+ind.par[,2]+ind.par[,3]
mean.Bo=mean(ind.par[,4])
}
if(ultrametric==TRUE)
{
X<-cbind(1, fixed.pred,(1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*gls.beta1[(2+n.fixed.pred):length(gls.beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov+ diag(as.numeric(me.fixed.pred%*%(beta1[2:(length(gls.beta1)-n.pred),]*gls.beta1[2:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[2:(length(gls.beta1)-n.pred),])))
pred.mean<-X%*%gls.beta1
}
else
{
nu.X<-cbind(1-exp(-alpha.est*T), 1-exp(-alpha.est*T)-(1-(1-exp(-alpha.est*T))/(alpha.est*T)), exp(-alpha.est*T),fixed.pred, (1-(1-exp(-alpha.est*T))/(alpha.est*T))*pred)
mv<-diag(rowSums(matrix(data=as.numeric(me.pred)*t(kronecker(beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred), ], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))^2), ncol=n.pred)))
mcov<-diag(rowSums(matrix(data=as.numeric(me.cov)*t(kronecker(2*beta1[(4+n.fixed.pred):length(beta1),], (1-(1-exp(-alpha.est*T))/(alpha.est*T)))), ncol=n.pred)))
V<-cm1+(s1*ta*cm2)+me.response+mv-mcov+ diag(as.numeric(me.fixed.pred%*%(gls.beta1[4:(length(gls.beta1)-n.pred),]*gls.beta1[4:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[4:(length(gls.beta1)-n.pred),])))
pred.mean<-nu.X%*%gls.beta1
}
g.mean<-(t(rep(1, times=N))%*%solve(V)%*%Y)/sum(solve(V));
sst<-t(Y-g.mean)%*% solve(V)%*%(Y-g.mean)
sse<-t(Y-pred.mean)%*%solve(V)%*%(Y-pred.mean)
r.squared<-(sst-sse)/sst
# FINAL EVOLUTIONARY REGRESSION USING BEST ALPHA AND VY ESTIMATES AND KNOWN VARIANCE MATRIX #
if(ultrametric==TRUE) s1<-as.numeric(s.X%*%(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]))
else s1<-as.numeric(s.X%*%(gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]*gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]));
for(p in 1:N)
{
for(q in 1:N)
{
if(ta[q,p]==0)num.prob[q,p]=1 else num.prob[q,p]=(1-exp(-alpha.est*ta[q,p]))/(alpha.est*ta[q,p])
}
}
cm1<-(s1/(2*alpha.est)+vy.est)*(1-exp(-2*alpha.est*ta))*exp(-alpha.est*tij)
for(p in 1:N)
{
for(q in 1:N)
{
cm2[p,q]<-(((1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*((1-exp(-alpha.est*T[q]))/(alpha.est*T[q]))-(exp(-alpha.est*tia[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[q])+ exp(-alpha.est*tja[p, q])*(1-exp(-alpha.est*T[p]))/(alpha.est*T[p]))*(num.prob[p,q]))
}
}
if(ultrametric==TRUE)
V<-cm1+(s1*ta*cm2)+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),]*gls.beta1[(2+n.fixed.pred):(n.pred+1+n.fixed.pred),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(2+n.fixed.pred):length(gls.beta1),]))) + diag(as.numeric(me.fixed.pred%*%(gls.beta1[2:(length(gls.beta1)-n.pred),]*gls.beta1[2:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[2:(length(gls.beta1)-n.pred),])))
else
V<-cm1+(s1*ta*cm2)+me.response+diag(as.numeric(me.pred%*%(gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),]*gls.beta1[(4+n.fixed.pred):(n.pred+3+n.fixed.pred),])))-diag(as.numeric(me.cov%*%(2*gls.beta1[(4+n.fixed.pred):length(gls.beta1),])))+ diag(as.numeric(me.fixed.pred%*%(gls.beta1[4:(length(gls.beta1)-n.pred),]*gls.beta1[4:(length(gls.beta1)-n.pred),])))-diag(as.numeric(me.fixed.cov%*%(2*gls.beta1[4:(length(gls.beta1)-n.pred),])))
X1<-cbind(1, fixed.pred, pred)
V.inverse<-solve(V)
ev.beta.i.var<-pseudoinverse(t(X1)%*%V.inverse%*%X1)
ev.beta.i<-ev.beta.i.var%*%(t(X1)%*%V.inverse%*%Y)
glsyx.beta1<-ev.beta.i
} # END OF HALFLIFE 0 CONDITION #
} # END OF RANDOM AND FIXED COVARIATE REGRESSION ESTIMATION
}# END OF FIXED AND RANDOM COVARIATE ANCOVA AND REGRESSION PARAMETER ESTIMATION
#### END OF NEW CODE
# PLOT THE SUPPORT SURFACE FOR HALF-LIVES AND VY
if(length(half_life_values) > 1 && length(vy_values) > 1){
z1<-gof
for(i in 1:length(vy_values)){
h.lives[,i]=rev(z1[,i])
}
z<-h.lives
op <- par(bg = "white")
persp(x, y, z, theta = 30, phi = 30, expand = 0.5, col = "NA")
persp(x, y, z, theta = 30, phi = 30, expand = 0.5, col = "NA",
ltheta = 120, shade = 0.75, ticktype = "detailed",
xlab = "half-life", ylab = "vy", zlab = "log-likelihood") -> res
}
# MODEL OUTPUT
# alpha, half-lives, correction factor, v
message("==================================================")
half.life<-log(2)/alpha.est
c.factor<-mean(1-(1-exp(-alpha.est*T))/(alpha.est*T))
modeloutput<-matrix(data=0, nrow=4, ncol=1, dimnames=list(c("Rate of adaptation ", "Phylogenetic half-life ","Phylogenetic correction factor", "Stationary variance "), " Estimate"))
modeloutput[1, 1]=alpha.est; modeloutput[2, 1]=half.life; modeloutput[3,1]=c.factor; modeloutput[4,1]=vy.est; ##### Rememeber to output s.X
modfit<-matrix(data=0, nrow=7, ncol=1, dimnames=list(c("Support", "AIC", "AICc", "SIC", "r squared", "SST", "SSE"),("Value")))
#if(ultrametric==TRUE) n.par=1+n.pred else n.par=3+n.pred
if(model.type=="ffANOVA" || model.type=="fReg" || model.type=="ffANCOVA") n.par<-length(gls.beta0)
if(model.type == "mmANCOVA" || model.type=="rReg" || model.type=="mfReg" || model.type=="mmfANCOVA") n.par<-length(beta1)
modfit[1,1]=ml
modfit[2,1]=-2*ml+2*(2+n.par)
modfit[3,1]=modfit[2,1]+(2*(2+n.par)*((2+n.par)+1))/(N-(2+n.par)-1)
modfit[4,1]=-2*ml+log(N)*(2+n.par)
modfit[5,1]=r.squared*100
modfit[6,1]=sst
modfit[7,1]=sse
message("");
message("BEST ESTIMATES & MODEL FIT");message("");
message("==================================================");
message("MODEL PARAMETERS");
print(modeloutput);message("");
# predictor means and variances for random predictors
if(model.type == "mmANCOVA" || model.type=="rReg" || model.type=="mfReg" || model.type=="mmfANCOVA")
{
print(matrix(data=rbind(theta.X, s.X), nrow=2, ncol=n.pred, dimnames=list(c("Predictor theta", "Predictor variance"), if(n.pred==1) deparse(substitute(random.cov)) else colnames(random.cov))));
message("");
}
# PRIMARY OPTIMA OR REGRESSION SLOPE ESTIMATES
message("--------------------------------------------------");
message("PRIMARY OPTIMA");message("");
if(model.type=="IntcptReg")
{
if(ultrametric==TRUE || alpha.est==Inf || alpha.est>=1000000000000000){
Intercept<-matrix(nrow=1, ncol=2, dimnames=list(("Theta_global"), c("Estimate", "Std.error")))
Intercept[,1]<-gls.beta0
Intercept[,2]<-sqrt(beta.i.var)}
else {
Intercept<-matrix(data=0, nrow=2, ncol=1, dimnames=list(c("Bo", "Ya"), (" Estimate")))
Intercept[1,1]<-beta.i[1]
Intercept[2,1]<-beta.i[2]
}
print(Intercept); message("")
}
if(model.type=="ffANOVA")
{
std<-sqrt(diag(beta.i.var))
optima<-matrix(data=0, nrow=ncol(X), ncol=2, dimnames = list(colnames(X), c("Estimates", "Std.error")));
optima[,1] = gls.beta0;
optima[,2] = std;
reg <- set.of.regimes(topology,regime.specs);
root.reg<-as.character(regime.specs[times==0])
nonroot.reg<-as.character(reg[reg != root.reg])
if(is.null(intercept))
{
if(ncol(X) == length(reg)) message ("The ancestral state (Ya) parameter was dropped from this model as there is not enough information to estimate it") else
if(ncol(X)<length(reg)) message ("Ya and the parameter at the root were dropped") else
message("this model does not drop Ya as it may influence the other parameters")
}
else
{
if(intercept=="root") message(root.reg, " ", "mapped to the root of the tree and includes the coefficent for the ancestral state (Ya)") else
message("you set the intercept coefficent to a value of", " ", intercept,". Ya is not the true ancestral state anymore")
}
print(optima);message("");
}
if(model.type== "fReg")
{
std<-sqrt(diag(beta.i.var))
optima<-matrix(data=0, nrow=(nrow(gls.beta0)), ncol=2, dimnames=list(c("Bo", if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov)), c("Estimate", "Std. Error")))
optima[,1] = gls.beta0;
optima[,2] = std;
print(optima);message("");
}
if(model.type=="ffANCOVA")
{
std<-sqrt(diag(beta.i.var))
optima<-matrix(data=0, nrow=ncol(X), ncol=2, dimnames = list(c(as.character(levels(fixed.fact)), if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov)), c("Estimates", "Std.error")));
optima[,1] = gls.beta0;
optima[,2] = std;
print(optima);message("");
}
if(model.type == "mmANCOVA")
{
std<-sqrt(diag(beta.i.var))
if(length(X[1,]) > length(x.ols[1,])) optima<-matrix(data=0, nrow=ncol(X), ncol=2, dimnames = list(c(c("Ya",as.character(levels(fixed.fact))), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimates", "Std.error")))
else
optima<-matrix(data=0, nrow=ncol(X), ncol=2, dimnames = list(c(as.character(levels(fixed.fact)), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimates", "Std.error")));
optima[,1] = gls.beta1;
optima[,2] = std;
print(optima)
}
if(model.type == "mmfANCOVA")
{
std<-sqrt(diag(beta.i.var))
if(length(X[1,]) > length(x.ols[1,])) optima<-matrix(data=0, nrow=ncol(X), ncol=2, dimnames = list(c(c("Ya",as.character(levels(fixed.fact))),if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimates", "Std.error")))
else
optima<-matrix(data=0, nrow=ncol(X), ncol=2, dimnames = list(c(as.character(levels(fixed.fact)), if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov),if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimates", "Std.error")));
optima[,1] = gls.beta1;
optima[,2] = std;
print(optima)
}
if(model.type=="rReg")
{
if(ultrametric==TRUE || alpha.est == Inf)
opreg<-matrix(data=0, nrow=(nrow(gls.beta1)), ncol=2, dimnames=list(c("K", if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))
else
{
if(alpha.est != Inf)
opreg<-matrix(data=0, nrow=(nrow(gls.beta1)), ncol=2, dimnames=list(c("Xa", "Bo","Ya" ,if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))
else opreg<-matrix(data=0, nrow=(nrow(gls.beta1)), ncol=2, dimnames=list(c("K", if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))}
opreg[,1] =round(gls.beta1, 5)
opreg[,2]= round(sqrt(diag(beta.i.var)),5)
if(model.type=="rReg")
{
evreg<-matrix(data=0, nrow=(nrow(glsyx.beta1)), ncol=2, dimnames=list(c("Intercept", if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))
evreg[,1] =round(glsyx.beta1, 5)
evreg[,2]= round(sqrt(diag(ev.beta.i.var)),5)
message("Evolutionary regression"); message("")
print(evreg);
message("");
}
message("Optimal regression"); message("")
print(opreg);
if(model.type=="rReg" && ultrametric==TRUE && alpha.est != Inf)
{
message("")
message("Decomposition of K assuming Ya = Xa to get the optimal regression intercept Bo")
message("")
bo<-opreg[1,1] + (c.factor-1)*(sum(gls.beta1[-1]*theta.X))
print(bo)
message("")
message("(Use this as the intercept when plotting the regression line)")
message("")
}
}
if(model.type=="mfReg")
{
if(ultrametric==TRUE || alpha.est == Inf)
opreg<-matrix(data=0, nrow=(nrow(gls.beta1)), ncol=2, dimnames=list(c("K",if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov),if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))
else
{
if(alpha.est != Inf)
opreg<-matrix(data=0, nrow=(nrow(gls.beta1)), ncol=2, dimnames=list(c("Xa", "Bo","Ya" ,if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov),if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))
else opreg<-matrix(data=0, nrow=(nrow(gls.beta1)), ncol=2, dimnames=list(c("K", if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov),if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))}
opreg[,1] =round(gls.beta1, 5)
opreg[,2]= round(sqrt(diag(beta.i.var)),5)
if(model.type=="mfReg")
{
evreg<-matrix(data=0, nrow=(nrow(glsyx.beta1)), ncol=2, dimnames=list(c("Intercept",if(is.null(dim(fixed.cov))) deparse(substitute(fixed.cov)) else colnames(fixed.cov), if(is.null(dim(random.cov))) deparse(substitute(random.cov)) else colnames(random.cov)), c("Estimate", "Std. Error")))
evreg[,1] =round(glsyx.beta1, 5)
evreg[,2]= round(sqrt(diag(ev.beta.i.var)),5)
message("Evolutionary regression"); message("")
print(evreg);
message("");
}
message("Optimal regression"); message("")
print(opreg);
if(model.type=="mfReg" && ultrametric==TRUE && alpha.est != Inf)
{
message("")
message("Decomposition of K assuming Ya = Xa to get the optimal regression intercept Bo")
message("")
bo<-opreg[1,1] + (c.factor-1)*(sum(gls.beta1[-(1:(1+n.fixed.pred))]*theta.X))
print(bo)
message("")
message("(Use this as the intercept when plotting the regression line)")
message("")
}
}
message("--------------------------------------------------");
message("MODEL FIT");message("");
print(modfit); message("");
message("==================================================");
} # END OF MODEL FITTING FUNCTION
|
61326fc9ca2a6ecb66ea6e77fbb97e0bf21ffe98
|
b4846c2330b9a5528af4c2df65f0c3fdeae789ce
|
/Higher Terms/higher_terms.R
|
3bd716cfb0dd0a3294bb31b8fe95a5b7e72426d4
|
[] |
no_license
|
devitrylouis/degree_project
|
a0f96e275a340411a42dfa3dc62487510d4faecc
|
72585664377016a986d6f41d9900f67c3be67085
|
refs/heads/master
| 2021-06-15T12:40:54.229035
| 2017-04-20T13:48:15
| 2017-04-20T13:48:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,728
|
r
|
higher_terms.R
|
higher_terms <- function(df, k_max) {
  # Iteratively augment `df` with polynomial terms of a fixed list of numeric
  # housing predictors, selected by nested-model F-tests.
  #
  # Each pass fits the full model SalePrice ~ . and, for every remaining
  # candidate predictor and every power k = 2..k_max, compares it (via anova)
  # against the same model plus the predictor raised to k.  The single
  # (predictor, power) pair with the smallest p-value is appended to `df` as a
  # new column named "<predictor> <power>".  Candidates that are
  # non-significant (p > 0.01) at every power are dropped from further
  # consideration, and the loop repeats until no candidates remain.
  #
  # Args:
  #   df:    data frame containing a numeric SalePrice column and all of the
  #          hard-coded predictor columns listed below.
  #   k_max: highest power to consider (must be >= 2).
  #
  # Returns:
  #   `df` with the selected higher-order columns appended.
  predictors <- c("MSSubClass", "LotFrontage", "LotArea", "OverallQual", "OverallCond",
                  "YearBuilt", "YearRemodAdd", "BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF",
                  "1stFlrSF", "2ndFlrSF", "LowQualFinSF", "BsmtFullBath", "BsmtHalfBath",
                  "FullBath", "HalfBath", "BedroomAbvGr", "KitchenAbvGr", "TotRmsAbvGrd",
                  "GarageYrBlt", "GarageArea", "WoodDeckSF", "OpenPorchSF", "EnclosedPorch",
                  "3SsnPorch", "ScreenPorch", "MoSold", "YrSold")
  while (length(predictors) > 0) {
    p_value      <- matrix(NA_real_, nrow = k_max - 1, ncol = length(predictors))
    significance <- matrix(FALSE,    nrow = k_max - 1, ncol = length(predictors))
    for (i in seq_along(predictors)) {
      for (k in 2:k_max) {
        # BUG FIX: the original used `df$SalePrice ~ .`, which lets the `.`
        # expansion pull the SalePrice column itself in as a predictor (a
        # saturated baseline fit), invalidating the F-test.  `SalePrice ~ .`
        # excludes the response.
        fit1 <- lm(SalePrice ~ ., data = df)
        fit2 <- lm(SalePrice ~ . + I(df[[predictors[i]]]^k), data = df)
        anov <- anova(fit1, fit2)
        p_value[k - 1, i] <- anov$`Pr(>F)`[2]
        # isTRUE() keeps an NA/NaN p-value (degenerate comparison) from
        # crashing the original bare `if`.
        significance[k - 1, i] <- isTRUE(p_value[k - 1, i] <= 0.01)
      }
    }
    # Stop cleanly if every comparison was degenerate (all p-values NA/NaN).
    if (!any(is.finite(p_value))) break
    # Position of the overall smallest p-value; take the first hit on ties.
    # BUG FIX: the original assigned the whole (row, col) pair returned by
    # which(..., arr.ind = TRUE) to `indices`, so `predictors[-indices]` also
    # dropped the unrelated predictor sitting at position `row`.  Only the
    # column indexes a predictor.
    best      <- which(p_value == min(p_value, na.rm = TRUE), arr.ind = TRUE)[1, ]
    best_row  <- best[1]                 # power index: actual power is best_row + 1
    best_col  <- best[2]                 # predictor index
    best_name <- predictors[best_col]
    # Drop the winner plus every candidate with no significant power at all.
    drop_idx <- best_col
    for (l in seq_along(predictors)) {
      if (!any(significance[, l])) {
        drop_idx <- c(drop_idx, l)
      }
    }
    predictors <- predictors[-unique(drop_idx)]
    # Append the winning higher-order term, e.g. column "LotArea 2".
    degree <- best_row + 1
    df[[paste(best_name, degree)]] <- df[[best_name]]^degree
  }
  df
}
|
0d53ec392ce3eb8b078e60f901b738397d2e8048
|
1496d1fca7f4711766376602239a9608c7efe669
|
/r-programming/corr.R
|
8a4d33065ecc307d74921af4b368ca4e7d15c1fd
|
[] |
no_license
|
mattyb678/coursera-courses
|
98da8d30c8122f7a6435b644e17ec988724b2682
|
a06629500ac9623105380ac1eb792eaa20ba74ea
|
refs/heads/master
| 2021-01-10T07:25:11.685381
| 2016-02-16T17:18:06
| 2016-02-16T17:18:06
| 51,853,875
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
corr.R
|
corr <- function(directory, threshold = 0) {
  # Compute the correlation between nitrate and sulfate readings for every
  # monitor CSV file in `directory` whose count of completely observed
  # (non-NA in both columns) rows strictly exceeds `threshold`.
  #
  # Args:
  #   directory: path to a folder containing one CSV per monitor, each with
  #     (at least) `sulfate` and `nitrate` columns.
  #   threshold: minimum number of complete rows a file must have for its
  #     correlation to be included.
  #
  # Returns:
  #   A numeric vector of correlations, one per qualifying monitor (possibly
  #   empty), in list.files() order.
  files <- list.files(directory)
  # Accumulate in a preallocated list and flatten once, instead of growing a
  # vector with c() inside the loop (O(n^2) copying in the original).
  out <- vector("list", length(files))
  # seq_along() is safe for an empty/missing directory, where the original
  # 1:length(files) produced c(1, 0) and crashed on files[1] == NA.
  for (i in seq_along(files)) {
    data <- read.csv(file.path(directory, files[i]))
    data <- data[!is.na(data$sulfate) & !is.na(data$nitrate), ]
    if (nrow(data) > threshold) {
      out[[i]] <- cor(data$nitrate, data$sulfate)
    }
  }
  # NULL entries (below-threshold files) drop out in unlist(); as.numeric()
  # turns an all-NULL result into numeric(0) to match the original return.
  as.numeric(unlist(out))
}
|
291f39dbf7fb9736f1fafbe373d0c1f29c4470eb
|
1e4d6814b572dcb6ae984261210c74859997bcc4
|
/R/stan_adapted.R
|
62ae97687feaba4715cf99bd28172dbb6575947e
|
[] |
no_license
|
retodomax/cowfit
|
bba01468ea699bcd617f4f6fcec88d8495ec76af
|
e1eacd9d3622c63301bef7da4af4eca88d06747d
|
refs/heads/master
| 2022-11-25T00:18:53.381555
| 2020-07-26T13:01:59
| 2020-07-26T13:01:59
| 277,785,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,656
|
r
|
stan_adapted.R
|
#' stan_glmer() variant that transforms the random-effects design matrix with Lt
#'
#' Near-verbatim copy of rstanarm::stan_glmer() with a single modification
#' (marked "What I change" below): the transposed random-effects model matrix
#' Ztlist[[1]] built by lme4::glFormula() is premultiplied by a matrix `Lt`
#' before fitting.  NOTE(review): `Lt` is not an argument -- it is resolved in
#' the function's environment at call time (presumably the transposed Cholesky
#' factor of a known relationship/covariance matrix, e.g. from a pedigree, so
#' Stan's i.i.d. random effects map onto correlated ones -- confirm).  Consider
#' passing it explicitly.
#'
#' The environment(stan_adapt) assignment at the bottom runs this copy inside
#' the rstanarm namespace so its unexported helpers (validate_data,
#' make_glmerControl, %ORifNULL%, stan_glm.fit, nlist, ...) resolve.  All
#' arguments behave as documented for rstanarm::stan_glmer().
stan_adapt <- function (formula, data = NULL, family = gaussian, subset, weights,
na.action = getOption("na.action", "na.omit"), offset, contrasts = NULL,
..., prior = normal(), prior_intercept = normal(), prior_aux = exponential(),
prior_covariance = decov(), prior_PD = FALSE, algorithm = c("sampling",
"meanfield", "fullrank"), adapt_delta = NULL, QR = FALSE,
sparse = FALSE)
{
# Capture the call and rewrite it into an lme4::glFormula() call, which
# parses the mixed-model formula and builds the design matrices.
call <- match.call(expand.dots = TRUE)
mc <- match.call(expand.dots = FALSE)
data <- validate_data(data)
family <- validate_family(family)
mc[[1]] <- quote(lme4::glFormula)
mc$control <- make_glmerControl(ignore_lhs = prior_PD, ignore_x_scale = prior$autoscale %ORifNULL%
FALSE)
mc$data <- data
# Strip rstanarm-only arguments before evaluating the rewritten lme4 call.
mc$prior <- mc$prior_intercept <- mc$prior_covariance <- mc$prior_aux <- mc$prior_PD <- mc$algorithm <- mc$scale <- mc$concentration <- mc$shape <- mc$adapt_delta <- mc$... <- mc$QR <- mc$sparse <- NULL
glmod <- eval(mc, parent.frame())
X <- glmod$X
if ("b" %in% colnames(X)) {
stop("stan_glmer does not allow the name 'b' for predictor variables.",
call. = FALSE)
}
# Extract the response, unless this is a prior-predictive run with no outcome
# variable in the formula.
if (prior_PD && !has_outcome_variable(formula)) {
y <- NULL
}
else {
y <- glmod$fr[, as.character(glmod$formula[2L])]
if (is.matrix(y) && ncol(y) == 1L) {
y <- as.vector(y)
}
}
offset <- model.offset(glmod$fr) %ORifNULL% double(0)
weights <- validate_weights(as.vector(model.weights(glmod$fr)))
# Binomial outcomes supplied as proportions + weights are converted to a
# two-column (successes, failures) matrix; the weights are then consumed.
if (binom_y_prop(y, family, weights)) {
y1 <- as.integer(as.vector(y) * weights)
y <- cbind(y1, y0 = weights - y1)
weights <- double(0)
}
if (is.null(prior))
prior <- list()
if (is.null(prior_intercept))
prior_intercept <- list()
if (is.null(prior_aux))
prior_aux <- list()
if (is.null(prior_covariance))
stop("'prior_covariance' can't be NULL.", call. = FALSE)
### What I change:
# Premultiply the transposed random-effects design matrix of the FIRST
# grouping factor by Lt.  NOTE(review): `Lt` comes from the enclosing
# environment, and only Ztlist[[1]] is transformed -- any additional grouping
# factors are left untouched; confirm this is intended.
glmod$reTrms$Ztlist[[1]] <- Lt %*% glmod$reTrms$Ztlist[[1]]
###
group <- glmod$reTrms
group$decov <- prior_covariance
algorithm <- match.arg(algorithm)
# Fit via rstanarm's internal workhorse, forwarding priors and sampler
# settings unchanged.
stanfit <- stan_glm.fit(x = X, y = y, weights = weights,
offset = offset, family = family, prior = prior, prior_intercept = prior_intercept,
prior_aux = prior_aux, prior_PD = prior_PD, algorithm = algorithm,
adapt_delta = adapt_delta, group = group, QR = QR, sparse = sparse,
mean_PPD = !prior_PD, ...)
add_classes <- "lmerMod"
if (family$family == "Beta regression") {
add_classes <- c(add_classes, "betareg")
family$family <- "beta"
}
# Drop constant non-intercept columns from X (columns that are not all 1 yet
# have fewer than 2 distinct values), rebuild the padded Z matrix, and
# assemble the stanreg object the same way stan_glmer() does.
sel <- apply(X, 2L, function(x) !all(x == 1) && length(unique(x)) <
2)
X <- X[, !sel, drop = FALSE]
Z <- pad_reTrms(Ztlist = group$Ztlist, cnms = group$cnms,
flist = group$flist)$Z
colnames(Z) <- b_names(names(stanfit), value = TRUE)
fit <- nlist(stanfit, family, formula, offset, weights, x = cbind(X,
Z), y = y, data, call, terms = NULL, model = NULL, na.action = attr(glmod$fr,
"na.action"), contrasts, algorithm, glmod, stan_function = "stan_glmer")
out <- stanreg(fit)
class(out) <- c(class(out), add_classes)
return(out)
}
# Evaluate stan_adapt inside the rstanarm namespace so the unexported helper
# functions referenced above can be found.
environment(stan_adapt) <- asNamespace('rstanarm')
|
68506a94243a3def08ae20c6c8013e61f127f6cc
|
930c0c45143b14875d30c4b382fa82611df218ce
|
/scripts/6-LASSO.R
|
26a01636120ec846c15dd485d2cfbd6f7db70a5f
|
[
"BSD-3-Clause"
] |
permissive
|
Christensen-Lab-Dartmouth/VAE_methylation
|
dab9f1bb5a8df4096d24f660dc426aefcc7c88ce
|
3d56e3aa7c489a38dc85f56755ac3ba487a7d838
|
refs/heads/master
| 2021-05-26T10:55:01.799593
| 2019-05-31T21:48:06
| 2019-05-31T21:48:06
| 128,078,667
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,479
|
r
|
6-LASSO.R
|
######################
# Comparing significant CpGs from EWAS to LaWAS results
#
# Author: Alexander Titus
# Created: 08/20/2018
# Updated: 08/20/2018
######################
#####################
# Set up the environment
#####################
require(data.table)
library(glmnet)
######################
## Set WD
# Change this to your base WD, and all other code is relative to the folder structure
base.dir = 'C:/Users/atitus/github/VAE_methylation'
setwd(base.dir)
# Covariate data: one row per sample, with tumour subtype / receptor columns.
covs.file = 'Full_data_covs.csv'
covs.dir = paste('data', covs.file, sep = '/')
covs = data.frame(fread(covs.dir))
covs.updated = covs
# Drop metastatic samples before building the indicator variables.
covs.updated = covs.updated[covs.updated$SampleType != 'Metastatic', ]
# One-vs-other binary indicators for each PAM50 subtype.
covs.updated$BasalVother = ifelse(covs.updated$PAM50 == "Basal", 1, 0)
covs.updated$NormalVother = ifelse(covs.updated$PAM50 == "Normal", 1, 0)
covs.updated$Her2Vother = ifelse(covs.updated$PAM50 == "Her2", 1, 0)
covs.updated$LumAVother = ifelse(covs.updated$PAM50 == "LumA", 1, 0)
covs.updated$LumBVother = ifelse(covs.updated$PAM50 == "LumB", 1, 0)
covs.updated$LumVother = ifelse(covs.updated$PAM50 == "LumA" |
                                      covs.updated$PAM50 == "LumB", 1, 0)
# Tumour (1) vs solid tissue normal (0).
covs.updated$sample.typeInt = ifelse(covs.updated$SampleType == 'Solid Tissue Normal', 0, 1)
# ER status: 1 = positive, 0 = negative, NA = anything else (indeterminate).
covs.updated$ERpos = ifelse(covs.updated$ER == "Positive", 1,
                            ifelse(covs.updated$ER == "Negative", 0, NA))
# Methylation data: samples in rows (after transposition below), CpGs in columns.
beta.file = 'BreastCancerMethylation_top100kMAD_cpg.csv'
beta.dir = paste('data/raw', beta.file, sep = '/')
betas = data.frame(fread(beta.dir)) # on my computer takes ~8min
rownames(betas) = betas[,1]
betas = betas[,2:ncol(betas)]
# Keep only samples present in the covariate table and sort both tables the
# same way so rows line up one-to-one.
betas = betas[rownames(betas) %in% covs.updated$Basename, ]
betas = betas[order(rownames(betas), decreasing=T), ]
covs.updated = covs.updated[covs.updated$Basename %in% rownames(betas), ]
covs.updated = covs.updated[order(covs.updated$Basename, decreasing=T), ]
## Check sample concordance (should print TRUE)
all(covs.updated$Basename == rownames(betas))
# Annotation file with breast specific enhancer information
anno.file = 'Illumina-Human-Methylation-450kilmn12-hg19.annotated.csv'
anno.dir = paste('data', anno.file, sep = '/')
anno = data.frame(fread(anno.dir))
rownames(anno) = anno[, 1]
#####################
# LASSO: predict ER status from CpG beta values
#####################
# https://cran.r-project.org/web/packages/biglasso/vignettes/biglasso.pdf
# install.packages('biglasso')
require(biglasso)
# First column is the outcome (ERpos), the rest are CpG predictors.
temp = cbind('ERpos' = covs.updated$ERpos, betas)
temp = temp[!is.na(temp$ERpos), ]
X = temp[, 2:ncol(temp)]
y = temp$ERpos
X.bm <- as.big.matrix(X)
fit <- biglasso(X.bm, y, screen = "SSR-BEDPP")
plot(fit)
# Cross-validated fit at lambda chosen by 10-fold CV.
cvfit <- cv.biglasso(X.bm, y, seed = 1234, nfolds = 10, ncores = 4)
par(mfrow = c(2, 2), mar = c(3.5, 3.5, 3, 1) ,mgp = c(2.5, 0.5, 0))
plot(cvfit, type = "all")
summary(cvfit)
coef(cvfit)[which(coef(cvfit) != 0)]
# NOTE(review): coef(cvfit) includes the intercept at index 1, which here
# happens to line up with the ERpos column of `temp`; the selected-CpG columns
# rely on that one-position offset being correct -- confirm before reuse.
temp2 = temp[, which(coef(cvfit) != 0)]
anno.temp = anno[anno$Name %in% colnames(temp2), ]
write.csv(anno.temp, file = 'results/ERposVERneg_LASSO.csv')
|
2e010efb6d7e8acefaf7c31ed7852a5778e67b09
|
068100cfbf0a84379536169bf70bf72ad54ca4f4
|
/scripts/mapped_truth_with_sj.R
|
a9b1de8e3d1685212590351bfe7865a7ccc2ac13
|
[] |
no_license
|
imallona/discerns_manuscript
|
17c6bf1559b2b51859398c491407f8a58b511927
|
c438121896ef0fe4c9884032c43e80a10906526a
|
refs/heads/master
| 2023-09-04T04:39:05.881681
| 2020-10-19T14:37:59
| 2020-10-19T14:37:59
| 402,038,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,851
|
r
|
mapped_truth_with_sj.R
|
## Compare the mapped and the true location of a read. The mapped coordinates
## including splice junctions are considered.
library(rtracklayer)
library(GenomicAlignments)
library(stringr)
library(dplyr)
library(GenomicFeatures)
library(data.table)
# Inputs/outputs are injected by Snakemake when this script runs as a rule;
# the commented assignments below are for interactive debugging only.
BAM <- snakemake@input[["bam"]]
GTF <- snakemake@input[["gtf"]]
SIM_ISOFORMS_RESULTS <- snakemake@input[["sim_iso_res"]]
OUTPREFIX <- snakemake@params[["outprefix"]]
REMOVED_GTF <- snakemake@input[["removed_gtf"]]
# GTF <- "annotation/Homo_sapiens.GRCh37.85_chr19_22.gtf"
# SIM_ISOFORMS_RESULTS <- "simulation/simulated_data/simulated_reads_chr19_22.sim.isoforms.results"
# BAM <- "simulation/mapping/STAR/me_exon/default/pass2_Aligned.out_s.bam"
# REMOVED_GTF <- "simulation/reduced_GTF/removed_me_exon.gtf"
# OUTPREFIX <- "simulation/mapped_truth/star/me_exon/default/pass2_Aligned.out"
#' Exon-exon junction coordinates
#'
#' Compute the transcript-space coordinates of every exon-exon junction from
#' the ordered widths of the exons making up a transcript.
#'
#' @param u integer vector of ordered widths of all exons in the transcript
#'
#' @return IRanges object with one 2-bp range per junction, covering the last
#'   base of each exon and the first base of the following exon
#' @export
#'
#' @examples
get_exon_junction <- function(u){
  # Junctions sit at the running totals of exon widths; the final cumulative
  # sum is the transcript end, not a junction, so it is dropped.
  boundary <- head(cumsum(u), -1L)
  IRanges(start = boundary, end = boundary + 1L)
}
#' Evaluate gaps in aligned reads
#'
#' Comparison of read gaps with the true set of gaps in the data set.
#' NOTE(review): despite the GRanges wording below, the q1/q2/t1/t2 arguments
#' are manipulated with dplyr verbs, i.e. they are expected to be data.frames
#' (as produced by `data.frame(<ranges>)` elsewhere in this script), each with
#' a `read_id` column -- confirm against the call sites.
#'
#' @param q1 gaps per first read (data.frame of genomic ranges with read_id)
#' @param q2 gaps per second read (same layout as q1)
#' @param t1 true exon-exon junctions per first read (same layout)
#' @param t2 true exon-exon junctions per second read (same layout)
#' @param aln GAlignmentPairs with all mapped reads (names = read ids)
#'
#' @return data.frame with TP, FP, TN, FN counts for first and second reads
#' @export
#'
#' @examples
evaluat_read_sj <- function(q1, q2, t1, t2, aln){
  ## Identify wrong gaps by comparing the rows between the two data.frames
  ## FP: all reads that do not have a gap in the truth or a wrong gap
  fp1 <- dplyr::setdiff(q1, t1 ) %>%
    dplyr::pull(read_id) %>%
    unique %>% length
  fp2 <- dplyr::setdiff(q2, t2 ) %>%
    dplyr::pull(read_id) %>%
    unique %>% length
  ## TP: all reads with correct gaps (gapped reads minus the false positives)
  tp1 <- length(unique(q1$read_id)) - fp1
  tp2 <- length(unique(q2$read_id)) - fp2
  ## TN: all reads that do not have a gap in both the mapping and the truth
  r1_no_gap <- names(aln)[!names(aln) %in% q1$read_id]
  r2_no_gap <- names(aln)[!names(aln) %in% q2$read_id]
  tn1 <- length(r1_no_gap[!r1_no_gap %in% t1$read_id])
  tn2 <- length(r2_no_gap[!r2_no_gap %in% t2$read_id])
  ## FN: all reads that are mapped without a gap but have a gap in the truth
  fn1 <- sum(r1_no_gap %in% t1$read_id)
  fn2 <- sum(r2_no_gap %in% t2$read_id)
  # Long-format result: one row per (measure, read mate) combination.
  data.frame(measure = c("TP", "TP", "FP", "FP", "TN", "TN", "FN", "FN"),
             count = c(tp1, tp2, fp1, fp2, tn1, tn2, fn1, fn2),
             read = rep(c("first", "second"), 4))
}
##------------------------------------------------------------------------------
print("Constructing transcript ranges of all reads")
gtf <- import(GTF)
iso_results <- read.table(SIM_ISOFORMS_RESULTS, header=TRUE)
iso_results <- cbind(iso_results,sid=1:nrow(iso_results))
## sid = id that represents which transcript this read is simulated from
aln <- readGAlignmentPairs(BAM, strandMode=2, use.names=TRUE)
## get the transcriptomic ranges from each simulated read
# Read names encode "<...>_<...>_<sid>_<pos>_<insertL>"; pull fields 3-5.
read_tr_range <- str_split(string = names(aln), pattern = "_",
                           simplify=TRUE)[,c(3, 4, 5)]
colnames(read_tr_range) <- c("sid", "pos", "insertL")
read_tr_range <- apply(read_tr_range, 2, as.numeric)
read_tr_range <- as.data.frame(read_tr_range)
read_tr_range$read_id <- names(aln)
read_tr_range <- read_tr_range %>%
  dplyr::left_join(dplyr::select(iso_results, transcript_id, length, sid),
                   by="sid")
# Reconstruct the transcript-space start/end of mate 1 and mate 2 from the
# simulated position and insert length (fixed read length of 101 bp).
read_tr_range <- read_tr_range %>%
  dplyr::mutate(start1 = length-pos-100, ## read length 101
                end1 = length-pos,
                start2 = length-pos-insertL+1,
                end2 = length-pos-insertL+101
  )
tr_ranges1 <- data.frame(start = read_tr_range$start1,
                         end = read_tr_range$end1,
                         transcript_id = as.character(read_tr_range$transcript_id),
                         read_id = read_tr_range$read_id,
                         stringsAsFactors = FALSE)
tr_ranges2 <- data.frame(start = read_tr_range$start2,
                         end = read_tr_range$end2,
                         transcript_id = as.character(read_tr_range$transcript_id),
                         read_id = read_tr_range$read_id,
                         stringsAsFactors = FALSE)
##------------------------------------------------------------------------------
## Map the transcriptomic coordinates to genomic coordinates -------------------
print("Mapping transcriptomic to genomic coordinates")
## we identify all reads that overlap exon-exon boundaries on the transcript
## to do that, we identify the exon-exon boundaries in transcriptomic coordinates
## we take the exon-exon boundary position and map it to genomic coordinates
## we extract the SJ from the "N" positions in the CIGAR string of each read
## we compare the true SJ with the mapped SJ
## this way, we do not evalues the actual mapped bases, but only the SJ
## so it does not matter if the reads are soft-clipped or not
## List of exons per transcript
txdb <- makeTxDbFromGRanges(gtf)
tr_granges <- exonsBy(txdb, by="tx", use.names=TRUE)
tr <- transcripts(txdb)
## keep all transcripts with more than one exon
tr_granges_sj <- tr_granges[lengths(tr_granges)>1]
## The exons are already ordered correctly (descending for the "-" strand: 5'
## exon is the last) --> the cumsumm of the exon lengths gives us the correct
## location of the exon exon junction
tr_eej <- lapply(width(tr_granges_sj), get_exon_junction)
tr_eej <- as(tr_eej, "IRangesList")
# ## get the eej per read, together with the read_id
tr_eej <- tr_eej %>%
  unlist %>%
  as.data.frame %>%
  dplyr::rename(transcript_id = names)
## join the eej with the reads
## only keep the eej that are within the exon boundaries
r1_truth_eej <- tr_ranges1 %>%
  dplyr::inner_join(tr_eej, by = "transcript_id") %>%
  dplyr::filter(start.x <= start.y & end.x >= end.y) %>%
  dplyr::select(start.y, end.y, read_id, transcript_id) %>%
  dplyr::rename(start = start.y, end = end.y, seqnames = transcript_id)
r2_truth_eej <- tr_ranges2 %>%
  dplyr::inner_join(tr_eej, by = "transcript_id") %>%
  dplyr::filter(start.x <= start.y & end.x >= end.y) %>%
  dplyr::select(start.y, end.y, read_id, transcript_id) %>%
  dplyr::rename(start = start.y, end = end.y, seqnames = transcript_id)
## add the strand information
r1_truth_eej$strand <- strand(tr)[match(r1_truth_eej$seqnames,
                                        mcols(tr)$tx_name)] %>% as.character
r2_truth_eej$strand <- strand(tr)[match(r2_truth_eej$seqnames,
                                        mcols(tr)$tx_name)] %>% as.character
## genomic coordinates of the exon exon junctions (including last and first base of exon)
## XXXXX---XXXXXX annotation
##     xxxxx     genomic eej coordinates
read_id1 <- r1_truth_eej$read_id
r1_truth_eej <- mapFromTranscripts(GRanges(r1_truth_eej), tr_granges)
read_id2 <- r2_truth_eej$read_id
r2_truth_eej <- mapFromTranscripts(GRanges(r2_truth_eej), tr_granges)
## add the read name (xHits indexes back into the pre-mapping rows)
mcols(r1_truth_eej)$read_id <- read_id1[mcols(r1_truth_eej)$xHits]
mcols(r2_truth_eej)$read_id <- read_id2[mcols(r2_truth_eej)$xHits]
## remove the last and first base of the touching exons from the range, we only
## want the sj
r1_truth_eej <- narrow(r1_truth_eej, start=2, end=-2)
r2_truth_eej <- narrow(r2_truth_eej, start=2, end=-2)
## convert back to data.frame
r1_truth_eej <- r1_truth_eej %>%
  data.frame %>%
  dplyr::select(-c(xHits, transcriptsHits)) %>%
  dplyr::mutate(strand = droplevels(strand))
r2_truth_eej <- r2_truth_eej %>%
  data.frame %>%
  dplyr::select(-c(xHits, transcriptsHits)) %>%
  dplyr::mutate(strand = droplevels(strand))
## Filter out all reads without splice Junctions -------------------------------
print("Preparing GRanges of all gaps")
# Keep only mates whose CIGAR contains at least one "N" (a skipped region).
s1 <- GenomicAlignments::first(aln)[grepl("N", cigar(GenomicAlignments::first(aln)))]
s2 <- GenomicAlignments::second(aln)[grepl("N", cigar(GenomicAlignments::second(aln)))]
## Find the location of the "N" in the read: https://support.bioconductor.org/p/75307/
## and add the start location of the read --> genomic location of gap
s1_range <- cigarRangesAlongReferenceSpace(cigar(s1), ops = "N", pos = start(s1))
s2_range <- cigarRangesAlongReferenceSpace(cigar(s2), ops = "N", pos = start(s2))
names(s1_range) <- names(s1)
names(s2_range) <- names(s2)
s1_range <- unlist(s1_range)
s2_range <- unlist(s2_range)
## The strand of the pair is the strand of its last alignment --> revert first
s1_range <- s1_range %>%
  data.frame %>%
  dplyr::rename(read_id = names) %>%
  dplyr::mutate(seqnames = factor(seqnames(s1[match(read_id, names(s1))])),
                strand = factor(strand(
                  GenomicAlignments::second(aln)[match(read_id, names(aln))])))
s2_range <- s2_range %>%
  data.frame %>%
  dplyr::rename(read_id = names) %>%
  dplyr::mutate(seqnames = factor(seqnames(s2[match(read_id, names(s2))])),
                strand = factor(strand(s2[match(read_id, names(s2))])))
##----------------------------------------------------------------------------
## compare the read gaps to the true eej per read
print("Evaluating all reads")
res <- evaluat_read_sj(s1_range, s2_range, r1_truth_eej, r2_truth_eej, aln)
write.table(res, file = file.path(paste0(OUTPREFIX, "_evaluation_SJ_all.txt")),
            quote = FALSE, sep = "\t", row.names = FALSE)
##----------------------------------------------------------------------------
## We want all reads that were simulated from one of the exons that were
## removed from the gtf annotation.
r_gtf <- import(REMOVED_GTF)
## only keep the read pairs where any of the single reads overlaps with the
## location of the removed exons
print("Only reads from the removed exons")
read_id1 <- tr_ranges1$read_id
read_id2 <- tr_ranges2$read_id
r1_truth <- mapFromTranscripts(
  GRanges(seqnames = tr_ranges1$transcript_id,
          ranges = IRanges(start = tr_ranges1$start, end = tr_ranges1$end)),
  tr_granges)
r2_truth <- mapFromTranscripts(
  GRanges(seqnames = tr_ranges2$transcript_id,
          ranges = IRanges(start = tr_ranges2$start, end = tr_ranges2$end)),
  tr_granges)
mcols(r1_truth)$read_id <- read_id1[mcols(r1_truth)$xHits]
mcols(r2_truth)$read_id <- read_id2[mcols(r2_truth)$xHits]
r1_removed <- subsetByOverlaps(r1_truth, r_gtf)
r2_removed <- subsetByOverlaps(r2_truth, r_gtf)
r_removed <- union(mcols(r1_removed)$read_id, mcols(r2_removed)$read_id)
## Filter out all reads without splice Junctions -------------------------------
print("Removed: Preparing GRanges of all gaps")
# Same gap extraction as above, restricted to reads from the removed exons.
aln_removed <- aln[r_removed]
s1 <- GenomicAlignments::first(aln_removed)[
  grepl("N", cigar(GenomicAlignments::first(aln_removed)))]
s2 <- GenomicAlignments::second(aln_removed)[
  grepl("N", cigar(GenomicAlignments::second(aln_removed)))]
## Find the location of the "N" in the read: https://support.bioconductor.org/p/75307/
## and add the start location of the read --> genomic location of gap
s1_range <- cigarRangesAlongReferenceSpace(cigar(s1), ops = "N", pos = start(s1))
s2_range <- cigarRangesAlongReferenceSpace(cigar(s2), ops = "N", pos = start(s2))
names(s1_range) <- names(s1)
names(s2_range) <- names(s2)
s1_range <- unlist(s1_range)
s2_range <- unlist(s2_range)
### data.frame
s1_range <- s1_range %>%
  data.frame %>%
  dplyr::rename(read_id = names) %>%
  dplyr::mutate(seqnames = factor(seqnames(s1[match(read_id, names(s1))])),
                strand = factor(strand(
                  GenomicAlignments::second(aln_removed)[match(read_id,
                                                               names(aln_removed))])))
s2_range <- s2_range %>%
  data.frame %>%
  dplyr::rename(read_id = names) %>%
  dplyr::mutate(seqnames = factor(seqnames(s2[match(read_id, names(s2))])),
                strand = factor(strand(s2[match(read_id, names(s2))])))
##----------------------------------------------------------------------------
## compare the read gaps to the true eej per read
print("Removed: Evaluating all reads")
res <- evaluat_read_sj(s1_range, s2_range, r1_truth_eej, r2_truth_eej,
                       aln_removed)
write.table(res, file = file.path(paste0(OUTPREFIX, "_evaluation_SJ_overl_removed_exons.txt")),
            quote = FALSE, sep = "\t", row.names = FALSE)
##----------------------------------------------------------------------------
## conda env for R-3.5.1 is /home/Shared/kathi/microexon_pipeline/.snakemake/conda/a0a0dd0e
# R_LIBS=/home/Shared/Rlib/release-3.5-lib/ /usr/local/R/R-3.4.0/bin/R
|
6d9db9d5076b87e42f2c09a60eda638573695869
|
c443e68905ea44d277deafa11ce2bb3463e5ab61
|
/man/slopesolvers-package.Rd
|
52eaf8766b7ec1e63bce0bfe70c135cfb948f09b
|
[] |
no_license
|
jolars/slopesolvers
|
8a69116ed29eea156f71f107a408c44310e0c7c4
|
3ef2ea6ff174d6ef35f663e24eada5d900c51010
|
refs/heads/master
| 2021-01-05T17:01:19.335470
| 2020-02-17T13:09:36
| 2020-02-17T13:09:36
| 241,083,259
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 708
|
rd
|
slopesolvers-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slopesolvers-package.R
\docType{package}
\name{slopesolvers-package}
\alias{slopesolvers}
\alias{slopesolvers-package}
\title{slopesolvers: A Suite of Solvers with Associated Functions for SLOPE}
\description{
A set of solvers for SLOPE to compare performance with various
types of input and objectives.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/jolars/slopesolvers}
\item Report bugs at \url{https://github.com/jolars/slopesolvers/issues}
}
}
\author{
\strong{Maintainer}: Johan Larsson \email{johanlarsson@outlook.com} (\href{https://orcid.org/0000-0002-4029-5945}{ORCID})
}
\keyword{internal}
|
fb7da413607cc3206b66cfde617cdeb2989ca916
|
3c887e5568e6815edb9e6adde83e2b5fa36800cb
|
/plots/plot.R
|
1577a902372a815ed90f3e55c8217d1123156847
|
[] |
no_license
|
Mytherin/MonetDBLiteBenchmarks
|
04db316f24f64b61c8d81611e8effa4389e36f7a
|
5fdacde734a36f82f52c6e55f2bb8ec3a5208cb9
|
refs/heads/master
| 2020-03-14T17:32:43.717433
| 2018-06-21T10:27:04
| 2018-06-21T10:27:04
| 131,722,589
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 931
|
r
|
plot.R
|
library(dplyr)
library(ggplot2)
library(ggthemes)
library(ggrepel)
library(stringr)
library(grid)
library(reshape2)
# Shared plot theme.  NOTE(review): the variable name `theme` shadows
# ggplot2::theme (the function call on the next line still resolves to the
# function); consider renaming if this script grows.
theme <- theme_few(base_size = 24) +
  theme(axis.title.y=element_text(vjust=0.9),
        axis.title.x=element_text(vjust=-0.1),
        axis.ticks.x=element_blank(),
        text=element_text(family="serif"),
        legend.position="none")
# Benchmark timings; -1 in the input marks a missing measurement (-> NA).
data <- read.table("temp_data.csv", header=T, sep=",", stringsAsFactors=F, na.strings="-1")
# Show two decimals for sub-10s timings, one decimal otherwise.
data$time[data$time < 10] <- round(data$time[data$time < 10], 2)
data$time[data$time >= 10] <- round(data$time[data$time >= 10], 1)
# Plot bounds and output path come from the environment (set by the driver).
ymax <- as.integer(Sys.getenv('Y_MAX_BOUND'))
pdf(Sys.getenv('PLOT_NAME'), width=8, height=6)
ggplot(data, aes(x = reorder(system, time), y = time, fill = system, label=time)) + geom_bar(stat = "identity", width=.7) + theme + xlab("") + ylab("Wall clock time (s)") + scale_y_continuous(limits=c(0, ymax)) + geom_text(size=7, vjust=-.3, family="serif")
dev.off()
|
c9f7df1f570fdcb8a8e8b8bf3c8d653605901f91
|
fee0fc1f748a72a845c1b81bb99f159e08fd6fb9
|
/man/MTi.Rd
|
bed9a8f8f7dc9c210135e053bf15e083bd82121e
|
[] |
no_license
|
francisco-fjvm/MatrixCollection
|
718245ed44eb61be9a4203259fc11c5629b12bc3
|
313643d7b0fd78a7330c8ff4793ad7f25f46c82e
|
refs/heads/master
| 2020-04-24T12:58:02.817971
| 2019-02-22T01:28:43
| 2019-02-22T01:28:43
| 171,585,274
| 2
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 340
|
rd
|
MTi.Rd
|
\name{MTi}
\alias{MTi}
\title{ Matriz triangular superior con entradas aleatoria enteras}
\description{ Función para crear una matriz superior aleatoria}
\usage{ MTi(n, a, b) }
\arguments{
\item{n}{ Tamaño de matriz}
\item{a}{ Límite inferior del intervalo}
\item{b}{ Límite superior del intervalo}
}
\examples{M=MTi(4,-15,5)
}
|
c4ba7ce75e73c31c8463240193bd17cfe463ac41
|
cb9dcfc00cc07dbef7d49a320af6b581a58fbc65
|
/Regression Template.R
|
58757257d9011b124ac2b121a65f160abfb814b5
|
[] |
no_license
|
ZyanWC/R-Machine-Learning
|
8c0c62582a0216ffbc4814600a34eb385122d9b8
|
30eae32a43b5d83379d58a63e4f2ab8e2829fa3e
|
refs/heads/master
| 2021-05-09T16:32:28.416097
| 2018-01-27T00:18:16
| 2018-01-27T00:18:16
| 119,117,199
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
Regression Template.R
|
#Regression Template
#Importing the dataset (keep only Level and Salary columns)
dataset = read.csv("Position_Salaries.csv")
dataset = dataset[2:3]
#Splitting the data into Training and Test set
#install.packages("caTools")
#library(caTools)
#set.seed(123)
#split = sample.split(dataset$Purchased, SplitRatio = .8)
#training_set = subset(dataset, split == TRUE)
#test_set = subset(dataset, split == FALSE)
#Feature Scaling
#training_set[, 2:3] = scale(training_set[, 2:3])
#test_set[, 2:3] = scale(test_set[, 2:3])
#Fitting the Polynomial Regression model to the dataset
#Create Regressor Here
# NOTE(review): this is a fill-in template -- `regressor` must be defined at
# the placeholder above before any of the code below will run.
#Predicting Salary using user input values
y_hat = predict(regressor, data.frame(Level = 6.5))
#Visualising the Regression Model results
#install.packages("ggplot2")
library(ggplot2)
ggplot()+
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = "blue") +
  geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
            colour = "red") +
  ggtitle("") +
  xlab("") +
  ylab("")
#Visualising the Regression Model results (smoother curve/better quality)
#x_grid samples the Level axis at 0.1 steps for a smoother prediction line.
#install.packages("ggplot2")
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.1)
ggplot()+
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = "blue") +
  geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = "red") +
  ggtitle("") +
  xlab("") +
  ylab("")
|
c6f6eac074adc11ad4658c6a5f1b25ba6905eed5
|
3db9b63f9eadda8129c5057a246476dc47b41dea
|
/App1/ui.R
|
3c425bb8a6f1421eaa3c9d10fcfabc882e9811d5
|
[] |
no_license
|
vikramjeet312/dataProduct-shiny
|
f4686bbf239915ec363d8fbef624d6b87aa2826d
|
8dfece76e52dd2c7e253b50268d3cb373fcf4ebe
|
refs/heads/master
| 2021-01-10T01:27:45.893373
| 2015-11-22T08:02:20
| 2015-11-22T08:02:20
| 46,651,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 563
|
r
|
ui.R
|
library(shiny)

# Shiny UI for the "EPL Stats Season: 2014" app: a slider selects how many of
# the top teams to show and a drop-down picks which statistic column to plot.
shinyUI(fluidPage(
  titlePanel(title = "EPL Stats Season: 2014"),
  sidebarLayout(
    sidebarPanel(
      # sliderInput(inputId, label, min, max, value, ...).  Fix: the original
      # default value (1) was below the declared minimum (2); use 2 so the
      # initial state is valid.
      sliderInput("teams", "Top n-teams", 2, 20, 2, pre = "Top ", post = " teams"),
      # Map each statistic label to its column index 1..12.  Fix: "Pass
      # Success" was mapped to "" (empty string) instead of column 3.
      selectInput("column", "Choose a Statistic",
                  choices = c("Shots Per Game" = 1, "Possession %" = 2,
                              "Pass Success" = 3, "Aerials Won" = 4,
                              "Shots On Target" = 5, "Dribbles" = 6,
                              "Fouled Per Game" = 7, "Shots Conceded" = 8,
                              "Tackles" = 9, "Interceptions" = 10,
                              "Fouls Per Game" = 11, "Offsides" = 12))
    ),
    mainPanel(plotOutput("histPlot"), plotOutput("vsPlot"))
  )
))
|
505a9d8562120d223f21b85b583583aba9e48c1f
|
daab105ecede477a1e89a851b93e337525e26b20
|
/cachematrix.R
|
367091f942033c3e162c5f47f64371fc748eaede
|
[] |
no_license
|
Zeeshanasif/ProgrammingAssignment2
|
aa449ec3aff5297438764db1115969744d661c85
|
76f3df9a92ef31a9aadf2b63c14a9114c88af572
|
refs/heads/master
| 2020-12-03T09:31:57.627642
| 2014-11-23T20:23:21
| 2014-11-23T20:23:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
cachematrix.R
|
## This pair of functions uses lexical scoping to cache the inverse of a
## matrix.  Create the special matrix object with makeCacheMatrix() and
## compute/retrieve its inverse with cacheSolve().  Example:
##   MAT <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow = 2, ncol = 2))
##   cacheSolve(MAT)   # computes, caches and returns the inverse
##   MAT$getinv()      # returns the cached inverse (NULL before first solve)

## Build a special "matrix": a list of accessor closures over the matrix `x`
## and its cached inverse.  Replacing the matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Store a new matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Record a computed inverse in the cache.
    setinv = function(inv) cached_inverse <<- inv,
    # Return the cached inverse, or NULL if none has been stored yet.
    getinv = function() cached_inverse
  )
}
## Compute the inverse of the special "matrix" returned by makeCacheMatrix
## above.  A previously cached inverse is returned directly (with a message);
## otherwise the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
  } else {
    message("getting cached data")
  }
  ## Return a matrix that is the inverse of 'x'
  inverse
}
|
c4fab4d5e5d1e755535fc5f8ccca89a7896bd0c6
|
a728f406ceed9e6480880856242f9e52748c3e0f
|
/C_Code/main.R
|
537a6b3a03ca50764a1f0d1e2cdf8bc9aeb1389b
|
[] |
no_license
|
qc-an/Renewables_EM_participation
|
6c0ef3990099ff0cdf59f635fd031a31ec6325e4
|
dc4d35c74913f75e174d7c65560e4aec0703ad10
|
refs/heads/master
| 2021-09-10T09:08:18.224490
| 2018-03-23T09:12:34
| 2018-03-23T09:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,252
|
r
|
main.R
|
## Assignment 2 ##
## Renewables in electricity market ##
## Author : Florian Guillebeaud ##
###################################
###################################
setwd("~/Documents/DTU/B_Semester-2/31761_Renew_ElectricityMarkets/Assignments/Assignment2")
###################################
###################################
library("forecast")
###################################
###################################
# Helper scripts: data loading, bidding schedules, balancing settlement and
# quantile utilities.  Each source() below defines functions/data used later.
source("C_Code/read_wp.R")              # ! quantities in kW
source("C_Code/read_elspot.R")          # ! price is given in DKK/MWh
source("C_Code/read_regulations.R")
source("C_Code/get_schedule.R")
source("C_Code/balancing_new.R")
source("C_Code/performance_ratio.R")
source("C_Code/get_best_quantile.R")
source("C_Code/quantile_distribution.R")
source("C_Code/scenario_output.R")
source("C_Code/plot_quantile.R")
###################################
## Quantity Bid ##
###################################
## Scenario 1 : We bid what forecasted ##
scenario1 = scenario_output(1, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = TRUE)
## Scenario 2 : Perfect forecast ##
scenario2 = scenario_output(2, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = TRUE)
# Revenue under perfect foresight: upper bound used as the benchmark below.
ideal_revenue = sum(scenario2$revenues_hourly, na.rm = TRUE)
## Scenario 3 : Persistence forecast (using the last power measurement value at 11h)
scenario3 = scenario_output(3, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = TRUE)
## Scenario 4 : Random bid between 0 and MaxProd MW
# Repeated 100x because the bid is random; pf = performance ratio vs. ideal.
actual_revenue = pf = vector()
for (j in 1:100){
  cat(paste0("round : ", j ), "\n")
  scenario4 = scenario_output(4, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = FALSE)
  actual_revenue[j] = sum(scenario4$revenues_hourly, na.rm = TRUE)
  pf[j] = actual_revenue[j]/ideal_revenue
}
## Scenario 5 : We bid a constant amount based on an estimated CF ##
scenario5 = scenario_output(5, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = TRUE)
## Scenario 6 : We bid the median (0.5 quantile) ##
scenario6 = scenario_output(6, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = TRUE)
## Scenario 7 : using the otpimal quantile
scenario7 = scenario_output(7, data_wp, elspot_price_2017, regulating_prices_2017, plot_results = TRUE)
##################################
## Post traitement Scenarios ##
###################################
# Maximum we can get (perfect-forecast daily revenue, January only)
plot((rowMeans(scenario2$revenues_hourly)/10^3)[1:31], ylim = c(0,40),
     type = 'h', lwd = 20, xlab = "Time [Days]", ylab = "Revenue [k€]")
title(main="Revenue in January 2017")
# Compare to our scenarios (narrower bars drawn on top of wider ones)
lines((rowMeans(scenario1$revenues_hourly)/10^3)[1:31], type = 'h', lwd = 17, col = "grey")
lines((rowMeans(scenario3$revenues_hourly)/10^3)[1:31], type = 'h', lwd = 14, col = "yellow")
lines((rowMeans(scenario4$revenues_hourly)/10^3)[1:31], type = 'h', lwd = 11, col = "green")
lines((rowMeans(scenario5$revenues_hourly)/10^3)[1:31], type = 'h', lwd = 8, col = "blue")
# lines((rowMeans(scenario6$revenues_hourly)/10^3)[1:31], type = 'h', lwd = 8, col = "blue")
# lines((rowMeans(scenario7$revenues_hourly)/10^3)[1:31], type = 'h', lwd = 8, col = "blue")
legend("topright", legend = c("Perfect forecast", "Believe in forecast", "Persistence forecast", "Random bid using CF=0.5", "Constant bid using CF=0.5"),
       col = c("black", "grey", "yellow", "green", "blue"), lty = 1, lwd = c(20,17,14,11,8), cex = 0.75)
###################################
###################################
# Cumulative revenue plot (NAs zeroed so cumsum does not propagate NA)
scenario1$revenues_hourly[is.na(scenario1$revenues_hourly)] <- 0
scenario2$revenues_hourly[is.na(scenario2$revenues_hourly)] <- 0
scenario3$revenues_hourly[is.na(scenario3$revenues_hourly)] <- 0
scenario4$revenues_hourly[is.na(scenario4$revenues_hourly)] <- 0
scenario5$revenues_hourly[is.na(scenario5$revenues_hourly)] <- 0
scenario6$revenues_hourly[is.na(scenario6$revenues_hourly)] <- 0
scenario7$revenues_hourly[is.na(scenario7$revenues_hourly)] <- 0
# plot the optimal first
plot((cumsum(scenario2$revenues_hourly)/10^7)[0:200], type = 'l')
lines((cumsum(scenario1$revenues_hourly)/10^7)[0:200], type = 'l', lty = 2, col = "red")
lines(cumsum(scenario3$revenues_hourly)/10^7, type = 'l', lty = 2, col = "green")
lines(cumsum(scenario4$revenues_hourly)/10^7, type = 'l', col = "blue")
lines(cumsum(scenario5$revenues_hourly)/10^7, type = 'l', col = "orange")
lines(cumsum(scenario6$revenues_hourly)/10^7, type = 'l', lty = 2, col = "yellow")
lines(cumsum(scenario7$revenues_hourly)/10^7, type = 'l', col = "purple")
###################################
###################################
# Cumulative balancing / day ahead revenue plot
scenario1$da_revenue_hourly[is.na(scenario1$da_revenue_hourly)] <- 0
scenario1$ba_revenue_hourly[is.na(scenario1$ba_revenue_hourly)] <- 0
scenario2$da_revenue_hourly[is.na(scenario2$da_revenue_hourly)] <- 0
scenario2$ba_revenue_hourly[is.na(scenario2$ba_revenue_hourly)] <- 0
scenario7$da_revenue_hourly[is.na(scenario7$da_revenue_hourly)] <- 0
scenario7$ba_revenue_hourly[is.na(scenario7$ba_revenue_hourly)] <- 0
plot(cumsum(scenario1$da_revenue_hourly)/10^7, ylim = c(-2,15), type = 'l')
abline(h=0)
lines(cumsum(scenario1$ba_revenue_hourly)/10^7, type = 'l')
lines(cumsum(scenario2$da_revenue_hourly)/10^7, type = 'l', col = "green")
lines(cumsum(scenario2$ba_revenue_hourly)/10^7, type = 'l', col = "green")
lines(cumsum(scenario7$da_revenue_hourly)/10^7, type = 'l', col = "red")
lines(cumsum(scenario7$ba_revenue_hourly)/10^7, type = 'l', col = "red")
###################################
###################################
# SPOT PRICES: overlay 2016 vs 2017 DK1 spot prices on a common length.
N = min(length(elspot_price_2016$DK1), length(elspot_price_2017$DK1))
plot(1:N, elspot_price_2016$DK1[1:N], type = "l", xlab = "Time [h]", ylab = "[DKK/MWh]")
lines(1:N, elspot_price_2017$DK1[1:N], col = "blue")
legend("bottomleft", legend = c(paste0("2016 / mean price : ", round(mean(elspot_price_2016$DK1[1:N], na.rm = TRUE), digits = 2), " DKK/MWh"),
                                paste0("2017 / mean price : ", round(mean(elspot_price_2017$DK1[1:N], na.rm = TRUE), digits = 2), " DKK/MWh")), col = c("black", "blue"), lty = 1)
title(main = "Electricity Spot Price in DK1")
# Hourly tendancy: reshape the 2016 hourly series into a day x hour matrix.
# NOTE(review): rows are grown with rbind inside the loop (O(n^2) copies);
# preallocating would be faster, left as-is to keep behaviour identical.
temp_elspot = vector() # matrix(0,ncol = 24, nrow = length(seq(1,length(elspot_price_2016$DK1), 24)))
temp_up = temp_dw = vector() # matrix(0,ncol = 24, nrow = length(seq(1,length(regulating_prices_2016), 24)))
for (i in seq(1,length(elspot_price_2016$DK1), 24)){
  temp_elspot = rbind(temp_elspot,elspot_price_2016$DK1[i:(i+23)])
  temp_up = rbind(temp_up, regulating_prices_2016$DK1_UP[i:(i+23)])
  temp_dw = rbind(temp_dw, regulating_prices_2016$DK1_DOWN[i:(i+23)])
}
temp_elspot[is.na(temp_elspot)] <- 0
temp_up[is.na(temp_up)] <- 0
temp_dw[is.na(temp_dw)] <- 0
# Mean price per hour-of-day across all days of 2016.
hourly_av_spot_2016 = colMeans(temp_elspot[,1:24])
hourly_av_up = colMeans(temp_up[,1:24])
hourly_av_dw = colMeans(temp_dw[,1:24])
plot(1:24, hourly_av_spot_2016, type = "o",
     ylim = c(min(hourly_av_spot_2016, hourly_av_up, hourly_av_dw), max(hourly_av_spot_2016, hourly_av_up, hourly_av_dw)),
     col = "blue", xlab = "Time [h]", ylab = "Price [DKK/MWh]", lwd = 2)
points(1:24, hourly_av_up, col = "red", pch = 3, lwd = 2)
lines(1:24, hourly_av_up, col = "red", lwd = 2)
points(1:24, hourly_av_dw, col = "darkorange", pch = 3, lwd = 2)
lines(1:24, hourly_av_dw, col = "darkorange", lwd = 2)
# Spread (standard deviation) per hour-of-day.
plot(1:24, apply(temp_elspot, 2 , sd), type = "o",
     ylim = c(min(apply(temp_elspot,2,sd), apply(temp_up,2,sd),apply(temp_dw,2,sd)), max(apply(temp_elspot,2,sd), apply(temp_up,2,sd),apply(temp_dw,2,sd))),
     col = "blue", xlab = "Time [h]", ylab = "Standard deviation [DKK/MWh]", lwd = 2)
points(1:24, apply(temp_up, 2 , sd), col = "red", pch = 3, lwd = 2)
lines(1:24, apply(temp_up, 2 , sd), col = "red", lwd = 2)
points(1:24, apply(temp_dw, 2 , sd), col = "darkorange", pch = 3, lwd = 2)
lines(1:24, apply(temp_dw, 2 , sd), col = "darkorange", lwd = 2)
###################################
###################################
## compute key figures for regulation prices DKK/MWh
# average up and down regulations costs
down_reg_av_2016 = mean(regulating_prices_2016$DK1_DOWN, na.rm=TRUE)
up_reg_av_2016 = mean(regulating_prices_2016$DK1_UP, na.rm=TRUE)
down_reg_av_2017 = mean(regulating_prices_2017$DK1_DOWN, na.rm=TRUE)
up_reg_av_2017 = mean(regulating_prices_2017$DK1_UP, na.rm=TRUE)
###################################
###################################
# What date do you want to plot ?
# 28th of March 2016 : YYYYMMDD
date = 20170328
# Extract the 4-digit year from the date literal.
# NOTE(review): eval(parse(text = ...)) below builds variable names like
# "elspot_price_2017" dynamically -- fragile; a named list lookup would be
# safer, left unchanged here.
year = noquote(sub("^(\\d{4}).*$", "\\1", date))
reg_date_up = eval(parse(text = paste0("regulating_prices_",year,"[regulating_prices_",year,"$date_daily==date,]$DK1_UP")))
reg_date_down = eval(parse(text = paste0("regulating_prices_",year,"[regulating_prices_",year,"$date_daily==date,]$DK1_DOWN")))
price_date = eval(parse(text = paste0("elspot_price_",year,"[elspot_price_",year,"$date_daily==date,]$DK1")))
wind_fore_date = eval(data_wp[data_wp$date_daily==date,]$fore)
wind_meas_date = eval(data_wp[data_wp$date_daily==date,]$meas)
# Balancing Price market for this date
plot(price_date,type = "o", ylim = c(min(reg_date_up,reg_date_down, price_date, na.rm = TRUE), max(reg_date_up, reg_date_down, price_date, na.rm = TRUE)),
     xlab = "Program Time Unit [h]", ylab = "DKK/MWh", lty = 1, lwd = 2)
points(reg_date_up, col = "blue", type = "o", lty = 2 )
points(reg_date_down, col ="red", type ="o", lty = 2)
legend("topleft", legend = c("Spot price", "Up-reg. price", "Down-reg. price"),
       col = c("black", "blue", "red"), lty = c(1,2,2), cex = 0.75, lwd = c(2,1,1))
title(main = paste0("Prices the : ", date))
# in March 2017 : YYYYMM
date = 201703
year = noquote(sub("^(\\d{4}).*$", "\\1", date))
reg_date_up = eval(parse(text = paste0("regulating_prices_",year,"[regulating_prices_",year,"$date_monthly==date,]$DK1_UP")))
reg_date_down = eval(parse(text = paste0("regulating_prices_",year,"[regulating_prices_",year,"$date_monthly==date,]$DK1_DOWN")))
price_date = eval(parse(text = paste0("elspot_price_",year,"[elspot_price_",year,"$date_monthly==date,]$DK1")))
# NOTE(review): this monthly section expects objects named wind_power_<year>,
# while the daily section above used data_wp -- confirm that read_wp.R
# actually defines wind_power_2017 etc.
wind_fore_date = eval(parse(text = paste0("wind_power_",year,"[wind_power_",year,"$date_monthly==date,]$fore")))
wind_meas_date = eval(parse(text = paste0("wind_power_",year,"[wind_power_",year,"$date_monthly==date,]$meas")))
plot(price_date,type = "l", ylim = c(min(reg_date_up,reg_date_down, price_date, na.rm = TRUE), max(reg_date_up, reg_date_down, price_date, na.rm = TRUE)),
     xlab = paste0("Month studied : ", date, " [h]"), ylab = "DKK/MWh", lty = 1)
points(reg_date_up, col = "blue", type = "l", lty = 2 )
points(reg_date_down, col ="red", type ="l")
par(xpd=TRUE)
legend(list(x = 0,y = 2), legend = c("Spot price", "Up-reg. price", "Down-reg. price"),
       col = c("black", "blue", "red"), lty = c(1,2,2), cex = 0.75)
#
plot(wind_fore_date, type = "l", xlab = "Time [h]", ylab = "[kW]")
lines(wind_meas_date, type = "l", lty = 2, col = "blue")
title(main = paste0("Wind power production in : ", date))
legend(list(x = 0,y = 2), legend = c("forecasted", "measured"), col = c("black", "blue"), lty = c(1,2), cex = 0.75, lwd = 2)
title(main = paste0("Wind power production : ", date))
####
####
|
187905c53ce23e9c318b9976ad6faedb247f864a
|
a0d8cc13c2552f6abeff5d39c45bc578a7d67eb5
|
/intial R code.r
|
4541bf71717c9c1ec8ffaba0a0e10606cac8ae1f
|
[] |
no_license
|
ajsarver87/service_parts_forcast
|
72b6dfe9f11243ba9d78402ea00f3a72950c6fab
|
73db1705baafeb56665dff036a51916dfc0e43e5
|
refs/heads/master
| 2020-03-22T22:22:11.678256
| 2018-07-12T18:34:32
| 2018-07-12T18:34:32
| 140,748,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,439
|
r
|
intial R code.r
|
#LIBRARIES
library(TSA)
library(forecast)
library(foreach)
library(doSNOW)
library(doParallel)
library(plyr)
#Custom Function for fitting automatically chossing the best model between ARIMA and ETS
fitting.function <- function(x, h){
  # Fit both candidate time-series models and keep the one with the lower AIC.
  # `h` is accepted for interface compatibility but is not used when fitting.
  cand_arima <- auto.arima(x, seasonal = TRUE)
  cand_ets <- ets(x)
  # Ties go to the ARIMA candidate, exactly as in the original comparison.
  if (cand_arima$aic > cand_ets$aic) {
    cand_ets
  } else {
    cand_arima
  }
}
forecast.function <- function(x,h){
  # Fit the better of ARIMA/ETS (chosen by fitting.function) and return the
  # next `h` point forecasts as a plain numeric vector.
  temp1 <- fitting.function(x,h)
  if (inherits(temp1, "ARIMA")) {
    # Pass the horizon explicitly instead of relying on forecast()'s default.
    temp2 <- forecast(temp1, h = h)
    # BUG FIX: $mean is a 1-D ts, so the original temp2$mean[1, h] raised
    # "incorrect number of dimensions"; take the first h values instead,
    # mirroring the ETS branch below.
    return(as.vector(temp2$mean)[1:h])
  }
  else {
    temp2 <- predict(temp1, n.ahead = h)
    return(as.vector(temp2$mean)[1:h])
  }
}
# Import data: monthly parts usage, one column per part.
df <- read.table("parts_usage.csv", header = TRUE, sep = ",", check.names = FALSE)
df[is.na(df)] <- 0                 # treat missing usage as zero
df$month <- NULL                   # drop the date column before modelling
df <- df[, colSums(df != 0) > 0]   # drop parts that were never used
n <- ncol(df)
h <- 3                             # forecast horizon in months
df <- ts(df, start = c(2012, 8), end = c(2017, 7), frequency = 12)
forecast.df <- matrix(NA, ncol = n, nrow = h)
# Parallel backend: leave one core free for the OS.
cores <- detectCores()
cl <- makeCluster(cores - 1)  # BUG FIX: was make.Cluster(), which does not exist
registerDoParallel(cl)
strt <- Sys.time()
# .packages/.export ship the forecast package and the model-fitting helpers
# to the worker processes (PSOCK workers start with empty environments).
forecast.df <- foreach(i = seq_len(n), .packages = "forecast",
                       .export = c("forecast.function", "fitting.function")) %dopar%
  forecast.function(df[, i], h)
print(Sys.time() - strt)
stopCluster(cl)  # release the workers (was missing: resource leak)
# Reshape the list of h-step forecasts into an n x h data frame.
final <- data.frame(matrix(unlist(forecast.df), nrow = n, byrow = TRUE))  # BUG FIX: was matric()
final <- rename(final, c("X1" = "August 2017", "X2" = "September 2017", "X3" = "October 2017"))
names <- colnames(df[, 1:n])
rownames(final) <- names  # BUG FIX: was row.name(), which does not exist
write.csv(final, file = "forecast.csv")
|
bab8e5e7a8eb247cda099e49a8417b8af203e960
|
9f6226caf5268ce2ae0d8e9b5abcfe6b7f5c8c0d
|
/R/token.R
|
3977ac77669bed7cb24c29573149d09bd7a1d3a7
|
[] |
no_license
|
dgkf/reflow
|
910efc856ec64ce1fa170057aef2dc472ec33634
|
a8bcda6b24b738e3c829674c75ed834b1a484ae3
|
refs/heads/master
| 2023-09-04T16:06:12.070079
| 2021-11-09T02:32:31
| 2021-11-09T02:32:31
| 405,763,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178
|
r
|
token.R
|
# S3 generic: dispatch on class(x) to produce a normalised token string.
token <- function(x) {
  UseMethod("token")
}
# Method for xml2 nodes: tokenise the node's tag name (re-dispatches on the
# character result, so token.character does the actual normalisation).
token.xml_node <- function(x) {
  token(xml2::xml_name(x))
}
token.character <- function(x) {
  # Lower-case the input, then collapse every non-letter to an underscore.
  lowered <- tolower(x)
  gsub("[^a-z]", "_", lowered)
}
|
7497dfcdc55cb173d15f50ef457d4534dbab195d
|
d2591ae7dbf33133b7576d90593e546f8bc92e40
|
/r-scripts/RandomForest.R
|
325b1aafd7fdecb15352ecd30371b502c5e2e792
|
[] |
no_license
|
hreiten/mnist-digit-recognizer
|
3e1d26597df74853accb3a41c7e61ce30216493a
|
715c9a889eb8086240a500884976220261301011
|
refs/heads/master
| 2021-09-15T07:00:35.353669
| 2018-05-28T06:23:17
| 2018-05-28T06:23:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,207
|
r
|
RandomForest.R
|
## Random-forest classifier for the digit-recognition data.
## NOTE: the original script began with rm(list = ls(all = TRUE)); wiping the
## global environment from inside a script is an anti-pattern and was removed.
set.seed(1000)
Sys.setenv(TZ = "Africa/Johannesburg")
library(ggplot2)
library(randomForest)
library(tikzDevice)
library(caret)
library(xtable)
source("HelpFunctions.R")
exportspath <- "../exports/tree_based_methods/randomforest/"

# Read in the data; the response must be a factor for classification.
data <- read.csv("../data/Train_Digits_20171108.csv")
data$Digit <- as.factor(data$Digit)

# 80/20 train/test split (index renamed from `sample`, which shadowed base::sample).
train_idx <- sample(seq_len(nrow(data)), 0.8 * nrow(data))
train <- data[train_idx, ]
test <- data[-train_idx, ]

ntree <- 1000

# ## RANDOM FOREST IN PARALLEL ## (kept from the original for reference)
# # will not get OOB measures, so will not use it in this exercise
#
# library(doParallel)
# ncores <- detectCores()
# cl <- makeCluster(ncores)
# registerDoParallel(cl)
#
# rf <- foreach(ntree=rep(floor(ntree/ncores), ncores), .combine = combine, .packages = "randomForest") %dopar% {
#   tit_rf <- randomForest(Digit ~ ., data = train, ntree = ntree, importance=TRUE, na.action = na.exclude)
# }
# stopCluster(cl)

## RANDOM FOREST NORMAL ##
rf <- randomForest(Digit ~ ., data = train,
                   ntree = ntree,
                   importance = TRUE,
                   na.action = na.exclude,
                   do.trace = floor(ntree / 10))

# Out-of-bag error as a function of the number of trees grown.
err_df <- data.frame(trees = seq_along(rf$err.rate[, "OOB"]), err = rf$err.rate[, "OOB"])
pl <- ggplot(err_df) +
  geom_line(aes(x = trees, y = err)) +
  xlab("Number of trees") + ylab("OOB error") + ggtitle("") +
  theme_bw()
exportPlotToLatex(pl, exportspath, "rf_oob_err.tex")

# Grow a new random forest with the (visually chosen) optimal number of trees.
ntree <- 500
rf <- randomForest(Digit ~ ., data = train,
                   ntree = ntree,
                   importance = TRUE,
                   na.action = na.exclude,
                   do.trace = floor(ntree / 10))

# Predict on the held-out test set and export the confusion matrix.
pred <- predict(rf, newdata = test)
pred_df <- data.frame(pred = pred, true = test$Digit)
confM <- makeConfusionMatrix(pred, test$Digit)
exportTableToLatex(confM, exportspath, "rf_confusionmatrix.tex")

# Persist intermediate results for the report (T/F replaced by TRUE/FALSE).
write.table(err_df, file = paste0(exportspath, "rf_oob_error.csv"), row.names = FALSE, col.names = TRUE, sep = ",", append = FALSE)
write.table(pred_df, file = paste0(exportspath, "rf_predictions.csv"), row.names = FALSE, col.names = TRUE, sep = ",", append = FALSE)
|
2f579865c1609468b0132e4e0a8cbebf53c8b923
|
7a0fd3bfeebef43dd86047941fd56d6d5c3cdcb1
|
/CI1107219/Atividade-Perceptron/src/perceptron.R
|
f7b64ea543e39323a139fb11449b72ea112ae53c
|
[] |
no_license
|
alvesmarcos/deep-learning
|
207e2f612bd362b1efb63fe4546a66b33fe55fc8
|
10ce4c7535142acb23a07c486257ccf8f5d1dc7d
|
refs/heads/master
| 2020-04-10T08:49:57.017278
| 2018-05-08T19:22:59
| 2018-05-08T19:22:59
| 124,267,816
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
perceptron.R
|
library(ggplot2)
threshold <- function(y) {
  # Step function: 1 where y is strictly positive, 0 elsewhere (NA stays NA).
  # Adding 0 to the logical mask coerces it to numeric while keeping
  # names/dimensions, matching the original ifelse(y > 0, 1, 0).
  (y > 0) + 0
}
activation_func <- function(z, func) {
  # Apply the activation named by `func` (a length-1 string) elementwise to z.
  # Unknown names fall through and return NULL, as switch() did.
  if (func == 'degrau') {
    ifelse(z > 0, 1, 0)          # step function ("degrau" = step)
  } else if (func == 'sigmoid') {
    1 / (1 + exp(-z))
  } else if (func == 'tanh') {
    (2 / (1 + exp(-2 * z))) - 1
  } else if (func == 'relu') {
    ifelse(z > 0, z, 0)
  }
}
forward <- function(w,b,x_i) {
  # Single-neuron forward pass: weighted sum plus bias through the step
  # activation. The [1, 1] extracts the scalar from the 1x1 matrix result.
  z <- x_i %*% t(w) + b
  activated <- activation_func(z, 'degrau')
  activated[1, 1]
}
# Train a single perceptron on (X, Y) with the classic perceptron update rule,
# then print predictions for every row of the test matrix W.
#
# X: training inputs, one example per row.
# Y: training targets (0/1), one per row of X.
# W: test inputs, one example per row. NOTE(review): despite the name this is
#    a matrix of test cases, not a weight matrix.
# epoch: number of full passes over the training data.
# learning_rate: step size for the weight/bias updates.
train_and_test <- function(X, Y, W, epoch, learning_rate) {
  # initializing weights and bias (uniform in [-0.5, 0.5); no set.seed here,
  # so results vary between runs)
  w = matrix(runif(dim(X)[2])-0.5, nrow=1, ncol=dim(X)[2])
  b = 0
  # input sizes: number of training examples and number of test rows
  x_len = length(Y)
  w_len = dim(W)[1]
  for(step in 1:epoch) {
    for(i in 1:x_len) {
      y_pred = forward(w,b,X[i,])
      error = Y[i]-y_pred
      # perceptron rule: nudge weights/bias in proportion to the error
      w = w + learning_rate*(X[i,]*error)
      b = b + learning_rate*error
    }
  }
  print(w)
  print(b)
  # NOTE(review): `error` here is whatever the last training example left
  # behind (loop-variable leakage), not an aggregate training error.
  print(error)
  # Evaluate the trained perceptron on each test row and print the result.
  for(i in 1:w_len) {
    y_pred = forward(w, b, W[i,])
    cat("Input => ",W[i,],"\nOuput =>", y_pred, "\n----\n")
  }
}
# Training inputs: the four input pairs (0,0), (0,1), (1,0), (1,1).
x = matrix(c(0,0,0,1,1,0,1,1), nrow=4, ncol=2, byrow=TRUE)
# Test cases fed to the trained perceptron (one pair per row).
w = matrix(c(1,1,0,1,0,0,1,1), nrow=4, ncol=2, byrow=TRUE)
# Targets for x: only (1,1) maps to 1, i.e. the logical AND gate.
y = c(0,0,0,1)
# Train for 100 epochs with learning rate 0.03, then print test predictions.
train_and_test(x, y, w, 100, 0.03)
|
b5e4328ac70d9d46741a45c88169854f8814301e
|
72449ca51b9c019f8268e929bbe5c3e850235158
|
/fall2016_prematch_code/Generate_clean_golden_set.R
|
896498c3918d6608f8ee240e1b9debfabdccf011
|
[] |
no_license
|
joshuaschwab/social-networks
|
15f4660e0a9f491c2471bf1497b17a1d215a70e5
|
c1026bea660e6e0a94e4fb9c9f7f0fd8c371fc5e
|
refs/heads/master
| 2020-12-03T04:01:32.282732
| 2017-07-04T04:39:13
| 2017-07-04T04:39:13
| 95,803,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 897
|
r
|
Generate_clean_golden_set.R
|
# Load the online golden set for one parish and intersect it with the
# prepared prematch training set; save the clean golden pairs.
#####
# Uncomment exactly one parish:
#parish <- 'muyembe'
#parish <- 'nsiika'
#parish <- 'nyatoto'
#parish <- 'magunga'
#parish <- 'ogongo'
#parish <- 'mitooma'
#parish <- 'rugazi'
#parish <- 'nsiinze'
#parish <- 'nankoma'
#parish <- 'kisegi'
#parish <- 'kitwe'
#parish <- 'rubaare'
#parish <- 'nyamrisra'
parish <- 'kitare'

golden_set_file <- paste0('/home/yiqun.chen/trainset_data/', parish, '_goldenSet1100_corrected.csv')

# Load the modified csv.
# BUG FIX: the original used read.csv2(..., sep = ","); read.csv2 keeps
# dec = ",", which mis-parses "."-decimal numerics in a comma-separated
# file. read.csv already uses sep = "," and dec = ".".
golden_set <- read.csv(golden_set_file)
golden_set_na_removed <- golden_set[!is.na(golden_set$link), ]

prematch_trainset_file <- paste0('/home/yiqun.chen/trainset_data/UseSASprematch_trainSet_', parish, '.RData')
load(prematch_trainset_file)  # provides train.link

# Keep only confirmed links (link == 1) as the golden pairs.
golden.pairs <- golden_set_na_removed[golden_set_na_removed$link == 1, c('refID.1', 'refID.2')]
save(golden.pairs, train.link, file = paste0('/home/yiqun.chen/goldenset_data/', parish, 'Clean_GoldenSet.RData'))
|
629c7331b1dd7252517d0697a5b4894c13501f51
|
c9f5de2870f782a56c98c88497c24bb7b521bdb3
|
/plot2.R
|
cb885c3c9f1c7dc317bf490cbd8fea17ca0e434e
|
[] |
no_license
|
lcheeme1/ExData_Plotting1
|
501573f6e2ac063fc3a8df4438f0bafabd10a83b
|
df39b4bf9697990242dba5a715167e9f07a8cb9c
|
refs/heads/master
| 2021-01-18T15:18:55.245242
| 2014-11-09T14:20:57
| 2014-11-09T14:20:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 581
|
r
|
plot2.R
|
# Plot 2: global active power over 1-2 Feb 2007, written to plot2.png.
library(dplyr)
library(lubridate)

# Load the raw readings and restrict them to the two-day window.
power_raw <- read.csv("household_power_consumption.txt", sep=";")
window_start <- ymd("2007-02-01")
window_end <- ymd("2007-02-03")

power <- power_raw %>%
  mutate(DateTime = dmy_hms(paste(as.character(Date), " ", as.character(Time)))) %>%
  filter(DateTime >= window_start) %>%
  filter(DateTime < window_end)

# Coerce to plottable types (the raw power column may be character/factor).
plot_time <- strptime(power$DateTime, "%Y-%m-%d %H:%M:%S")
active_power <- as.numeric(as.character(power$Global_active_power))

png("plot2.png", width=480, height=480)
plot(plot_time, active_power, type="l", xlab="", ylab="Global Active Power(kilowatts)")
dev.off()
|
c6879cd4499826a9d34e5e66d64f6f5f06db97a4
|
7074008683ce97e0e40682bf680d404ba3e02aee
|
/01-removing-careless-motivation.R
|
18ef38f5c452ede231b8fe944034fb93cbfb2d41
|
[] |
no_license
|
geiser/rachel-imi-evaluation
|
82a08b6a2d60b6437269f962c4e0d3e54e54c287
|
5e360c1441090846123c9d6db93def203f0ba86f
|
refs/heads/master
| 2020-03-18T04:35:01.893755
| 2018-05-21T16:23:51
| 2018-05-21T16:23:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
01-removing-careless-motivation.R
|
# Install any missing CRAN dependencies, then the GitHub-only careless package.
wants <- c('readr', 'dplyr', 'devtools','readxl')
has <- wants %in% rownames(installed.packages())
if (any(!has)) install.packages(wants[!has])
if (!any(rownames(installed.packages()) %in% c('careless'))) {
  devtools::install_github('ryentes/careless')
}
# NOTE(review): daff is loaded below but is not in `wants`; it is assumed to
# be pre-installed.
library(daff)
library(readr)
library(dplyr)
library(careless)
library(readxl)

# Raw IMI (Intrinsic Motivation Inventory) responses, one row per student.
SourceIMI <- read_excel("data/IMI-rachel.xlsx", sheet = "IMI_Todos_Alunos")

##########################################################################
##                Removing Careless in Motivation Survey                ##
##########################################################################
resp <- select(SourceIMI, starts_with("UserID"), starts_with("Item"))
# longstring() counts the longest run of identical answers per respondent;
# the outer parentheses also print the result.
(careless_info <- careless::longstring(select(resp, -starts_with("UserID")), na=T))
# Keep complete responses whose longest identical run is at most 12 items.
respIMI <- resp[careless_info <= 12 & complete.cases(resp),]

# Write the cleaned data only if it does not already exist.
filename <- 'data/SourceIMI.csv'
if (!file.exists(filename)) {
  write_csv(respIMI, filename)
}

## Write the before/after diff in latex.
render_diff(ddIMI <- diff_data(resp, respIMI))
filename <- 'report/latex/careless-IMI.tex'
# NOTE(review): write_careless_in_latex() is not defined in this file;
# presumably it is provided by a sourced helper — verify before running.
write_careless_in_latex(
  ddIMI, filename, in_title = "in the IMI data collected over the pilot empirical study")
|
11f76060321d2452797bef4dba10895130b137d0
|
edc9289ab789afe6c5c720be9338508a01976305
|
/Dataset_operators.R
|
14d84b1129bb023813537a5de0cfe79d3c58774c
|
[] |
no_license
|
SuruthiVinothKannan/RBasics
|
f4ded39e27d5e41a385ee6b043ef04f0e249e613
|
7b137b926610128dcd4a36b2c1bb87e2493b8a37
|
refs/heads/master
| 2022-11-17T18:06:44.134231
| 2020-07-17T20:21:21
| 2020-07-17T20:21:21
| 280,517,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 598
|
r
|
Dataset_operators.R
|
# Creating a subset of data (tutorial script on data-frame indexing).
getwd()  # show the working directory where Revenue_dataset.xlsx is expected
# ROBUSTNESS: install openxlsx only when missing, instead of on every run.
if (!requireNamespace("openxlsx", quietly = TRUE)) install.packages("openxlsx")
library(openxlsx)
test <- read.xlsx("Revenue_dataset.xlsx")
dim(test)
head(test, 10)  # default of head() & tail() is 6 records
tail(test, 15)
# NOTE: `subset` shadows base::subset here; kept for compatibility.
subset <- test[100:105, ]
dim(subset)
names(subset)
str(subset)
# BUG FIX: rm() on a not-yet-existing object warns; guard with exists().
if (exists("may_subset")) rm(may_subset)
?save
may_subset <- test[1:10, c(1, 4)]
#save(may_subset,file="May_subset.R")
# Selecting ROWS from dataset
test[1:10, ]
test[10, ]
test[c(1:3, 5, 10:15), ]
# Selecting Columns from dataset
test[1:5, 1:3]
test[1:5, 5]
test[1:5, c(1, 3, 5)]
nrow(test)
colSums(is.na(test))  # count of NAs per column
|
e170dd655cbf88a1c5a34d7f88e6843440574fc7
|
530474c7537d174c797f8be66da1087bf7cf1c59
|
/R/samplePlotCompilation.R
|
3dc037f21ce9358a23501863539c5d1fceadc508
|
[
"Apache-2.0"
] |
permissive
|
bcgov/FAIBCompiler
|
409d88e9444ca26847b62e43668b41eb945f84e0
|
3baf38a21c5493b7d7cf0f4695e1cc6322eeabe3
|
refs/heads/master
| 2023-08-05T08:47:43.934344
| 2023-08-02T21:35:23
| 2023-08-02T21:35:23
| 195,121,227
| 0
| 0
|
Apache-2.0
| 2020-02-12T18:00:30
| 2019-07-03T20:19:19
|
R
|
UTF-8
|
R
| false
| false
| 18,024
|
r
|
samplePlotCompilation.R
|
#' Compile sample and plot level information
#'
#'
#' @description This function is to compile sample and plot information.
#'
#' @param compilationType character, either \code{PSP} or \code{nonPSP}. If it is \code{PSP}, it
#'        is consistent with original PSP compiler, otherwise, it
#'        is consistent with VRI compiler.
#' @param dataSourcePath character, Specifies the path that contains prepared data from raw data.
#' @param mapPath character, Specifies the path dependent maps are stored.
#' @param coeffPath character, Specifies the path dependent coeffs are stored.
#' @return A list with spatial lookup tables, the sample (cluster) level table
#'         and the plot level table (see the final \code{return()} call).
#'
#' @importFrom data.table ':='
#' @importFrom dplyr '%>%'
#' @importFrom FAIBBase merge_dupUpdate
#'
#' @export
#' @docType methods
#' @rdname samplePlotCompilation
#'
#' @author Yong Luo
samplePlotCompilation <- function(compilationType,
                                  dataSourcePath,
                                  mapPath,
                                  coeffPath){
  ## ---- Sample (cluster) level table: load and filter ----
  vi_a <- readRDS(file.path(dataSourcePath, "vi_a.rds"))
  # Drop development projects and E-type samples.
  vi_a <- vi_a[substr(PROJ_ID, 1, 3) != "DEV",]
  vi_a <- vi_a[substr(TYPE_CD, 1, 1) != "E", ]
  # The plots belong to LGMW project, which samples each polygon (a unit of sample)
  # that has one or more plots, however, the plot identity is not unique;
  # remove these plots from further compilation.
  vi_a <- vi_a[substr(TYPE_CD, 1, 1) != "W",] # double check with Bob and Rene
  # vi_a <- vi_a[!(PROJ_ID == "CAR1" & TYPE_CD == "N"),]
  ## ---- Derive measurement year: visits before June 1 count as the prior year ----
  vi_a[, meas_yr_temp := as.numeric(substr(MEAS_DT, 1, 4))]
  vi_a[, meas_yr_cut := as.Date(paste0(meas_yr_temp, "-06-01"))]
  vi_a[, MEAS_YR := ifelse(MEAS_DT >= meas_yr_cut, meas_yr_temp,
                           meas_yr_temp - 1)]
  ## ---- First/last measurement per site and remeasurement periods ----
  vi_a[, NO_MEAS := max(VISIT_NUMBER),
       by = "SITE_IDENTIFIER"]
  vi_a[VISIT_NUMBER == 1,
       ':='(MEAS_DT_FIRST = MEAS_DT,
            MEAS_YR_FIRST = MEAS_YR)]
  vi_a[VISIT_NUMBER == NO_MEAS,
       ':='(MEAS_DT_LAST = MEAS_DT,
            MEAS_YR_LAST = MEAS_YR)]
  # min(..., na.rm = TRUE) propagates the single non-NA value to all visits.
  vi_a[, ':='(MEAS_DT_FIRST = min(MEAS_DT_FIRST, na.rm = TRUE),
              MEAS_YR_FIRST = min(MEAS_YR_FIRST, na.rm = TRUE),
              MEAS_DT_LAST = min(MEAS_DT_LAST, na.rm = TRUE),
              MEAS_YR_LAST = min(MEAS_YR_LAST, na.rm = TRUE)),
       by = "SITE_IDENTIFIER"]
  vi_a[, TOTAL_PERIOD := MEAS_YR_LAST - MEAS_YR_FIRST]
  vi_a <- vi_a[order(SITE_IDENTIFIER, VISIT_NUMBER),]
  # PERIOD = years since previous visit (NA for the first visit).
  vi_a[, meas_yr_next := shift(MEAS_YR, type = "lag"),
       by = "SITE_IDENTIFIER"]
  vi_a[, PERIOD := MEAS_YR - meas_yr_next]
  vi_a[,':='(meas_yr_temp = NULL,
             meas_yr_cut = NULL,
             meas_yr_next = NULL)]
  ## ---- Attach spatial attributes (BEC, TSA, FIZ, TFL, ownership, ...) ----
  vi_a <- updateSpatial(compilationType = compilationType,
                        samplesites = vi_a,
                        mapPath = mapPath)
  if(compilationType == "PSP"){
    # populate bec and tsa based on region number and compartment
    ## the bec zone with the most sites for a given region/compartment wins
    ## based on Rene's suggestions on March 14, 2023
    spatialAvailable <- unique(vi_a[!is.na(BEC),.(SITE_IDENTIFIER, BEC, BEC_SBZ, BEC_VAR,
                                                  TSA, TSA_DESC, SAMPLING_REGION_NUMBER, COMPARTMENT_NUMBER)],
                               by = "SITE_IDENTIFIER")
    bec_avai <- spatialAvailable[, .(No_samples = length(SITE_IDENTIFIER)),
                                 by = c("SAMPLING_REGION_NUMBER", "COMPARTMENT_NUMBER",
                                        "BEC", "BEC_SBZ", "BEC_VAR")]
    # Sorting by descending sample count then taking the first row per
    # region/compartment keeps the most common BEC classification.
    bec_avai <- bec_avai[order(SAMPLING_REGION_NUMBER, COMPARTMENT_NUMBER, -No_samples),
                         .(SAMPLING_REGION_NUMBER, COMPARTMENT_NUMBER,
                           BEC_new = BEC,
                           BEC_SBZ_new = BEC_SBZ,
                           BEC_VAR_new = BEC_VAR)]
    bec_avai <- unique(bec_avai,
                       by = c("SAMPLING_REGION_NUMBER", "COMPARTMENT_NUMBER"))
    vi_a <- merge(vi_a,
                  bec_avai,
                  by = c("SAMPLING_REGION_NUMBER", "COMPARTMENT_NUMBER"),
                  all.x = TRUE)
    vi_a[is.na(BEC),
         ':='(BEC = BEC_new,
              BEC_SBZ = BEC_SBZ_new,
              BEC_VAR = BEC_VAR_new)]
    # NOTE(review): the next line computes a subset and discards it —
    # presumably leftover debugging output; it has no effect on the result.
    vi_a[is.na(BEC)]
    # Same most-common-wins strategy for TSA.
    tsa_avai <- spatialAvailable[, .(No_samples = length(SITE_IDENTIFIER)),
                                 by = c("SAMPLING_REGION_NUMBER", "COMPARTMENT_NUMBER",
                                        "TSA", "TSA_DESC")]
    tsa_avai <- tsa_avai[order(SAMPLING_REGION_NUMBER, COMPARTMENT_NUMBER, -No_samples),
                         .(SAMPLING_REGION_NUMBER, COMPARTMENT_NUMBER,
                           TSA_new = TSA,
                           TSA_DESC_new = TSA_DESC)]
    tsa_avai <- unique(tsa_avai,
                       by = c("SAMPLING_REGION_NUMBER", "COMPARTMENT_NUMBER"))
    vi_a <- merge(vi_a,
                  tsa_avai,
                  by = c("SAMPLING_REGION_NUMBER", "COMPARTMENT_NUMBER"),
                  all.x = TRUE)
    vi_a[is.na(TSA),
         ':='(TSA = TSA_new,
              TSA_DESC = TSA_DESC_new)]
    # NOTE(review): another discarded debugging subset; no effect.
    vi_a[is.na(TSA)]
    #
    # previousSamples <- readRDS(file.path(mapPath, "spatiallookup_PSP.rds"))
    # previousSamples <- previousSamples$spatiallookup
    # names(previousSamples) <- paste0(names(previousSamples), "_prev")
    # setnames(previousSamples, "SITE_IDENTIFIER_prev", "SITE_IDENTIFIER")
    # samplesites_Loc <- unique(vi_a[,
    #                                .(SITE_IDENTIFIER,
    #                                  IP_UTM, IP_NRTH, IP_EAST)],
    #                           by = "SITE_IDENTIFIER")
    #
    # allsamples <- merge(previousSamples[, inprev := TRUE],
    #                     samplesites_Loc[, incurt := TRUE],
    #                     by = "SITE_IDENTIFIER",
    #                     all = TRUE)
    # allsamples[, unid := 1:nrow(allsamples)]
    #
    # samples_skip <- allsamples[(inprev == TRUE & incurt == TRUE) &
    #                              (IP_UTM_prev == IP_UTM |
    #                                 (is.na(IP_UTM_prev) & is.na(IP_UTM))) &
    #                              (IP_EAST_prev == IP_EAST |
    #                                 (is.na(IP_EAST_prev) & is.na(IP_EAST))) &
    #                              (IP_NRTH_prev == IP_NRTH |
    #                                 (is.na(IP_NRTH_prev) & is.na(IP_NRTH)))]
    ## Samples still lacking BEC get their spatial attributes inferred from
    ## the region/compartment map.
    samples_skip <- vi_a[!is.na(BEC),]
    samples_proc <- vi_a[!(SITE_IDENTIFIER %in% samples_skip$SITE_IDENTIFIER),]
    if(nrow(samples_proc) > 0){
      ## for PSP, some samples do not have good spatial coordinates, hence, causing
      ## missing spatial attributes
      samples_proc <- updateMissingSpAttribute(spatialtable = samples_proc,
                                               mapPath = mapPath,
                                               updateMethod = "fromRegionCompartMap")
    }
    vi_a <- rbindlist(list(samples_skip, samples_proc),
                      fill = TRUE)
    ## PSP spatial lookup: one row per sample point.
    spatialLookups <- unique(vi_a[,.(SITE_IDENTIFIER, SAMP_POINT = SITE_IDENTIFIER,
                                     IP_UTM, IP_NRTH, IP_EAST, UTM_SOURCE, CORRDINATE_SOURCE, BC_ALBERS_X, BC_ALBERS_Y,
                                     Longitude, Latitude, BEC_ZONE = BEC, BEC_SBZ, BEC_VAR,
                                     TSA, TSA_DESC, FIZ, TFL, OWNER, SCHEDULE,
                                     PROJ_ID, SAMP_NO,
                                     SAMPLE_ESTABLISHMENT_TYPE = paste0("PSP_", PSP_TYPE), SAMPLE_SITE_NAME,
                                     SITE_STATUS_CODE, SITE_ACCESS_CODE, STAND_ORIGIN_CODE,
                                     STAND_DISTURBANCE_CODE, SEL_LGD = SELECTIVELY_LOGGED_IND,
                                     BGC_SS_GRD, MEAS_DT_FIRST, MEAS_DT_LAST, MEAS_YR_FIRST, MEAS_YR_LAST,
                                     TOTAL_PERIOD, NO_MEAS)],
                            by = "SAMP_POINT")
  } else {
    ## nonPSP spatial lookup (SAMPLE_ESTABLISHMENT_TYPE is attached later).
    spatialLookups <- unique(vi_a[,.(SITE_IDENTIFIER, SAMP_POINT = SITE_IDENTIFIER,
                                     IP_UTM, IP_NRTH, IP_EAST, UTM_SOURCE, CORRDINATE_SOURCE, BC_ALBERS_X, BC_ALBERS_Y,
                                     Longitude, Latitude, BEC_ZONE = BEC, BEC_SBZ, BEC_VAR,
                                     TSA, TSA_DESC, FIZ, TFL, OWNER, SCHEDULE,
                                     PROJ_ID, SAMP_NO,
                                     SAMPLE_SITE_NAME,
                                     SITE_STATUS_CODE, SITE_ACCESS_CODE, STAND_ORIGIN_CODE,
                                     STAND_DISTURBANCE_CODE, SEL_LGD = SELECTIVELY_LOGGED_IND,
                                     BGC_SS_GRD, MEAS_DT_FIRST, MEAS_DT_LAST, MEAS_YR_FIRST, MEAS_YR_LAST,
                                     TOTAL_PERIOD, NO_MEAS)],
                            by = "SAMP_POINT")
  }
  ## ---- Slim the sample table to the columns downstream steps use ----
  vi_a <- vi_a[,.(CLSTR_ID,
                  SITE_IDENTIFIER,
                  VISIT_NUMBER,
                  BEC, FIZ,
                  MEAS_DT,
                  MEAS_YR,
                  PERIOD,
                  TYPE_CD,
                  SAMPLE_SITE_PURPOSE_TYPE_DESCRIPTION,
                  PROJ_ID,
                  SAMP_NO,
                  SAMPLE_BREAK_POINT,
                  SAMPLE_BREAK_POINT_TYPE,
                  DBH_LIMIT_TAG = DBH_TAGGING_LIMIT,
                  DBHLIMIT_COUNT,
                  PROJECT_DESCRIPTOR)]
  # Record which map files were used for this compilation.
  mapsource <- data.table(mapFile = dir(mapPath, pattern = "_map"))
  spatialLookups <- list(spatiallookup = spatialLookups,
                         mapsource = mapsource)
  ## ---- Fill project group and fall-back BEC/FIZ from project ID ----
  vi_a[, PRJ_GRP := prj_ID2Grp(PROJ_ID)]
  vi_a[!(BEC %in% c("AT","BWBS","CDF","CWH","ESSF","ICH","IDF","MH",
                    "MS","PP","SBPS","SBS","SWB","BG","BAFA","CMA","IMA")),
       BEC := prj_ID2BEC(PROJ_ID)]
  vi_a[is.na(FIZ) | FIZ == " ", FIZ := "E"]
  # vi_a <- merge(vi_a,
  #               SAVegComp[,.(SITE_IDENTIFIER, PROJ_AGE_1,
  #                            PROJECTED_Year = as.numeric(substr(PROJECTED_DATE, 1, 4)))],
  #               by = "SITE_IDENTIFIER",
  #               all.x = TRUE)
  # vi_a[, measYear := as.numeric(substr(MEAS_DT, 1, 4))]
  #
  # vi_a[, SA_VEGCOMP := measYear - PROJECTED_Year + PROJ_AGE_1]
  # vi_a[, ':='(PROJ_AGE_1 = NULL,
  #             PROJECTED_Year = NULL,
  #             measYear = NULL)]
  ## ---- Plot level table: load, filter to kept clusters ----
  vi_b <- readRDS(file.path(dataSourcePath, "vi_b.rds")) %>% data.table
  vi_b <- vi_b[CLSTR_ID %in% vi_a$CLSTR_ID,]
  vi_b <- merge(vi_b, vi_a[,.(CLSTR_ID, PROJ_ID)],
                by = "CLSTR_ID",
                all.x = TRUE)
  ## ---- Site topography: take elevation/aspect/slope from the last visit ----
  sitetopography <- vi_b[,.(SITE_IDENTIFIER, VISIT_NUMBER,
                            ELEVATION = PLOT_ELEVATION,
                            ASPECT = PLOT_ASPECT,
                            SLOPE = PLOT_SLOPE)]
  sitetopography[, lastvisit := max(VISIT_NUMBER),
                 by = SITE_IDENTIFIER]
  sitetopography <- unique(sitetopography[VISIT_NUMBER == lastvisit,
                                          .(SITE_IDENTIFIER, ELEVATION,
                                            ASPECT, SLOPE)],
                           by = "SITE_IDENTIFIER")
  spatialLookups$spatiallookup <- merge(spatialLookups$spatiallookup,
                                        sitetopography,
                                        by = "SITE_IDENTIFIER",
                                        all.x = TRUE)
  # remove I from N samples in CAR1 project, as these N samples do not have
  # IPC, see communications with Rene and Chris on July 29, 2022
  vi_b <- vi_b[!(PROJ_ID == "CAR1" & TYPE_CD == "N" & PLOT == "I"),]
  vi_b <- unique(vi_b, by = c("CLSTR_ID", "PLOT"))
  ## ---- Plot weights and blow-up factors ----
  # for variable area plot (weight 1/2/4 = full/half/quarter plot)
  vi_b[V_BAF > 0 & V_FULL == TRUE, PLOT_WT := 1]
  vi_b[V_BAF > 0 & V_HALF == TRUE, PLOT_WT := 2]
  vi_b[V_BAF > 0 & V_QRTR == TRUE, PLOT_WT := 4]
  vi_b[V_BAF > 0, ':='(SAMP_TYP = "V",
                       PLOT_AREA_MAIN = as.numeric(NA),
                       BLOWUP_MAIN = V_BAF)]
  # for fixed area plot
  vi_b[is.na(V_BAF) & F_FULL == TRUE, PLOT_WT := 1]
  vi_b[is.na(V_BAF) & F_HALF == TRUE, PLOT_WT := 2]
  vi_b[is.na(V_BAF) & F_QRTR == TRUE, PLOT_WT := 4]
  # calculate main plot area (hectares)
  # for circular plot
  vi_b[V_BAF %in% c(0, NA) &
         !is.na(F_RAD),
       ':='(SAMP_TYP = "F",
            PLOT_AREA_MAIN = (pi* F_RAD^2)/10000)]
  # for rectangle plot
  vi_b[V_BAF %in% c(0, NA) &
         !is.na(PLOT_WIDTH) &
         is.na(PLOT_AREA_MAIN),
       ':='(SAMP_TYP = "F",
            PLOT_AREA_MAIN = (PLOT_WIDTH* PLOT_LENGTH)/10000)]
  # for the plot that just have plot area
  vi_b[V_BAF %in% c(0, NA) &
         !is.na(PLOT_AREA) &
         is.na(PLOT_AREA_MAIN),
       ':='(SAMP_TYP = "F",
            PLOT_AREA_MAIN = PLOT_AREA)]
  # for subplot area
  vi_b[V_BAF %in% c(0, NA) &
         !(SMALL_TREE_SUBPLOT_RADIUS %in% c(NA, 0)),
       ':='(PLOT_AREA_SUBPLOT = (pi* SMALL_TREE_SUBPLOT_RADIUS^2) / 10000)]
  vi_b[is.na(PLOT_AREA_SUBPLOT) &
         SMALL_TREE_SUBPLOT_RADIUS == 0,
       ':='(PLOT_AREA_SUBPLOT = 0)]
  vi_b[is.na(PLOT_AREA_SUBPLOT) &
         !is.na(AREA_PS),
       ':='(PLOT_AREA_SUBPLOT = AREA_PS)] # area_ps is in hectares
  # for the fixed area plot, the blowup is 1/total plot area
  vi_b[SAMP_TYP == "F",
       ':='(BLOWUP_MAIN = 1/sum(PLOT_AREA_MAIN),
            BLOWUP_SUBPLOT = 1/sum(PLOT_AREA_SUBPLOT)),
       by = "CLSTR_ID"]
  vi_b[BLOWUP_SUBPLOT %in% c(Inf, NA),
       BLOWUP_SUBPLOT := 0]
  ## ---- Number of plots and deduction plot count per cluster ----
  vi_b[, NO_PLOTS := length(PLOT), by = CLSTR_ID]
  vi_b[, PLOT_DED := 1L]
  vi_b <- merge(vi_b, vi_a[,.(CLSTR_ID, MEAS_DT)],
                by = "CLSTR_ID",
                all.x = TRUE)
  # N-type samples and all samples from 2008 on (plus some 2007 projects)
  # use the full plot count for deduction.
  vi_b[TYPE_CD == "N" | (as.numeric(substr(MEAS_DT, 1, 4)) >= 2008) |
         (as.numeric(substr(MEAS_DT, 1, 4)) == 2007 & PROJ_ID %in% c("0141", "014M", "0091")),
       PLOT_DED := NO_PLOTS]
  vi_a <- merge(vi_a,
                unique(vi_b[,.(CLSTR_ID, SAMP_TYP, NO_PLOTS, PLOT_DED)],
                       by = "CLSTR_ID"),
                by = "CLSTR_ID")
  setnames(vi_a, "BEC", "BEC_ZONE")
  if(compilationType == "nonPSP"){
    ## ---- Attach SAMPLE_ESTABLISHMENT_TYPE from the newest lookup workbook ----
    allsample_ests <- dir(coeffPath, pattern = "sample_establishment_type")
    allsample_ests <- gsub("sample_establishment_type_", "", allsample_ests)
    allsample_ests <- gsub(".xlsx", "", allsample_ests)
    allsample_est_last <- max(as.numeric(allsample_ests))
    # Three lookup sheets, applied in priority order:
    # 1) site-specific overrides, 2) project overrides, 3) standard by type.
    sample_est_1 <- openxlsx::read.xlsx(file.path(coeffPath,
                                                  paste0("sample_establishment_type_", allsample_est_last, ".xlsx")),
                                        sheet = "1_non_standard_site_identifier") %>%
      data.table
    sample_est_1 <- sample_est_1[,.(SITE_IDENTIFIER, SAMPLE_ESTABLISHMENT_TYPE)]
    sample_est_2 <- openxlsx::read.xlsx(file.path(coeffPath,
                                                  paste0("sample_establishment_type_", allsample_est_last, ".xlsx")),
                                        sheet = "2_non_standard_project") %>%
      data.table
    sample_est_2 <- sample_est_2[,.(PROJECT_NAME, TYPE_CD = SAMPLE_SITE_PURPOSE_TYPE_CODE,
                                    SAMPLE_ESTABLISHMENT_TYPE2 = SAMPLE_ESTABLISHMENT_TYPE)]
    sample_est_3 <- openxlsx::read.xlsx(file.path(coeffPath,
                                                  paste0("sample_establishment_type_", allsample_est_last, ".xlsx")),
                                        sheet = "3_standard") %>%
      data.table
    sample_est_3 <- sample_est_3[,.(TYPE_CD = sample_site_purpose_type_code,
                                    SAMPLE_ESTABLISHMENT_TYPE3 = SAMPLE_ESTABLISHMENT_TYPE)]
    # Use the first visit of each non-N site to classify establishment type.
    site_visit1 <- vi_a[TYPE_CD != "N",]
    site_visit1 <- site_visit1[!(TYPE_CD == "B" &
                                   PROJ_ID == "KOL1"),]
    ## these are test sites
    site_visit1 <- site_visit1[substr(PROJ_ID, 1, 4) != "2019",]
    site_visit1[, VISIT_NUMBER_first := min(VISIT_NUMBER),
                by = "SITE_IDENTIFIER"]
    site_visit1 <- site_visit1[VISIT_NUMBER == VISIT_NUMBER_first,]
    site_visit1 <- merge(site_visit1,
                         sample_est_1,
                         by = "SITE_IDENTIFIER",
                         all.x = TRUE)
    site_visit1[, PROJECT_NAME := PROJ_ID]
    site_visit1 <- merge(site_visit1,
                         sample_est_2,
                         by = c("PROJECT_NAME", "TYPE_CD"),
                         all.x = TRUE)
    site_visit1[is.na(SAMPLE_ESTABLISHMENT_TYPE) &
                  !is.na(SAMPLE_ESTABLISHMENT_TYPE2),
                SAMPLE_ESTABLISHMENT_TYPE := SAMPLE_ESTABLISHMENT_TYPE2]
    site_visit1[, SAMPLE_ESTABLISHMENT_TYPE2 := NULL]
    site_visit1 <- merge(site_visit1,
                         sample_est_3,
                         by = c("TYPE_CD"),
                         all.x = TRUE)
    site_visit1[is.na(SAMPLE_ESTABLISHMENT_TYPE),
                SAMPLE_ESTABLISHMENT_TYPE := SAMPLE_ESTABLISHMENT_TYPE3]
    site_visit1[, SAMPLE_ESTABLISHMENT_TYPE3 := NULL]
    # Hard-coded rules for A-type (young stand monitoring) samples.
    site_visit1[TYPE_CD == "A",
                SAMPLE_ESTABLISHMENT_TYPE := "EYSM"]
    site_visit1[TYPE_CD == "A" & PROJECT_DESCRIPTOR == "Forest Health Early YSM",
                SAMPLE_ESTABLISHMENT_TYPE := "FHYSM"]
    site_visit1 <- site_visit1[,.(SITE_IDENTIFIER, SAMPLE_ESTABLISHMENT_TYPE)]
    # Manual override for a single known site.
    site_visit1[SITE_IDENTIFIER == "2104138",
                SAMPLE_ESTABLISHMENT_TYPE := "YNS"]
    spatialLookups$spatiallookup <- merge(spatialLookups$spatiallookup,
                                          site_visit1,
                                          by = "SITE_IDENTIFIER",
                                          all.x = TRUE)
  }
  # BEC/FIZ live in the spatial lookup from here on; drop from the sample table.
  vi_a[,':='(BEC_ZONE = NULL,
             FIZ = NULL)]
  ## ---- Assemble the return value ----
  return(list(spatiallookup = spatialLookups,
              samples = vi_a,
              plots = vi_b[,.(CLSTR_ID, PLOT, PLOT_WT, PLOT_AREA_MAIN, PLOT_AREA_SUBPLOT,
                              BLOWUP_MAIN, BLOWUP_SUBPLOT,
                              PLOT_SHAPE_CODE, F_RAD,
                              PLOT_WIDTH, PLOT_LENGTH, V_BAF, SMALL_TREE_SUBPLOT_RADIUS,
                              PLOT_SLOPE, PLOT_ASPECT, PLOT_ELEVATION)]))
}
|
c0af21f199ccdd1d91c781fc9543f5985cfb1866
|
22f3f32e253acdcb407f5e2bf934bf0ff4a3d280
|
/scripts/sc_cities_sc_city_boundaries.R
|
ae62f0a9729baf967b8bfa57ce2451db11f3c7a4
|
[] |
no_license
|
ottoman91/_sc-evictions_
|
c8e4b059ba8db73fd86df38fc8c9d8efacf6809e
|
bebbaaf463f510103bf8d291fc0d9d07c02ee93b
|
refs/heads/master
| 2020-12-23T08:46:35.470630
| 2020-01-31T01:10:24
| 2020-01-31T01:10:24
| 237,101,692
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 673
|
r
|
sc_cities_sc_city_boundaries.R
|
# Script that joins the Eviction Data for South Carolina cities with the City Boundaries
# Author: Usman Khaliq
# Version: 2020-01-30

# Libraries
library(tidyverse)

# Parameters
path_sc_cities <- "data/sc_cities.rds"
path_sc_city_boundaries <- "data/sc_city_boundaries.rds"
rds_file_path <- "data/sc_cities_sc_city_boundaries.rds"

#===============================================================================

# Code

# Read both inputs, join boundaries onto the city-level eviction data,
# drop the duplicated name column, and persist the result.
sc_cities <- read_rds(here::here(path_sc_cities))
boundaries <- read_rds(here::here(path_sc_city_boundaries))

joined <- left_join(sc_cities, boundaries, by = c("geoid" = "city_id"))
joined <- select(joined, -city_name)

write_rds(
  joined,
  path = here::here(rds_file_path),
  compress = "gz"
)
|
e53080fcadd215b1d663820e07aa3e19513b9b68
|
087c25946bb6d396cff7f6d21c25c704939a6b21
|
/dictionary.R
|
0e4e8cdc07f967e67a6461b65115e9e14db71dc9
|
[] |
no_license
|
alightner/acculturationMarketInt_trust2020
|
31b52c3d83eff7dc81fbb7a829148d6c587a22c9
|
ab8d27d00d555b013ab6dd496046edb3e7e6f834
|
refs/heads/master
| 2023-01-02T04:29:55.803127
| 2020-10-27T13:58:36
| 2020-10-27T13:58:36
| 307,703,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,200
|
r
|
dictionary.R
|
# dictionary --------------------------------------------------------------
# Named lookup from human-readable labels (names) to raw survey column
# names (values). Used to relabel variables in output; the reverse mapping
# is built as var_dict2 below.
var_dict <- c(
  "id"="id",
  "age"="age",
  "sons"="sons",
  "daughters"="daughters",
  "farms"="farming",
  "donkeys"="donkeys",
  "chickens"="chickens",
  "cattle"="cattle",
  "goats"="goats",
  "sheep"="sheep",
  "TLU"="TLU",
  "market integration"="MI",
  "wealth"="wealth",
  "region" = "int_id",
  "age set" = "age_set",
  "wives"= "wives_cowives",
  "children"= "num_children",
  "# children in school"= "children_school" ,
  "% children in school"= "children_school_percent",
  "manages livestock"= "livestock_manage" ,
  "sells dairy"= "sell_milk_meat",
  "sells handcrafts"= "sell_handcrafts" ,
  "wage labor"= "wage_labor",
  "education"="education",
  "literate"="literate",
  "sells crops"= "sell_crops",
  "owns a business"= "own_business" ,
  "teaches"= "teaching",
  "misc. livelihoods"= "other_livelihood" ,
  "household size"= "hh_size",
  "household labor"= "hh_labor" ,
  "household need"= "need",
  "freq. urban travel"= "urban_travel" ,
  "acres farmed"= "farm_acres",
  "years farm experience"= "farm_experience" ,
  "freq. cattle market"= "sales_market",
  "freq. cash purchases"= "purchases_market" ,
  "freq. cell phone use"= "cell_use",
  "fertilizes crops"= "fertilize_crops",
  "condition"= "condition",
  "trust (raw)"= "trust_vignette",
  # "trust"= "trust" ,
  # "fact-checks"= "check",
  "Christian"= "christian",
  "Engai/Christian same"= "engai_christian_same",
  "god has a mind"= "god_mind" ,
  "god has a body"= "god_body",
  "god omnipotent"= "god_omnipotent" ,
  "god omniscient"= "god_omniscient",
  "god omnibenevolent"= "god_omnibenevolent" ,
  "god punishes"= "god_punish",
  "god rewards"= "god_reward",
  "freq. church/rituals"= "rituals_frequency",
  "freq. prayer"= "prayer_frequency" ,
  "freq. talk abt. god"= "disagree_god_frequency",
  "Maasai cattle rights"= "maasai_all_cattle" ,
  "polygyny"= "polygyny",
  "warrior food taboos"= "moran_eating" ,
  "cattle raiding"= "cattle_raid",
  "educate children"= "educate_children" ,
  "educate women"= "educate_women",
  "cattle > cash"= "cattle_over_cash" ,
  "belief in god is important"= "belief_god_important",
  "children share religion"= "children_share_religious" ,
  "people share religion"= "others_share_religious",
  "farm for most food"= "farming_good" ,
  "female circumcision"= "female_circumcision",
  "worry about future of Maasai"= "maasai_worry_future" ,
  "god gives comfort/safety"= "god_comfort_safety",
  "metal roof"= "roof" ,
  "solar panel"= "solar",
  "food insecurity"= "insecure",
  "dependence on livestock" = "depend",
  "trust" = "trust",
  "fact-check" = "check"
)
# Reverse mapping: raw column names -> human-readable labels.
var_dict2 <- setNames(names(var_dict), as.character(var_dict))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.