blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30eed50f1b4bdebb1feb9476726bea9e917ef0b5
|
e788c2cee9b7c0890ebea041c8a192c2af1a83bc
|
/networks.R
|
ce188c7e453e318c664a5107ef01d4f5b0082ac8
|
[] |
no_license
|
GenevieveRichards/Honours_networks_generate
|
755b24126e610ee63b4589a52cc3db14ccc522b5
|
8c421f5c0ce0ac1cf0215df85cca4ac1f1df3470
|
refs/heads/master
| 2022-05-23T04:29:57.148549
| 2020-04-26T11:30:42
| 2020-04-26T11:30:42
| 256,400,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,641
|
r
|
networks.R
|
library(igraph)
library(purrr)
library(readr)
# Generate `n` random alphanumeric identifier strings.
#
# Args:
#   n:      number of strings to generate (default 1)
#   lenght: characters per string (default 12; the misspelled parameter
#           name is kept for backward compatibility with callers that
#           pass it by name, e.g. MHmakeRandomString(26, lenght = 2))
# Returns:
#   Character vector of length `n`; each element is `lenght` characters
#   sampled with replacement from 0-9, a-z, A-Z.
MHmakeRandomString <- function(n=1, lenght=12)
{
  # vapply with a declared character(1) result replaces the original
  # pattern of initializing an integer vector (c(1:n)) and overwriting
  # it element-by-element, which relied on silent type coercion
  vapply(seq_len(n), function(i) {
    paste(sample(c(0:9, letters, LETTERS), lenght, replace=TRUE),
          collapse="")
  }, character(1))
}
# Replace base `LETTERS` with 26 random two-character labels.
# NOTE(review): this masks R's built-in LETTERS constant for the rest of
# the script; later code indexes LETTERS[1:30], so elements 27:30 are NA
# and vertices 27-30 of the 30-node graphs get NA labels -- confirm
# whether 30 labels were intended.
LETTERS <- MHmakeRandomString(26, lenght = 2)
#Bipartite Graph
# 20 vertices alternating between the two partitions, paired edges 1-2, 3-4, ...
g <- make_bipartite_graph(rep(0:1,length=20),c(1:20), directed = FALSE )
plot(g)
#ER Random Network
# Erdos-Renyi G(n, p) with n = 26 vertices and edge probability 0.26
g <- erdos.renyi.game(26, 26/100) %>% set_vertex_attr("label", value = LETTERS[1:26])
degree_value <- degree(g)
plot(g, layout=layout_with_fr)
hist(degree_value)
# export the edge list for downstream use
df <- as_long_data_frame(g)
write_csv(df, "random_network.csv")
#String
# NOTE(review): make_star() builds a star graph, yet the CSV below is
# named "ring_network.csv" -- verify which topology was intended.
g_2 <- make_star(26) %>% set_vertex_attr("label", value = LETTERS[1:26])
ends(g_2, E(g_2))
plot(g_2)
hist(degree(g_2))
df <- as_long_data_frame(g_2)
write_csv(df, "ring_network.csv")
#Small World Network - Watt and Strogatz Model
# 1-dim lattice, 30 vertices, neighborhood 5, rewiring probability 0.4
g_sm <- sample_smallworld(1, 30, 5, 0.4) %>% set_vertex_attr("label", value = LETTERS[1:30])
mean_distance(g_sm)
transitivity(g_sm, type="average")
degree(g_sm)
plot(g_sm, size = 0.3)
hist(degree(g_sm))
df <- as_long_data_frame(g_sm)
write_csv(df, "small_world_network.csv")
#Scale Free Network
# preferential-attachment (Barabasi-Albert) graph with 30 vertices
g_scale<- sample_pa(30, directed = FALSE) %>% set_vertex_attr("label", value = LETTERS[1:30])
degree(g_scale)
plot(g_scale, size = 0.2, layout = layout_with_fr)
ends(g_scale, E(g_scale))
hist(degree(g_scale))
df <- as_long_data_frame(g_scale)
write_csv(df, "scale_free_network.csv")
#Lattice
# 5 x 10 x 5 three-dimensional lattice
l <- make_lattice(c(5, 10, 5))
plot(l)
|
b7176ee6c817dcb6a03d5a79bef7c4d3ff6ed457
|
9e87da7892842890e5a5ac50c60d1203112ba257
|
/man/geom_plane.Rd
|
81fdae594b7e2a85b6001397c52ec06d9a410f4e
|
[] |
no_license
|
paleolimbot/ggstereo
|
2920a21b21f8f0af2111eef391b0734585d37383
|
e56129f315058b8eacc710c50865802593efb095
|
refs/heads/master
| 2020-04-24T20:48:26.566329
| 2019-02-27T02:11:34
| 2019-02-27T02:11:34
| 172,256,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 896
|
rd
|
geom_plane.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_plane.R
\docType{data}
\name{geom_plane}
\alias{geom_plane}
\alias{GeomPlane}
\title{Draw a plane on a stereonet}
\format{An object of class \code{GeomPlane} (inherits from \code{Geom}, \code{ggproto}, \code{gg}) of length 5.}
\usage{
geom_plane(mapping = NULL, data = NULL, stat = "identity",
position = "identity", ..., n = 101, na.rm = FALSE,
show.legend = NA, inherit.aes = TRUE)
GeomPlane
}
\arguments{
\item{mapping}{A mapping}
\item{data}{Data}
\item{position}{Which position to use}
\item{...}{Set aesthetics}
\item{n}{Number of points to use to generate path}
\item{na.rm}{Remove points with missing values}
\item{show.legend}{Show legend}
\item{inherit.aes}{Inherit aesthetics from the plot}
\item{geom}{Which geometry to use}
}
\description{
Draw a plane on a stereonet
}
\keyword{datasets}
|
4ff4d09c926ad2db7dbdec9d84f31536be28ee0b
|
d73061d7fcbed9da14de118cef09e9c251c358ed
|
/code/QA_code/CDEC_daily_BAS_QA.R
|
d8a68cb913d70879a338e89bec9ffa799259ad81
|
[] |
no_license
|
kmabbott/streamtemp_classification
|
9bd3534cdcb06034f8699a6f21d054ab6cfecb4e
|
c5e1157e9421ed3d25f3ac0767d0901d62ceb953
|
refs/heads/main
| 2023-08-17T11:12:11.484170
| 2021-09-22T20:51:10
| 2021-09-22T20:51:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,420
|
r
|
CDEC_daily_BAS_QA.R
|
# Code description --------------------------------------------------------
# Code to review temperature data from gage CDEC_BAS.
# Workflow: load the daily series, inspect it interactively in blocks of
# ~1000 rows, drop bad observations, accumulate the cleaned blocks into
# cdec_daily_BAS_QA, then save the QA'd data and update the progress log.
library(tidyverse)
library(lubridate)
library(weathermetrics)
library(plotly)
# Get Data ----------------------------------------------------------------
file_list <- list.files("data/data_review/")
# read in next file:
file_list[[2]]
cdec_daily_BAS <- read_rds(path = paste0("data/data_review/",file_list[[2]]))
# Plot --------------------------------------------------------------------
# now make an interactive plot of first 1000 values
ggplotly(
  ggplot() + geom_point(data=cdec_daily_BAS[1:1000,], aes(x=date, y=value_mean_C)))
# Review, QA, and Repeat --------------------------------------------------
# rows 1-1000 look clean; keep them as the start of the QA'd dataset
cdec_daily_BAS_QA <- cdec_daily_BAS[1:1000,]
ggplotly(
  ggplot() + geom_point(data=cdec_daily_BAS[1001:2000,], aes(x=date, y=value_mean_C)))
# rows 1001-2000: drop positions 602:608 of the slice (bad observations)
cdec_daily_BAS_1001_2000 <- cdec_daily_BAS[1001:2000,]
cdec_daily_BAS_1001_2000 <- cdec_daily_BAS_1001_2000[-c(602:608),]
ggplotly(
  ggplot() + geom_point(data=cdec_daily_BAS_1001_2000, aes(x=date, y=value_mean_C)))
cdec_daily_BAS_QA <- rbind(cdec_daily_BAS_QA,cdec_daily_BAS_1001_2000)
# rows 2001-3000 look clean; append as-is
ggplotly(
  ggplot() + geom_point(data=cdec_daily_BAS[2001:3000,], aes(x=date, y=value_mean_C)))
cdec_daily_BAS_QA <- rbind(cdec_daily_BAS_QA,cdec_daily_BAS[2001:3000,])
# rows 3001-3267: remove the flagged 2019-02-13 observation
ggplotly(
  ggplot() + geom_point(data=cdec_daily_BAS[3001:3267,], aes(x=date, y=value_mean_C)))
cdec_daily_BAS_3001_3267 <- cdec_daily_BAS[3001:3267,] %>%
  filter(date != "2019-02-13")
ggplotly(
  ggplot() + geom_point(data=cdec_daily_BAS_3001_3267, aes(x=date, y=value_mean_C)))
# BUG FIX: previously the *unfiltered* slice cdec_daily_BAS[3001:3267,]
# was appended here, silently re-introducing the 2019-02-13 row that the
# filter above removed. Append the filtered slice instead.
cdec_daily_BAS_QA <- rbind(cdec_daily_BAS_QA,cdec_daily_BAS_3001_3267)
# Final review ------------------------------------------------------------
#plot QA'd dataset to confirm all points look good
ggplotly(
  ggplot() +geom_point(data = cdec_daily_BAS_QA, aes(x=date, y=value_mean_C)))
#save QA'd dataset as a .rds file
write_rds(cdec_daily_BAS_QA, path = "data/QA_data/cdec_daily_BAS_QA.rds")
#update the gage_QA_progress
gage_QA_progress <- read_csv("data/data_review/gage_QA_progress.csv")
#note reviewer initials, whether review is complete, and any final notes
gage_QA_progress[gage_QA_progress$site_id=="BAS",4:6] <- c("ADW", "Y", "QA complete")
#save updated dataframe to the .csv
write_csv(gage_QA_progress, path = "data/data_review/gage_QA_progress.csv")
|
ba7bae5fa18cab409fd8a537072844bdd54f72d8
|
6274edaf656fb18fc9d5eb16be467734f5bc3588
|
/tutoriais/forSuzy.r
|
7f662ba7d5f7782afa6f0962ee8979e78e75bcd5
|
[] |
no_license
|
AlertaDengue/AlertaDengueAnalise
|
107ce9ada7aafe84d738cc702acbb97e2189d035
|
167904fc48767a4bbfbd83fe94807f3a50b4d1b2
|
refs/heads/master
| 2023-03-11T22:33:44.638586
| 2023-02-21T14:46:10
| 2023-02-21T14:46:10
| 21,705,658
| 2
| 1
| null | 2022-04-21T04:33:30
| 2014-07-10T18:32:45
|
R
|
UTF-8
|
R
| false
| false
| 1,347
|
r
|
forSuzy.r
|
# Data extraction for the study with Suzy's group
# (original: "extracao de dados para o estudo com o grupo da Suzy")
library(AlertTools)
con<-DenguedbConnect(pass = pw)
# Notification data from geocod Municipality:
geocod<-3304557
query.txt <- paste0("SELECT * from
\"Municipio\".\"Notificacao\"
WHERE municipio_geocodigo = ", geocod)
df <- dbGetQuery(con, query.txt)
# Filter duplicate data and missing values:
target.cols <- c('nu_notific','dt_digita', 'municipio_geocodigo')
df.mun.clean <- df[!duplicated(df[, target.cols]) & !is.na(df$dt_digita),
c('cid10_codigo','dt_sin_pri' ,'dt_notific', 'dt_digita')]
# Filter by years:
df.mun <- df.mun.clean[df.mun.clean$dt_notific >= '2012-01-01' &
df.mun.clean$dt_notific <= '2019-12-31', ]
table(df.mun$cid10_codigo)
# normalize the "A920" code variant to dotted form so the filter below matches
df.mun$cid10_codigo[df.mun$cid10_codigo=="A920"] <- "A92.0"
library(tidyverse)
# keep only the CID-10 codes of interest (A90, A92.0, A928)
df.mun <- df.mun %>%
filter(cid10_codigo %in% c("A90","A92.0","A928"))
# Twitter data
# BUG FIX: the query previously concatenated the geocode directly
# against "AND" (producing e.g. "... = 3304557AND data_dia ..."),
# which is invalid SQL; a leading space before AND fixes the statement.
query.txt <- paste0("SELECT * from
\"Municipio\".\"Tweet\"
WHERE \"Municipio_geocodigo\" = ", geocod, " AND data_dia <='2020-01-01'")
tw <- dbGetQuery(con, query.txt)[c("data_dia","numero")]
tail(tw)
range(df.mun$dt_notific)
range(tw$data_dia)
# Save object:
save(df.mun, tw, file='dengue-chik-zika-Rio-2012-2019.RData')
|
8eaf38977b6cca6797441f0964293819b6ce2250
|
952ded2996c1960f1f3167749e9054df514e065c
|
/lib/Player_feature/Make_link_playoffs.R
|
60221f34ade15fb8eace8c14f0502e81e201a81b
|
[] |
no_license
|
RuxuePeng/Spr2017-proj5-grp1
|
d42b73b726d651a2b619f77a4b66c689dbe3eb38
|
5810275bb4047343a568372408b4db7a9dbc7014
|
refs/heads/master
| 2021-01-21T11:59:58.430330
| 2018-04-14T19:17:21
| 2018-04-14T19:17:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
Make_link_playoffs.R
|
# Build basketball-reference box-score URLs for playoff games.
# The date column changes format for the playoffs data, so this function
# rebuilds the links from scratch.
# Link pattern: /boxscores/ + YYYYMMDD + 0 + home-team abbreviation + .html
#   e.g. http://www.basketball-reference.com/boxscores/201510290NYK.html
#
# Args:
#   Datatable: table of all games a team played in one season; needs
#              columns Date ("YYYY-MM-DD"), X ("@" marks away games),
#              and Opp (opponent abbreviation)
#   order:     index of the team's file, e.g. "ATL_2015.csv" is the 1st
#   File_list: all file names under the path (first 3 chars = team abbrev)
# Returns:
#   Character vector with one box-score URL per game.
Make_link_playoffs = function(Datatable, order,File_list){
  # dates arrive as "YYYY-MM-DD"; strip the dashes to get "YYYYMMDD"
  game_dates <- gsub("-", "", Datatable$Date)
  # this team's abbreviation comes from its file name
  abbrevs <- substr(File_list, 1, 3)
  host_team <- rep(abbrevs[[order]], length(Datatable$Date))
  # box scores for away games ("@" in column X) are hosted under the
  # opponent's abbreviation instead
  away_rows <- which(Datatable$X == "@")
  host_team[away_rows] <- Datatable$Opp[away_rows]
  paste0("http://www.basketball-reference.com/boxscores/",
         game_dates, "0", host_team, ".html")
}
|
955a9f7917bb5d82a55cc40445d2e08b1671e239
|
7cb61ada8a73c86316182c2ed6eb4685ee3bffe6
|
/oldstuff/DBRicerche/ui.R
|
68216cb2bf643bb25747862bff343f9eb6571632
|
[] |
no_license
|
TMax66/IZSLEReasearch
|
4b99b7acbfe61f255e8c5388cc75202201f190d7
|
34c93f6a2d338710efede39187886b20399669b8
|
refs/heads/master
| 2023-04-08T10:47:06.737184
| 2023-03-26T16:27:38
| 2023-03-26T16:27:38
| 191,943,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,669
|
r
|
ui.R
|
# shinydashboard UI for the IZSLER research-projects database.
# Three sidebar tabs: "risp" (dashboard overview), "corr" (current
# research), "dati" (browse the database table).
# NOTE(review): references a data frame `ds` (column `Tipologia`) that
# must already exist when this UI is built -- confirm it is loaded in
# global.R / server.R.
ui <- dashboardPage(
dashboardHeader(title = "Dbase Progetti di Ricerca IZSLER", titleWidth = 450),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard", tabName = "risp", icon = icon("dashboard")),
menuItem("Ricerca Corrente", tabName = "corr", icon = icon("th")),
menuItem("Consulta database", tabName = "dati", icon = icon("th"))#,
#menuItem("Text mining Analysis", tabName="tm", icon = icon("edit"))
)
),
dashboardBody(
# tags$head(tags$style(HTML('
# .modal-lg {
# width: 88%;
#
# }
# '))),
tabItems(
##### first tab item: dashboard overview #####
# left column: value boxes with project counts;
# right column: research-type selector and trend plot
tabItem(
tabName = "risp",
fluidPage(
# box(width=12, solidHeader = TRUE,
fluidRow(
column(3,
valueBoxOutput("Quest", width = NULL),
valueBoxOutput("corr", width=NULL),
valueBoxOutput("fin", width=NULL),
valueBoxOutput("eu", width=NULL),
valueBoxOutput("reg", width=NULL),
valueBoxOutput("af", width=NULL),
valueBoxOutput("ccm", width=NULL),
valueBoxOutput("at", width=NULL)),
column(8,
selectInput("tipo", "Seleziona la tipologia di ricerca",
c("Tutte",unique(as.character(ds$Tipologia)))),
plotOutput("trend", height = 500))
)
#),
)
)
,
##### second tab item: current research, with two tab panels #####
tabItem(
tabName = "corr",
fluidPage(
tabBox( width = 12,
##### panel: current research (IZSLER's role)
tabPanel("Ruolo dell'IZSLER",
fluidRow(
column(3,br(),br(),br(),
valueBoxOutput("capo", width = NULL),
valueBoxOutput("uo", width = NULL),
valueBoxOutput("solo", width = NULL)),
column(9,
box(title="Responsabili scientifici e coinvolgimento dell'IZSLER", width = 9,
solidHeader = TRUE, status = "primary", background = "black",
plotOutput("rs", height=650)
))
)
),
##### panel: topic / text-mining views ####
tabPanel("Topic",
fluidRow(
box(title="Termini maggiormente usati nei titoli",
sliderInput("nterm","# termini", min=5, max=50,value = 20),
status = "primary",solidHeader = TRUE,height=650, background = "black",
plotOutput("w")),
box(title="Cluster termini", status = "primary",solidHeader = TRUE,height=600, background = "black",
sliderInput("sparse","sparse", min=0.8, max=0.99,value = 0.956),
plotOutput("clust")),
br(),
box( title="Associazione tra termini", status = "primary",solidHeader = TRUE, height=650, width=9,
background = "black",
textInput("term", "Inserisci una parola chiave", "virus"),
sliderInput("ass", "correlazione", min=0.1, max=0.5, value=0.2),
plotOutput("asso" ))
#
# box(title="Word Cloud", status="danger", solidHeader=TRUE,height=650,
# sliderInput("size","Cloud Size", min=0.3, max=1,value = 0.4),
# wordcloud2Output('wordcloud2'))
)
))),
hr(), br()
# actionButton("tabBut", "Word Cloud"),
#
#
# bsModal("modalExample", "Word Cloud", "tabBut", size = "large",
# box(title="Word Cloud", width = NULL, status="danger", solidHeader=TRUE,
# # sliderInput("size","Cloud Size", min=0.3, max=1,value = 0.4),
# wordcloud2Output('wordcloud2')))
),
##### third tab item: full database table ######
tabItem(
tabName = "dati",
fluidRow(
box(width = 12,
DT::dataTableOutput("db")
)
)
)
)
)
)
|
b7297ee2065bbd7081cb1148365091fad2d34875
|
ed28666d9201bf050c305f0740756f7730a66ef3
|
/NatureEE-data-archive/Run203071/JAFSdata/JAFSnumPerPatch72588.R
|
4553c2123e2d71187b51feca435e9e53db3254e0
|
[] |
no_license
|
flaxmans/NatureEE2017
|
7ee3531b08d50b3022d5c23dbcf177156c599f10
|
b3183abe6bb70f34b400d0f5ec990cce45e10b33
|
refs/heads/master
| 2021-01-12T08:02:37.153781
| 2017-01-27T15:48:55
| 2017-01-27T15:48:55
| 77,110,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33
|
r
|
JAFSnumPerPatch72588.R
|
numPerPatch72588 <- c(2568,2432)
|
0d8242c483cecc6096da61c21840be9b83af241c
|
e52778187dbefd2b04c245b8b591a6608fdcd387
|
/src/prazosin_analysis.R
|
56f605a7bd28a70e8329b21722b3b57c44d8c92e
|
[
"MIT"
] |
permissive
|
philipphoman/prazosin
|
0768c834c3ad3cfc52ccdb1a1f84b8276dd145f7
|
d1aa2b0241c991d432885a52c4d5889bf91c7a07
|
refs/heads/master
| 2020-03-22T20:35:39.272035
| 2018-07-11T18:30:05
| 2018-07-11T18:30:05
| 140,613,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38,181
|
r
|
prazosin_analysis.R
|
#
# Prazosin analysis in R
# Started 8/8/16
# PH
#
rm(list = ls())
library('plyr')
library('lme4')
#library('lmerTest')
library('lsmeans')
library('pwr')
# redirect output
#sink("output/R/output.txt")
plotpower <- function(df=NULL, alpha=0.05, power=0.8, n=NULL,
d=NULL, f=NULL) {
# Prints a series of power analyses (pwr.anova.test / pwr.t.test) for
# the effect sizes derived in the comments below.
# NOTE(review): despite the name, nothing is plotted, and none of the
# arguments (df, alpha, power, n, d, f) are used -- every call below
# hard-codes its values. Confirm whether parameterization was intended.
# Args:
#   (currently unused; kept for interface compatibility)
# Returns:
#   NULL; results are printed.
# Partial_eta_squared = F * df_effect / F * df_effect + df_error
#
# In Soeter and Kindt (2011, Learn Mem), the decreased speed of
# extinction under yohimbine can thus be quantified as follows:
#
# Partial_eta_squared = 6.23 * 7 / 6.23 * 7 + 22
# = 43.61 / 22 + 43.61
# = 0.66
#
# Cohen's f^2 can then be calculated as:
#
# f^2 = eta_p^2 / 1 - eta_p^2
# = 0.66 / 0.34
# = 1.94
# f = sqrt(1.94)
# f = 1.39
#
#
# For the difference in acquisition, there is no precise F-value; but
# if we, conservatively, assume an upper bound of 0.99, we get:
#
#
# Partial_eta_squared = 0.99 * 1 / 0.99 * 1 + 28
# = 0.99 / 28 + 0.99
# = 0.03
#
# f^2 = 0.03 / 0.97
# = 0.03
# f = 0.17
#
# For the paper from 2012 (Neuropsychopharmacol), where no significant
# differences between groups in acquisition are again demonstrated,
# the F-value was 2.09, resulting in an effect size of f^2=0.07 and
# f=0.26. Regarding extinction, the effect size was f=0.87.
#
# Original effect size for extinction
pwe <- pwr.anova.test(k=2, n=NULL, f=1.39, sig.level=0.05, power=0.9)
print(pwe)
# Effect size for extinction in 2012 paper
pwe <- pwr.anova.test(k=2, n=NULL, f=0.87, sig.level=0.05, power=0.9)
print(pwe)
# Conservative lower bound would be:
pwe <- pwr.anova.test(k=2, n=NULL, f=0.5, sig.level=0.05, power=0.9)
print(pwe)
# Effect size for difference in acquisition
pwa <- pwr.anova.test(k=2, n=NULL, f=0.17, sig.level=0.05, power=0.9)
print(pwa)
# Effect size for difference in acquisition in 2012 paper
pwa <- pwr.anova.test(k=2, n=NULL, f=0.26, sig.level=0.05, power=0.9)
print(pwa)
# Convert to Cohen's d
# f^2 = d^2 / 2k
# d = sqrt(2k * f^2)
# d = sqrt(2k) * f
#
# Thus, to calculate the expected strength of the difference between
# extinction and acquisition, we transform the conservative sizes of
# both effect sizes to cohen's d and subtract them:
#
# d = 0.87 * 2 - 0.20 * 2
# d = 1.22
pwi <- pwr.t.test(n=NULL, d=1.22, sig.level=0.05, power=0.9)
print(pwi)
pwi <- pwr.t.test(n=NULL, d=1.0, sig.level=0.05, power=0.9)
print(pwi)
}
readdat <- function(df=NULL, fn=NULL, resptype="cr",
exclids=c(25, 28)) {
# Reads and prepares the prazosin trial-level dataset.
# Args:
#   df:       optional pre-loaded data frame; if NULL, read from `fn`
#   fn:       CSV path (default "../data/prazosin.csv")
#   resptype: which stimuli to keep -- "cr" (CS+/CS-), "ur" (CS+US),
#             or "crur" (all non-empty stim)
#   exclids:  subject IDs to exclude (NULL keeps everyone)
# Returns:
#   Data frame with factorized ID/group/stage/stim, an early/late
#   `phase` label per stage, transformed outcome columns, and a
#   mean-centered trial index `ctrialc`.
if (is.null(fn)) fn <- "../data/prazosin.csv"
if (is.null(df)) df <- read.csv(fn)
switch(resptype,
cr = {
df <- subset(df, stim=="CSplus"|stim=="CSminus")
},
ur = {
df <- subset(df, stim=="CSplusUS")
},
crur = {
df <- subset(df, !stim=="")
})
# phase cutoffs differ by stage and stimulus: from day 2 on, CS- trial
# numbering is shifted by one relative to CS+ (see ctrialc below)
df$phase[df$stage=="Acquisition"&df$ctrial<5] <- "early"
df$phase[df$stage=="Acquisition"&df$ctrial>4] <- "late"
df$phase[df$stage=="Extinction" &df$ctrial<6&df$stim=="CSplus"] <- "early"
df$phase[df$stage=="Extinction" &df$ctrial>5&df$stim=="CSplus"] <- "late"
df$phase[df$stage=="Extinction" &df$ctrial<7&df$stim=="CSminus"] <- "early"
df$phase[df$stage=="Extinction" &df$ctrial>6&df$stim=="CSminus"] <- "late"
df$phase[df$stage=="Retrieval" &df$ctrial<6&df$stim=="CSplus"] <- "early"
df$phase[df$stage=="Retrieval" &df$ctrial>5&df$stim=="CSplus"] <- "late"
df$phase[df$stage=="Retrieval" &df$ctrial<7&df$stim=="CSminus"] <- "early"
df$phase[df$stage=="Retrieval" &df$ctrial>6&df$stim=="CSminus"] <- "late"
# this is to remove first CS- (which shouldn't be done)
#df$phase[df$stage=="Extinction" &df$ctrial==1&df$stim=="CSminus"] <- NA
#df$phase[df$stage=="Retrieval"  &df$ctrial==1&df$stim=="CSminus"] <- NA
#df$phase[df$stage=="Extinction" &df$ctrial<5] <- "early"
#df$phase[df$stage=="Extinction" &df$ctrial>4] <- "late"
#df$phase[df$stage=="Retrieval"  &df$ctrial<5] <- "early"
#df$phase[df$stage=="Retrieval"  &df$ctrial>4] <- "late"
# factorize factors
df$ID <- factor(df$ID)
df$group <- factor(df$group)
df$stage <- factor(df$stage)
df$stim <- factor(df$stim)
# create transformed dependent variable
df$sqrtdcm1 <- sqrt(df$DCM1)
df$logdcm1 <- log10(df$DCM1+1)
df$lndcm1 <- log(df$DCM1+1)
# mean-center the trial-variable
df$ctrialc <- df$ctrial - (4.5 + 1*(df$day>1) + 0.5 *
(df$stim=="CSminus"&df$day>1))
if (!is.null(exclids)) df <- subset(df, !ID %in% exclids)
return(df)
}
calcstages <- function(df=NULL) {
# Summarize outcome by group, stage, phase, and stim
# Args:
#   df: trial-level data frame with columns group, stage, phase, stim,
#       ID, and outcome
# Returns:
#   A list of six summary data frames:
#   [[1]] per-subject means by group/stage/phase/stim
#   [[2]] per-subject CS+ minus CS- difference by group/stage/phase
#   [[3]] group-level summary of [[2]] with 95% CI half-width `ci`
#   [[4]]-[[6]] the same three summaries without the phase split
dfms <- ddply(df, c("group", "stage", "phase", "stim", "ID"),
summarize,
n=sum(!is.na(outcome)),
m=mean(outcome, na.rm=TRUE),
sd=sd(outcome, na.rm=TRUE), se=sd/sqrt(n))
# mdiff relies on factor-level order within each cell (diff of the two
# stim means); with levels CSminus, CSplus this is CS+ minus CS-
dfmsd <- ddply(dfms, c("group", "stage", "phase", "ID"),
summarize,
mdiff=diff(m))
dfm <- ddply(dfmsd, c("group", "stage", "phase"),
summarize,
n=sum(!is.na(mdiff)),
m=mean(mdiff, na.rm=TRUE),
sd=sd(mdiff, na.rm=TRUE),
se=sd/sqrt(n))
# NOTE(review): the CI multiplier hard-codes n=19 (df = 19-1) rather
# than using each cell's own n -- confirm 19 subjects per group.
ncp <- abs(qt(0.05/2, 19-1))
dfm$ci <- dfm$se * ncp
#dfm.diff <- ddply(dfm, c("group", "stage", "phase"), summarize,
# mdiff=diff(M), sd=(sqrt(sum(SD^2))),
# se=sd/sqrt(N))
#dfm.diff$ci <- dfm.diff$se * ncp
# No phases
dfmsp <- ddply(df, c("group", "stage", "stim", "ID"),
summarize,
n=sum(!is.na(outcome)),
m=mean(outcome, na.rm=TRUE),
sd=sd(outcome, na.rm=TRUE), se=sd/sqrt(n))
dfmspd <- ddply(dfmsp, c("group", "stage", "ID"),
summarize,
mdiff=diff(m))
dfmp <- ddply(dfmspd, c("group", "stage"),
summarize,
n=sum(!is.na(mdiff)),
m=mean(mdiff, na.rm=TRUE),
sd=sd(mdiff, na.rm=TRUE),
se=sd/sqrt(n))
dfmp$ci <- dfmp$se * ncp
out <- list(dfms, dfmsd, dfm, dfmsp, dfmspd, dfmp)
return(out)
}
plotgsp <- function(df=NULL, outcome="logdcm1", lb=NULL) {
# Plot overview of stimulus discrimination by group, stage, and phase.
#
# Args:
#   df:      summary data frame with columns m (mean), ci (95% CI
#            half-width), group, stage, phase. BUG FIX: this argument
#            was previously ignored in favor of the global `dfm`; it is
#            now used when supplied, with the global kept as a fallback
#            so existing call sites are unaffected.
#   outcome: unused; kept for interface compatibility
#   lb:      unused; kept for interface compatibility
# Returns:
#   NULL; draws on the current graphics device.
# NOTE(review): still reads the global `ylab` for the axis label and
# calls the sibling helper plotscr().
if (!is.null(df)) dfm <- df
tmp <- par(mar=c(14, 8, 2, 2))
dfm$ctrial <- c(1:length(dfm$m))
dfm$eb <- dfm$ci
mxys <- max(dfm$m) * 1.9
mnys <- mxys * -0.5
tmp <- plot(x=dfm$ctrial, y=dfm$m, axes=FALSE,
xlab="", ylab="", col="white", ylim=c(mnys, mxys))
tmp <- axis(1, at=c(1:length(dfm$m)), labels=paste(dfm$group, dfm$stage, dfm$phase),
las=3, cex=0.5, font=2)
tmp <- abline(h=0, lwd=1.5, lty=3)
tmp <- axis(2)
tmp <- mtext(paste(ylab, "mean with 95% CI"), 2, 3, font=2)
tmp <- plotscr(df=dfm, l=FALSE, lwd=2, pch=19)
}
createfulldf <- function(df=NULL) {
# Uses group/stage/phase/stim summaries to find subjects with positive
# early/late stimulus differentiation during acquisition.
#
# Args:
#   df: trial-level data frame; defaults to the global `p`. BUG FIX:
#       the argument was previously ignored and the global was always
#       used.
# Returns:
#   Factor of qualifying subject IDs. BUG FIX: the result was
#   previously the value of an assignment (returned only invisibly);
#   it is now returned explicitly.
if (is.null(df)) df <- p
out <- calcstages(df)
dfm <- out[[2]]
dfm$stimdiff <- dfm$mdiff
dfmm <- subset(dfm, stage=="Acquisition"&((phase=="early"|phase=="late")&stimdiff>0))
dfmm$ID <- factor(dfmm$ID)
ids <- dfmm$ID
return(ids)
}
getposacqids <- function(df=NULL) {
# Lists all IDs with *positive* stimulus differentiation during
# acquisition. (The previous comment said "non-positive", but the
# filter is stimdiff > 0 and the function name says "pos".)
#
# Args:
#   df: trial-level data frame; defaults to the global `p`. BUG FIX:
#       the argument was previously ignored and the global was always
#       used.
# Returns:
#   Factor of qualifying subject IDs.
if (is.null(df)) df <- p
out <- calcstages(df)
dfm <- out[[5]]
dfm$stimdiff <- dfm$mdiff
dfmm <- subset(dfm, stage=="Acquisition"&stimdiff>0)
dfmm$ID <- factor(dfmm$ID)
ids <- dfmm$ID
return(ids)
}
calcmlm <- function(df=NULL, form=NULL, REML=FALSE) {
# Calculates multilevel model via lme4::lmer.
# Args:
#   df:   trial-level data frame; defaults to readdat()'s output
#   form: model formula; defaults to the full group x stage x stim
#         model with trial covariate and by-subject random slopes
#   REML: passed through to lmer (FALSE = ML, for model comparison)
# Returns:
#   The fitted lmerMod object.
if(is.null(df)) df <- readdat()
if(is.null(form)) {
f <- "outcome ~ ctrialc + group*stage*stim + (1+stage*stim|ID)"
form <- as.formula(f)
}
model <- lmer(form, df, REML=REML)
return(model)
}
teststagestim <- function(df=NULL) {
# One-sample t-test of the CS+ minus CS- difference in means.
#
# Args:
#   df: data frame with columns `stim` ("CSplus"/"CSminus") and `m`
#       (per-row means); CS+ and CS- rows are paired by position
# Returns:
#   Effect size of the paired difference (mean / sd), with the t-test
#   printed as a side effect.
cs_minus <- df$m[which(df$stim == "CSminus")]
cs_plus <- df$m[which(df$stim == "CSplus")]
diffs <- cs_plus - cs_minus
print(t.test(diffs))
return(mean(diffs, na.rm=TRUE) / sd(diffs, na.rm=TRUE))
}
plotscr <- function(df, l=TRUE, lty=1, lwd=1, p=TRUE, pch=1,
eb=TRUE, lcol="black", cex=1.0) {
# Plots points, lines, and error bars of a scr-dataframe
#
# Args:
# df: should include: ctrial, m, and (when eb=TRUE) an `eb` column
#     giving the error-bar half-width
# l: plot lines (default=TRUE)
# lty: line type (default=1)
# lwd: line width (default=1)
# p: plot points (default=TRUE)
# pch: format of points (default=1)
# eb: plot error bars (default=TRUE)
# lcol: line color (default=black)
# cex: size
#
# Returns:
#   NULL; draws onto the currently open plot.
#
if (l == TRUE) {
l <- lines(df$ctrial, df$m, cex=cex, lwd=lwd, col=lcol,
lty=lty)
}
if (p == TRUE) {
plt <- points(df$ctrial, df$m,
type="p", pch=pch,
cex=cex)
}
if (eb == TRUE) {
# vertical error bars from m - eb to m + eb (length=0 => plain bars)
mses <- df$m - df$eb
pses <- df$m + df$eb
a <- arrows(df$ctrial, mses, df$ctrial, pses, length=0.00,
angle=90, code=3, lwd=lwd)
}
}
plottrialwise <- function(df=NULL, mxyp=0.65, mnys=-0.05, mxys=0.6) {
# Plot trial-wise CS+/CS- means (with SE bars) in a 2 group x 3 stage
# panel grid.
# Args:
#   df:   trial-level data frame with factors group/stage/stim and
#         columns ctrial and outcome
#   mxyp: upper y-axis limit for all panels
#   mnys, mxys: unused; kept for interface compatibility
# Returns:
#   NULL; draws on the current graphics device.
# NOTE(review): reads the global `ylab` for axis labels and calls the
# sibling helper plotscr().
p <- df
tmp <- par(mar=c(5, 5, 2, 5),oma=c(8, 4, 4, 0))
#tmp <- layout(matrix(c(1, 4, 2, 5, 3, 6), ncol=3))
tmp <- layout(matrix(c(1, 4, 2,
5, 3, 6), ncol=3))
#layout.show(tmp)
x2lb = c("Day 1:\nAcquisition", "Day 2:\nExtinction",
"Day 3:\nRe-Extinction")
mxy = mxyp
# one panel per group (rows) x stage (columns)
for (i in 1:nlevels(p$group)) {
for (j in 1:nlevels(p$stage)) {
print(paste(i, j))
pp <- subset(p, group==levels(p$group)[i]&
stage==levels(p$stage)[j])
# create means and se
rm <- ddply(pp, c("ctrial", "stim"),
summarise, n=sum(!is.na(outcome)),
m=mean(outcome, na.rm=TRUE),
sd=sd(outcome, na.rm=TRUE), eb=sd/sqrt(n))
#mxy <- ifelse(max(rm$m)*1.2 > mxy, max(rm$m)*1.2, mxy)
#mxy <- 0.35
mxx <- max(rm$ctrial)
rmm <- subset(rm, stim=='CSminus')
rmp <- subset(rm, stim=='CSplus')
# take care of CS-
# (from day 2 on, CS- trial numbering is shifted by one)
if (j > 1) {
rmm$ctrial <- rmm$ctrial - 1
}
tmp <- plot(x=rmm$ctrial, y=rmm$m, xlab="", ylab="",
xlim=c(min(rmm$ctrial), mxx),
ylim=c(0.05, mxy), bty="n", las=1, cex=1.8,
cex.axis=1.2)
tmp <- box(lwd=1.5)
tmp <- plotscr(df=rmm, pch=1, lwd=1.2, cex=1.8)
tmp <- plotscr(df=rmp, pch=19, lwd=1.2, cex=1.8)
legend("topright", inset=0.05, lty=1, cex=1.4, lwd=1.2,
pch=c(19, 1), legend=c("CS+","CS-"),
ncol=1, xpd=NA, bty="n")
#mtext("Log aSNA Amplitude (units)", 2, 3, font=2, cex=1.0)
#mtext("Trial", 1, 3, font=2, cex=1)
mtext(ylab, 2, line=4.0, cex=1.0, font=2, las=3)
mtext("Trial", 1, line=3.0, cex=1.0, font=2)
if (i == 1) {
mtext(x2lb[j], 3, 1, cex=1.5, font=2)
}
# group label on the right edge of the last column
if ((i == 1 | i == 2) & j == 3) {
text(12.95, mxy/2, labels=levels(p$group)[i], font=2,
cex=3.0, srt = -90, xpd=NA)
}
}
}
}
plotbar <- function(df=NULL, rng, col, xlim, ylim, axes=TRUE,
xpd=TRUE, abl=NULL, main=NULL,
yaxt=NULL, ylb=ylb) {
# Grouped bar plot (stim x group) with error bars.
#
# Args:
#   df:  summary data frame with columns m and eb, plus factors group
#        and stim (each expected to have 2 levels -> 2x2 matrix)
#   rng: index vector selecting the 4 rows of df to plot
#   col: bar fill colors
#   xlim, axes, xpd: currently unused; kept for interface compatibility
#   ylim: y-axis limits
#   abl: optional horizontal reference line position
#   main: plot title
#   yaxt: y-axis type passed to barplot
#   ylb: y-axis label. NOTE(review): the default `ylb=ylb` is
#        self-referential and errors if the argument is omitted --
#        callers must always supply it.
# Returns:
#   NULL; draws on the current graphics device.
# create means and se
#mxy <- max(df$m)*1.45
#tabbedmeans <- tapply(df$m, factors, function(x) c(x=x))
#print(tabbedmeans)
#tabbedeb <- tapply(df$eb, factors, function(x) c(x=x))
m <- matrix(ncol=2, nrow=2, df$m[rng])
e <- matrix(ncol=2, nrow=2, df$eb[rng])
#colnames(m) <- rep(levels(df$group), 1)
colnames(m) <- c("Plac", "Praz")
rownames(m) <- levels(df$stim)
colnames(e) <- rep(levels(df$group), 1)
rownames(e) <- levels(df$stim)
#plotbar(matmean=m, mateb=e, ylim=ylim, col=c("gray54", "white"),
# main=main, yaxt=yaxt, ylb=ylb)
barcenters <- barplot(height=m,
beside=TRUE,
ylim=ylim,
col=col,
space=c(0, 0.2),
main=main,
yaxt=yaxt)
#las=1, axes=axes, col=cols, space=c(0, 0.4),
#cex.names=0.8, ylim=ylim, xpd=xpd)
# error bars: stem plus capped arrowheads
s <- segments(barcenters, m-e,
barcenters, m+e,
lwd=1.0)
a <- arrows (barcenters, m-e,
barcenters, m+e,
lwd=1.0, angle=90, code=3, length=0.02)
#axis(1, at=1.5, labels="")
if (!is.null(abl)) abline(h=abl, lwd=2)
mtext(ylb, 2, 3, font=2)
#lines(0:6, rep(-0.01, 7), lwd=2)
#box(bty="l")
#box(bty="7", col="white")
}
plothist <- function(df=NULL, y="stimdiff", x2lb=NULL) {
# Histogram of column `y` of `df`, with bars scaled to percentages.
#
# Args:
#   df:   data frame
#   y:    name of the column to plot (default "stimdiff")
#   x2lb: plot title
# Returns:
#   NULL; draws on the current graphics device.
bins <- hist(df[, y], plot=FALSE)
# rescale bin counts to percent of total so the y-axis reads 0-100
bins$density <- bins$counts / sum(bins$counts) * 100
plot(bins, freq=FALSE, col="gray52", xlab="", ylab="Percentage",
     main=x2lb, ylim=c(0, max(bins$density*1.2)))
}
plotcontrasts <- function(model=NULL, fn=NULL, fcts=NULL, cl,
x1lab=NULL, x2lab=NULL, eb="ci",
xlim=c(-0.1, 0.1)) {
# Computes lsmeans contrasts for a fitted model and draws them as a
# dot (forest) plot via plotdots().
#
# Args:
#   model: fitted lmer model (default: calcmlm(REML=TRUE))
#   fn:    optional pdf file name; if given, output goes to that file
#   fcts:  factors for lsmeans (default group/stage/stim)
#   cl:    contrast list passed to contrast() -- required
#   x1lab, x2lab: axis/title labels (defaults below)
#   eb:    error-bar type forwarded to plotdots ("ci" or "se")
#   xlim:  x-axis limits
# Returns:
#   NULL; prints lsmeans/contrast summaries and draws the plot.
# pairwise contrasts to understand 3-way interaction
if (!is.null(fn)) tmp <- pdf(fn)
if (is.null(fcts)) fcts <- c("group", "stage", "stim")
if (is.null(model)) model <- calcmlm(REML=TRUE)
# test custom contrasts
par(mar=c(8, 15, 8, 2))
lsm <- lsmeans(model, fcts)
tmp <- print(lsm)
lsm.contr <- contrast(lsm, cl)
tmp <- print(summary(lsm.contr))
lsm.ci <- confint(lsm.contr)
lsmdf <- lsm.ci[c("contrast", "estimate", "SE", "lower.CL",
"upper.CL")]
# create dot plot
if (is.null(x1lab)) {
x1lab <- "Log aSNA Amplitude \n (adjusted Means with 95% CI)"
}
if (is.null(x2lab)) {
x2lab <- "Stimulus discrimination \n (CS+ minus CS-)"
}
tmp <- plotdots(lsmdf, xlim=xlim, x1lab=x1lab, x2lab=x2lab, eb)
if(!is.null(fn)) dev.off()
}
plotdots <- function(df=NULL, xlim=c(-0.1, 0.1),
x1lab=NULL, x2lab=NULL, eb="ci") {
# Plots forest plot of contrasts.
# Args:
#   df:    data frame with columns contrast, estimate, SE, lower.CL,
#          upper.CL (as produced by confint() on an lsmeans contrast)
#   xlim:  x-axis limits
#   x1lab: bottom-axis label; x2lab: top title
#   eb:    "ci" draws lower.CL..upper.CL bars, "se" draws +/- SE bars
# Returns:
#   NULL; draws on the current graphics device.
tmp <- dotchart(df$estimate, xlim=xlim, pch=19, font.lab=2)
for (i in 1:nrow(df)) {
switch(eb,
ci = { lines(x=c(df$lower.CL[i], df$upper.CL[i]),
y=c(i, i), lwd=2)},
se = { lines(x=c(df$estimate[i] - df$SE[i],
df$estimate[i] + df$SE[i]),
y=c(i, i), lwd=2)})
}
mtext(x1lab, 1, at=0, font=2, line=3.5)
axis(2, at=(1:nrow(df)), labels=paste(df$contrast), las=2)
# reference line at zero effect
abline(v=0, lwd=1, lty=2)
box(lwd=2)
mtext(x2lab, 3, at=0, cex=1.1, font=2, line=0.1)
}
calccovs <- function(df=NULL, covs=NULL) {
# Summarizes and compares baseline covariates between groups.
# For each covariate, prints per-group n / mean / sd and a two-sample
# t-test of covariate ~ group.
#
# Args:
#   df:   data frame with columns ID, group (factor), and the covariate
#         columns
#   covs: character vector of covariate column names (default: the
#         standard baseline set below)
# Returns:
#   NULL (invisibly); all output is printed.
if (is.null(covs)) {
covs <- c("Age", "Gender", "stait", "ShockerLevel",
"HeartRateBaseline", "HeartRate90min",
"BPSysBaseline", "BPSys90min",
"BPDiaBaseline", "BPDia90min")
}
# restrict to unique rows (trial-level data repeats baseline values)
df <- unique(df[, c("ID", "group", unlist(covs))])
for (i in seq_along(covs)) {
# BUG FIX: the inner loop previously iterated over nlevels(p$group),
# reading the *global* p instead of the df argument; it now uses df.
# (Also removed the unused `covfuncs` variable.)
for (j in seq_len(nlevels(df$group))) {
cat(paste("Covariate: ", covs[i], "\n"))
ps <- subset(df, group==levels(df$group)[j])
print(length(ps[complete.cases(ps[, covs[i]]), covs[i]]))
print(mean (ps[complete.cases(ps[, covs[i]]), covs[i]]))
print(sd (ps[complete.cases(ps[, covs[i]]), covs[i]]))
}
print(t.test(df[,covs[i]] ~ df$group))
}
invisible(NULL)
}
doanalysis <- function(df=NULL, covs=NULL,
contrlist=NULL, factors=NULL,
contrfn=NULL, formulas=NULL,
x1lab=NULL, x2lab=NULL, eb="ci",
xlim=c(-0.1, 0.1)) {
# Runs the actual analysis: optional covariate summary, a sequence of
# likelihood-ratio model comparisons, and the final contrast plot.
# Args:
#   df:       trial-level data frame
#   covs:     optional covariate names forwarded to calccovs()
#   contrlist: contrast list for plotcontrasts()
#   factors:  lsmeans factors for plotcontrasts()
#   contrfn:  optional pdf file name for the contrast plot
#   formulas: character vector of at least 6 model formulas. BUG FIX:
#             this argument was previously accepted but ignored -- the
#             body always read the global `forms`. It is now used when
#             supplied, with the global kept as fallback so existing
#             callers are unaffected.
#   x1lab, x2lab, eb, xlim: forwarded to plotcontrasts()
# Returns:
#   NULL; model comparisons are printed and the plot is drawn.
if (!is.null(formulas)) forms <- formulas
if(!is.null(covs)) tmp <- calccovs(df=df, covs=covs)
# Test the 3-way interaction (model 1 vs model 2)
m1 <- calcmlm(form=as.formula(forms[1]), df=df, REML=FALSE)
m2 <- calcmlm(form=as.formula(forms[2]), df=df, REML=FALSE)
print(anova(m1, m2))
# Calculate significance of trial factor
m1 <- calcmlm(form=as.formula(forms[2]), df=df, REML=FALSE)
m2 <- calcmlm(form=as.formula(forms[3]), df=df, REML=FALSE)
print(anova(m1, m2))
# Demonstrate absence of 4-way interaction
m1 <- calcmlm(form=as.formula(forms[4]), df=df, REML=FALSE)
print(anova(m1))
# Rule out potential influence of covariates
#p$shockerlevelc <- scale(p$ShockerLevel, center=T)
df$staitc <- scale(df$stait, center=TRUE)
m1 <- calcmlm(form=as.formula(forms[5]),
df=subset(df, !ID==8), REML=FALSE)
m2 <- calcmlm(form=as.formula(forms[2]),
df=subset(df, !ID==8), REML=FALSE)
print(anova(m1, m2))
m1 <- calcmlm(form=as.formula(forms[5]),
df=subset(df, !ID==8), REML=FALSE)
m2 <- calcmlm(form=as.formula(forms[6]),
df=subset(df, !ID==8), REML=FALSE)
print(anova(m1, m2))
# final model (refit with REML for the contrast estimates)
m2 <- lmer(as.formula(forms[2]), df, REML=TRUE)
tmp <- plotcontrasts(model=m2, fn=contrfn, cl=contrlist,
fcts=factors, x1lab=x1lab, x2lab=x2lab,
eb=eb, xlim=xlim)
}
calcmlmwithcov <- function(df, forms=NULL, cov,
center=TRUE, REML=FALSE) {
# Calculates multilevel models including each covariate in `cov`.
# Args:
#   df:     trial-level data frame
#   forms:  character vector of 3 formulas: [1] without covariate,
#           [2] with covariate, [3] with covariate but reduced
#           interaction (compared against [2])
#   cov:    character vector of covariate column names; each is scaled
#           (optionally centered) into `covc` before fitting
#   center: whether to mean-center the covariate
#   REML:   unused in the comparisons below (fits use REML=FALSE)
# Returns:
#   NULL; anova() comparisons are printed per covariate.
# Do this with lmerTest to make it a little easier to implement
#library('lmerTest')
for (i in 1:length(cov)) {
dfc <- df
print(paste("Covariate in model is", cov[i]))
dfc$covc <- scale(df[, cov[i]], center=center)
# restrict data frame to ids that actually have the covariate
dfc <- subset(dfc, !is.na(covc))
# first test influence of covariate
m1 <- calcmlm(form=as.formula(forms[1]),
df=dfc, REML=FALSE)
m2 <- calcmlm(form=as.formula(forms[2]),
df=dfc, REML=FALSE)
print(anova(m1, m2))
# Then test if interaction remains significant with covariate
m3 <- calcmlm(form=as.formula(forms[3]),
df=dfc, REML=FALSE)
print(anova(m2, m3))
rm(dfc)
}
#detach("package:lmerTest", unload=TRUE)
}
doplotbar <- function(df=df, rng=NULL, ylim=NULL,
main=NULL, yaxt=NULL, ylb=NULL) {
# Produce the bar plots: thin wrapper around plotbar() with the fixed
# gray/white fill colors.
# NOTE(review): the default `df=df` is self-referential and errors if
# the argument is omitted -- callers must always supply df.
# Args:
#   df, rng, ylim, main, yaxt, ylb: forwarded to plotbar()
# Returns:
#   NULL; draws on the current graphics device.
# m <- matrix(ncol=2, nrow=2, df$m[rng])
# e <- matrix(ncol=2, nrow=2, df$eb[rng])
# #colnames(m) <- rep(levels(df$group), 1)
# colnames(m) <- c("Plac", "Praz")
# rownames(m) <- levels(df$stim)
# colnames(e) <- rep(levels(df$group), 1)
# rownames(e) <- levels(df$stim)
plotbar(df=df, rng=rng, ylim=ylim, col=c("gray54", "white"),
main=main, yaxt=yaxt, ylb=ylb)
}
# Main program: prepare the prazosin data set and derived summary columns.
# greeting
cat("This is the analysis of the prazosin-study\n")
cat("Output of citation() is:\n")
citation()
# load dataset (comma separated format); readdat() is defined earlier in
# this file.  IDs 25 and 28 are excluded a priori.
fn <- "../data/prazosin.csv"
p <- readdat(fn=fn, resptype="cr", exclids=c(25, 28))
# remove first CS- in ext (disabled)
#p$ctrial[(!p$stage=="Acquisition")&p$stim=="CSminus"] <- p$ctrial[(!p$stage=="Acquisition")&p$stim=="CSminus"] - 1
#p <- subset(p, !ctrial==0)
# set the outcome measure here (alternatives kept for reference)
#outcome <- "logdcm1"
#ylab <- "Log aSNA Amplitude (units)"
#outcome <- "DCM1"
outcome <- "peakscoresqrtn"
ylab <- "SQRT SCR (mS)"
# set outcome in a generic column so the model formulas below can use it
p$outcome <- p[, outcome]
# Covariates at baseline, tested one at a time further below
covs <- c("Age", "Gender", "stait", "ShockerLevel",
"HeartRateBaseline", "HeartRate90min", "BPSysBaseline",
"BPSys90min", "BPDiaBaseline", "BPDia90min")
# summarize by stages; calcstages() (defined elsewhere in this file) returns
# a list of aggregated frames.  From the merges below the indices appear to
# be: [[1]] id/stage/phase/stim means, [[2]] id/stage/phase differences,
# [[4]] id/stage/stim means, [[5]] id/stage differences -- TODO confirm
# against calcstages().
out <- calcstages(p)
dfm <- out[[5]]
dfm$stimdiff <- dfm$mdiff
p <- merge(p, subset(dfm, select=c(ID, group, stage, stimdiff)),
all=TRUE)
dfm2 <- out[[4]]
dfm2$stimmean <- dfm2$m
p <- merge(p, subset(dfm2, select=c(ID,group, stage, stim, stimmean)),
all=TRUE)
dfm3 <- out[[2]]
dfm3$stimdiff <- dfm3$mdiff
pdfm3 <- merge(subset(p, select=c(ID, group, stage, phase)),
subset(dfm3, select=c(ID, group, stage, phase, stimdiff)),
all=TRUE)
dfm4 <- out[[1]]
dfm4$stimmean <- dfm4$m
pdfm4 <- merge(subset(p, select=c(ID, group, stage, phase, stim)),
subset(dfm4, select=c(ID, group, stage, phase, stimmean)),
all=TRUE)
# Include only ids with positive acquisition
# This should not be done -- `ids` is computed but intentionally not used
ids <- getposacqids(p)
#pp <- subset(p, ID %in% ids)
#-----------------------------------------------------------------------
# Plots
#-----------------------------------------------------------------------
# NOTE(review): three pdf devices are opened in this section but only two
# are closed here; prazosin_analysis.pdf (opened below) stays the active
# device after the barplots device closes and is only shut by the final
# dev.off() at the end of the covariate section -- confirm this is intended.
pdf("../output/figures/prazosin_plots.pdf", paper="a4r",
width=0, height=0)
# main figure
#plottrialwise(subset(p, !(ID==23&stage=="Acquisition")), mxyp=0.35)
plottrialwise(p, mxyp=0.9)
dev.off()
pdf("../output/figures/prazosin_analysis.pdf")
# histogram of stim diff in acq
plothist(unique(subset(p, stage=="Acquisition", select=stimdiff)),
x2lb="Stimulus Discrimination in Acquisition")
# summaries of stim diff by group, stage, phase
dfm <- out[[3]]
lb <- paste(dfm$group, dfm$stage, dfm$phase)
plotgsp(df=dfm, lb=lb)
pdf('../output/figures/prazosin_barplots.pdf')
plothist(unique(subset(p, stage=="Acquisition", select=stimdiff)),
x2lb="Stimulus Discrimination in Acquisition")
par(mar=c(7, 2, 2, 0), oma=c(4, 4, 2, 4))
# 3 wide panels (by stage) on top, 6 narrow panels (stage x phase) below
l <- layout(matrix(c(1, 1, 2, 2, 3, 3, c(4:9)), ncol=6, byrow=TRUE))
#layout.show(l)
# non centrality parameter for 95% error bars (t quantile, n-1 df)
ncp <- abs(qt(0.05/2, 19-1))
dfm <- out[[1]]
dfmm <- ddply(dfm, c("group", "stage", "stim"), summarize,
mm=mean(m, na.rm=TRUE),
sd=sd(m, na.rm=TRUE),
n=sum(!is.na(m)),
eb=ncp*sd/sqrt(n))
dfmm <- dfmm[order(dfmm$stage, dfmm$group, rev(dfmm$stim)),]
# top row: one panel per stage, significance stars added by hand
doplotbar(df=dfmm, c(1:4), ylim=c(0, 0.3), main=levels(dfmm$stage)[1])
mtext("Log aSNA Amplitude (units)", 2, 2, cex=1.5, font=2, outer=TRUE)
text(1.25, 0.175, "**", cex=1.2)
text(3.5, 0.175, "***", cex=1.2)
doplotbar(dfmm, c(5:8), ylim=c(0, 0.3), main=levels(dfmm$stage)[2], yaxt="n")
text(3.5, 0.26, "***", cex=1.2)
doplotbar(dfmm, c(9:12), ylim=c(0, 0.3), main="Re-extinction", yaxt="n")
text(3.5, 0.23, "***", cex=1.2)
# bottom rows: split additionally by phase (early/late); SEM error bars here
dfmm <- ddply(dfm, c("group", "stage", "phase", "stim"), summarize,
mm=mean(m, na.rm=TRUE),
sd=sd(m, na.rm=TRUE),
n=sum(!is.na(m)),
eb=sd/sqrt(n))
dfmm <- dfmm[order(dfmm$stage, dfmm$group, rev(dfmm$stim)),]
dfmm <- dfmm[order(dfmm$stage, dfmm$phase, dfmm$group, rev(dfmm$stim)),]
doplotbar(dfmm, c(1:4), ylim=c(0, 0.35), "Early")
text(1.25, 0.21, "*", cex=1.2)
text(3.5, 0.21, "*", cex=1.2)
#text(5.25, 0.35, levels(dfmm$stage)[1], xpd=NA, font=2, cex=1.2)
doplotbar(dfmm, c(5:8), ylim=c(0, 0.35), "Late", "n")
text(3.5, 0.16, "*", cex=1.2)
doplotbar(dfmm, c(9:12), ylim=c(0, 0.35), "Early", "n")
text(3.5, 0.31, "*", cex=1.2)
#text(5.25, 0.35, levels(dfmm$stage)[2], xpd=NA, font=2, cex=1.2)
doplotbar(dfmm, c(13:16), ylim=c(0, 0.35), "Late", "n")
text(1.25, 0.24, "**", cex=1.2)
text(3.5, 0.24, "**", cex=1.2)
doplotbar(dfmm, c(17:20), ylim=c(0, 0.35), "Early", "n")
text(3.5, 0.27, "*", cex=1.2)
#text(5.25, 0.35, levels(dfmm$stage)[3], xpd=NA, font=2, cex=1.2)
doplotbar(dfmm, c(21:24), ylim=c(0, 0.35), "Late", "n")
text(3.5, 0.21, "**", cex=1.2)
legend(-16.5, -0.1, legend=c("CSplus", "CSminus"), bty="n",
fill=c("gray52", "white"), xpd=NA)
dev.off()
#-----------------------------------------------------------------------
# Bottom up tests
#-----------------------------------------------------------------------
# Cell-wise CS+/CS- comparisons via teststagestim() (defined earlier in
# this file): first per group x stage x phase, then per group x stage.
out <- calcstages(p)
dfm <- out[[1]]
teststagestim(subset(dfm, group=="Placebo"&stage=="Acquisition"&
phase=="early"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Acquisition"&
phase=="late"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Acquisition"&
phase=="early"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Acquisition"&
phase=="late"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Extinction"&
phase=="early"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Extinction"&
phase=="late"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Extinction"&
phase=="early"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Extinction"&
phase=="late"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Retrieval"&
phase=="early"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Retrieval"&
phase=="late"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Retrieval"&
phase=="early"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Retrieval"&
phase=="late"))
# same comparisons collapsed over phase
dfm <- out[[4]]
teststagestim(subset(dfm, group=="Placebo"&stage=="Acquisition"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Acquisition"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Extinction"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Extinction"))
teststagestim(subset(dfm, group=="Placebo"&stage=="Retrieval"))
teststagestim(subset(dfm, group=="Prazosin"&stage=="Retrieval"))
#-----------------------------------------------------------------------
# Post-hoc tests requested by reviewers
#-----------------------------------------------------------------------
# Repeated-measures ANOVAs on the per-subject summaries.  ID 8 is dropped
# throughout this section (reason not documented here -- TODO confirm).
u <- unique(subset(p, !ID==8, select=c(ID, group, stage, phase,
stimdiff)))
u$ID <- factor(u$ID)
a <- aov(stimdiff ~ group * stage + Error(ID/stage), u)
summary(a)
u <- unique(subset(p, !ID==8, select=c(ID, group, stage, stim, stimmean)))
u$ID <- factor(u$ID)
a <- aov(stimmean ~ group * stage * stim + Error(ID/(stage*stim)), u)
summary(a)
# phase effects on CS discrimination within each group in acquisition
u <- unique(subset(p, !ID==8, select=c(ID, group, stage, phase,
stimdiff)))
a <- aov(stimdiff ~ phase + Error(ID/phase),
subset(u, stage=="Acquisition"&group=="Placebo"))
summary(a)
a <- aov(stimdiff ~ phase + Error(ID/phase),
subset(u, stage=="Acquisition"&group=="Prazosin"))
summary(a)
# phase effects on single-stimulus means per group/stage/stimulus
u <- unique(subset(pdfm4, !ID==8, select=c(ID, group, stage, phase,
stim, stimmean)))
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Extinction"&group=="Placebo"&
stim=="CSplus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Extinction"&group=="Prazosin"&
stim=="CSplus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Retrieval"&group=="Placebo"&
stim=="CSplus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Retrieval"&group=="Prazosin"&
stim=="CSplus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Extinction"&group=="Placebo"&
stim=="CSminus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Extinction"&group=="Prazosin"&
stim=="CSminus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Retrieval"&group=="Placebo"&
stim=="CSminus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Retrieval"&group=="Prazosin"&
stim=="CSminus"))
summary(a)
a <- aov(stimmean ~ phase + Error(ID/phase),
subset(u, stage=="Extinction"&group=="Prazosin"))
summary(a)
# late acquisition to early extinction
u <- unique(subset(pdfm3, !ID==8, select=c(ID, group, stage, phase,
stimdiff)))
a <- aov(stimdiff ~ group + Error(ID),
subset(u, (stage=="Acquisition"&phase=="late")))
summary(a)
a <- aov(stimdiff ~ group * stage + Error(ID/stage),
subset(u, (stage=="Acquisition"&phase=="late")|
(stage=="Extinction"&phase=="early")))
summary(a)
# late extinction to early re-extinction
a <- aov(stimdiff ~ group * stage + Error(ID/stage),
subset(u, (stage=="Extinction"&phase=="late")|
(stage=="Retrieval"&phase=="early")))
summary(a)
# spontaneous recovery (identical subset to the previous model)
a <- aov(stimdiff ~ group * stage + Error(ID/stage),
subset(u, (stage=="Extinction"&phase=="late")|
(stage=="Retrieval"&phase=="early")))
summary(a)
# spontaneous recover within placebo group
# NOTE(review): `&` binds tighter than `|`, so this condition is
# (group=="Placebo" & late extinction) | (early retrieval of BOTH groups).
# Parentheses around the `|` clause are probably intended -- verify.
a <- aov(stimdiff ~ stage + Error(ID),
subset(u, group=="Placebo"&
(stage=="Extinction"&phase=="late")|
(stage=="Retrieval"&phase=="early")))
summary(a)
# spontaneous recover within prazosin group
# NOTE(review): same operator-precedence concern as above.
a <- aov(stimdiff ~ stage + Error(ID),
subset(u, group=="Prazosin"&
(stage=="Extinction"&phase=="late")|
(stage=="Retrieval"&phase=="early")))
summary(a)
a <- aov(stimdiff ~ group * phase + Error(ID/phase),
subset(u, (stage=="Acquisition")))
summary(a)
a <- aov(stimdiff ~ group * phase + Error(ID/phase),
subset(u, (stage=="Extinction")))
summary(a)
a <- aov(stimdiff ~ group * phase + Error(ID/phase),
subset(u, (stage=="Retrieval")))
summary(a)
# request in revision #2: early vs. late
# by id, stage, phase, stim
dfm <- out[[1]]
dfm$stimmean <- dfm$m
u <- unique(subset(dfm, !ID==8, select=c(ID, group, stage, phase,
stim,
stimmean)))
for (i in 1:nlevels(u$stage)) {
print(levels(u$stage)[i])
a <- aov(stimmean ~ group * phase * stim + Error(ID/(phase*stim)),
data=subset(u, stage==levels(u$stage)[i]))
print(summary(a))
}
# all sessions in one model (disabled)
#a <- aov(stimmean ~ group * stage * phase * stim +
# Error(ID/stage * phase * stim),
# subset(u))
#summary(a)
#-----------------------------------------------------------------------
# Primary analysis
#-----------------------------------------------------------------------
# Mixed models on the trial-level data with planned contrasts.
fcts <- c("group", "stage", "stim")
forms <- c("outcome ~ ctrialc + group + stage + stim +
group:stage + stage:stim + group:stim + (0 + stage*stim|ID)",
"outcome ~ ctrialc + group*stage*stim + (0 + stage*stim|ID)",
"outcome ~ group*stage*stim + (0 + stage*stim|ID)",
"outcome ~ ctrialc*group*stage*stim + (0 + stage*stim|ID)",
"outcome ~ ctrialc + staitc + group*stage*stim +
(0+stage*stim|ID)",
"outcome ~ ctrialc + staitc + group + stage + stim +
group:stage + stage:stim +
group:stim + (0 + stage*stim|ID)")
# contrast matrix over the 12 group x stage x stim cells; each row
# corresponds (in order) to one named contrast in `cl` below
cv <- matrix(c(0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0,
1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 1, -1, 0, 0, 0, 0, -1, 1,
0, 0, 1, -1, 0, 0, 0, 0, -1, 1, 0, 0,
-1, -1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,
1, -1, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0,
-1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, -1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
-1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1),
nrow=14, ncol=12, byrow=TRUE)
#cv <- matrix(c(0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0,
# 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0,
# 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, -1, 1,
# 0, 0, 1, -1, 0, 0, 0, 0, -1, 1, 0, 0,
# -1, -1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,
# 1, -1, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0),
# nrow=6, ncol=12, byrow=TRUE)
#cl <- list("Prazosin, Acq - Ext" =cv[1, ],
#cl <- list("Prazosin, Acq - Ext" =cv[1, ],
# "Placebo, Acq - Ext" =cv[2, ],
# "Re-extinction, Prazosin - Placebo" =cv[3, ],
# "Extinction, Prazosin - Placebo" =cv[4, ],
# "Acquisition, pooled" =cv[5, ],
# "Acquisition, Prazosin - Placebo"=cv[6, ])
cl <- list("Re-extinction, Prazosin - Placebo" =cv[3, ],
"Extinction, Prazosin - Placebo" =cv[4, ],
"Acquisition, pooled" =cv[5, ],
"Acquisition, Prazosin - Placebo" =cv[6, ],
"Acquisition, Placebo" =cv[7, ],
"Acquisition, Prazosin" =cv[8, ],
"Acquisition, CS-" =cv[9, ],
"Extinction, CS-" =cv[10, ],
"Re-extinction, CS-" =cv[11, ],
"Acquisition, CS+" =cv[12, ],
"Extinction, CS+" =cv[13, ],
"Re-extinction, CS+" =cv[14, ])
#cl <- list("Re-extinction, Prazosin - Placebo" =cv[3, ],
# "Extinction, Prazosin - Placebo" =cv[4, ],
# "Acquisition, pooled" =cv[5, ],
# "Acquisition, Prazosin - Placebo" =cv[6, ])
# run analysis; doanalysis() is defined earlier in this file.
# pdf() with no arguments writes to the default Rplots.pdf in the cwd.
pdf()
tmp <- doanalysis(df=p, covs=covs, factors=fcts,
contrlist=cl, contrfn=NULL, formulas=forms)
dev.off()
#-----------------------------------------------------------------------
# Assess influence of covariates on final model
#-----------------------------------------------------------------------
# re-read the data to undo the merges above before the covariate models
p <- readdat(fn=fn, resptype="cr", exclids=c(25, 28))
p$outcome <- p[, outcome]
forms <- c(
"outcome ~ ctrialc + group*stage*stim +
(0 + stage*stim|ID)",
"outcome ~ ctrialc + covc + group*stage*stim +
(0 + stage*stim|ID)",
"outcome ~ ctrialc + covc + group + stage + stim +
group:stage + stage:stim +
group:stim + (0 + stage*stim|ID)"
)
tmp <- calcmlmwithcov(df=p, cov=covs, center=TRUE, forms=forms)
# closes the prazosin_analysis.pdf device left open in the plot section
dev.off()
# simulation showing significant interaction with non-significant main
# effects (didactic figure, independent of the prazosin data)
# simulate data: two 2-level factors, 30 draws per cell
set.seed(876170)
a1 <- rnorm(n=30, mean=0.30, sd=0.02)
a2 <- rnorm(n=30, mean=0.35, sd=0.12)
b1 <- rnorm(n=30, mean=0.35, sd=0.12)
b2 <- rnorm(n=30, mean=0.30, sd=0.10)
# graph the data
#par(mar=c(12, 6, 6, 26), oma=c(2, 2, 2, 2))
par(mar=c(7, 7, 7, 7), oma=c(2, 2, 2, 2))
xlab <- c("a1", "a2", "b1", "b2")
means <- c(mean(a1), mean(a2), mean(b1), mean(b2))
sds <- c(sd(a1), sd(a2), sd(b1), sd(b2))
# t quantile for two-sided 95% CIs with n-1 = 29 df
ncp <- abs(qt(0.05/2, 30-1))
cis <- sds/sqrt(30) * ncp
bp <- barplot(means, names=xlab, ylab="Mean with 95% CI",
ylim=c(0, 0.55), cex.lab=1.5)
mses <- means - cis
pses <- means + cis
# error bars drawn as zero-head arrows
arrows(bp, mses, bp, pses, length=0.00,
angle=90, code=3, lwd=1.2)
# horizontal markers
# main effect of a
lines(bp[1:2], c(0.45, 0.45))
lines(bp[c(1,1)], c(0.45, 0.44))
lines(bp[c(2,2)], c(0.45, 0.44))
text(bp[1]+((bp[2]-bp[1])/2), 0.46,"n.s.")
# main effect of b
lines(bp[3:4], c(0.45, 0.45))
lines(bp[c(3, 3)], c(0.45, 0.44))
lines(bp[c(4, 4)], c(0.45, 0.44))
text(bp[3]+((bp[4]-bp[3])/2), 0.46,"n.s.")
# interaction bracket spanning the two factor pairs
lines(c((bp[1]+bp[2])/2, (bp[3]+bp[4])/2),
c(0.5, 0.5))
lines(rep(bp[1]+(bp[2]-bp[1])/2, 2), c(0.5, 0.49))
lines(rep(bp[3]+(bp[4]-bp[3])/2, 2), c(0.5, 0.49))
text(2.5, 0.51,"*", cex=1.2)
# test interaction (difference of differences)
t.test(x=(a1-a2), y=(b1-b2), alternative="two.sided")
# test main effect of a
t.test(x=a1, y=a2, alternative="two.sided")
# test main effect of b
t.test(x=b1, y=b2, alternative="two.sided")
|
a99af370859c75547f17335fe05aedbd65c27e5b
|
6f920ef94c1b690e238e032f9503b67ea589adbd
|
/code/helper/KaplanScan.R
|
5674e25b9ef217c35cd0818331c092b71c13b924
|
[] |
no_license
|
PichaiRaman/PDACSurvivalAnalysis
|
ec1535286e5c369e7c7bfec9c9fc622e61a33cee
|
7c4b93e38f7fd5e30d4e1d15692c0b41ab3cdef9
|
refs/heads/master
| 2020-03-19T02:46:14.990410
| 2018-06-06T15:56:36
| 2018-06-06T15:56:36
| 135,659,254
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,269
|
r
|
KaplanScan.R
|
########################################
#Kaplan-meieir based on finding optimal cut-point
########################################
#Call libraries
library("survival");
#1 = alive
#2 = dead
kmScan <- function(x, perc, tVar, eVar)
{
  # Scan all admissible cut points of a (pre-sorted) data frame and return
  # the split minimising the log-rank p-value.  Rows 1..i form the "low"
  # group at cut i; x must already be ordered by the quantity being split
  # (the caller, kapmPlot, sorts by the metric first).
  #
  # Args:
  #   x:    data frame containing at least the time and event columns
  #   perc: minimum group fraction; each group keeps >= round(perc * n) rows
  #   tVar: name of the survival-time column
  #   eVar: name of the event column (1 = alive, 2 = dead)
  # Returns: c(best cut index, raw p-value, Bonferroni-adjusted p-value)
  timeVar <- x[, as.character(tVar)]
  eventVar <- x[, as.character(eVar)]
  n <- nrow(x)
  minSamps <- round(perc * n)
  # Guard the degenerate case (e.g. perc >= 0.5) where the original code
  # would silently iterate a backwards sequence.
  if (minSamps < 1 || minSamps > n - minSamps) {
    stop("'perc' leaves no admissible cut points")
  }
  cuts <- minSamps:(n - minSamps)
  pvals <- numeric(length(cuts))
  # the survival object does not depend on the cut, so build it once
  x.Surv <- Surv(timeVar, eventVar)
  for (k in seq_along(cuts)) {
    i <- cuts[k]
    x[, "Gene"] <- 1
    x[1:i, "Gene"] <- 0
    # log-rank test (rho = 0) of low vs high group at this cut;
    # `lower.tail` spelled out (original relied on partial matching of `lower`)
    pvals[k] <- pchisq(survdiff(x.Surv ~ Gene, data = x, rho = 0)$chisq,
                       df = 1, lower.tail = FALSE)
  }
  # which.min keeps the first minimum, matching the original strict `<` scan
  best <- which.min(pvals)
  adjpval <- min(p.adjust(pvals, method = "bonferroni"))
  return(c(cuts[best], pvals[best], adjpval))
}
#This will yield a p-value and show a plot
kapmPlot <- function(metric, myData, createPlot=T, perc=.05, tVar="time", eVar="event")
{
#Get metadata
tmpMeta <- myData
tmpMeta[,metric] <- as.numeric(tmpMeta[,metric]);
tmpMeta <- tmpMeta[order(tmpMeta[,metric]),]
colnames(tmpMeta)[5] <- "Gene";
#Run scan
tmpMetaScan <- tmpMeta;
tmpMetaScan <- tmpMetaScan[,as.character(c(tVar, eVar, "Gene"))];
out <- kmScan(tmpMetaScan, perc, tVar, eVar);
#Sort DF and set it
tmpMetaScan[,"GeneBin"] <- 1;
tmpMetaScan[1:out[[1]],"GeneBin"] <- 0;
#time
timeVar <- tmpMetaScan[,tVar];
#event
eventVar <- tmpMetaScan[,eVar];
#createsurvival
t.Surv <- Surv(timeVar, eventVar);
t.survfit <- survfit(t.Surv~GeneBin, data=tmpMetaScan);
#Change strata names
myLowName <- paste("Low : n = ", t.survfit$n[[1]], sep="");
myHighName <- paste("High : n = ", t.survfit$n[[2]], sep="");
names(t.survfit$strata) <- c(myLowName, myHighName)
t.survframe <- createSurvivalFrame(t.survfit)
if(createPlot==T)
{
tmpTitle <- paste("KM Plot - Sig Score", "\nP-val(Adj) :", format(out[2], scientific=T, digits=3), "(", format(out[3], scientific=T, digits=3), ")");
myReturn <- qplot_survival(t.survframe, f.CI=F, myTitle=tmpTitle)+theme_bw()+scale_colour_manual(values=c("red", "blue") );
}
if(createPlot==F)
{
myReturn <- c(genes, out[2], out[3]);
}
myReturn;
}
# Convert a survival::survfit object into a plain data frame suitable for
# ggplot step plots (one row per event/censoring time, plus synthetic rows
# at time 0 with survival 1 so curves start at the top-left).  Handles both
# single-stratum and multi-stratum fits; in the latter case a `strata`
# factor column is added.
createSurvivalFrame <- function(f.survfit){
# initialise frame variable
f.frame <- NULL
# check if more than one stratum is present
if(length(names(f.survfit$strata)) == 0){
# single stratum: copy the survfit vectors straight into a data.frame
f.frame <- data.frame(time=f.survfit$time, n.risk=f.survfit$n.risk, n.event=f.survfit$n.event, n.censor = f.survfit
$n.censor, surv=f.survfit$surv, upper=f.survfit$upper, lower=f.survfit$lower)
# create first two rows (start at survival = 1, times 0 and first event)
f.start <- data.frame(time=c(0, f.frame$time[1]), n.risk=c(f.survfit$n, f.survfit$n), n.event=c(0,0),
n.censor=c(0,0), surv=c(1,1), upper=c(1,1), lower=c(1,1))
# add first row to dataset
f.frame <- rbind(f.start, f.frame)
# remove temporary data
rm(f.start)
}
else {
# create vector for strata identification
f.strata <- NULL
for(f.i in 1:length(f.survfit$strata)){
# repeat each stratum label once per row belonging to that stratum
f.strata <- c(f.strata, rep(names(f.survfit$strata)[f.i], f.survfit$strata[f.i]))
}
# create data.frame with data from survfit (create column for strata)
f.frame <- data.frame(time=f.survfit$time, n.risk=f.survfit$n.risk, n.event=f.survfit$n.event, n.censor = f.survfit
$n.censor, surv=f.survfit$surv, upper=f.survfit$upper, lower=f.survfit$lower, strata=factor(f.strata))
# remove temporary data
rm(f.strata)
# create first two rows (start at 1) for each stratum
for(f.i in 1:length(f.survfit$strata)){
# take only subset for this stratum from data
f.subset <- subset(f.frame, strata==names(f.survfit$strata)[f.i])
# create first two rows (time: 0, time of first event);
# f.survfit[f.i] subsets the survfit object to this stratum, so $n is
# that stratum's size
f.start <- data.frame(time=c(0, f.subset$time[1]), n.risk=rep(f.survfit[f.i]$n, 2), n.event=c(0,0),
n.censor=c(0,0), surv=c(1,1), upper=c(1,1), lower=c(1,1), strata=rep(names(f.survfit$strata)[f.i],
2))
# add first two rows to dataset
f.frame <- rbind(f.start, f.frame)
# remove temporary data
rm(f.start, f.subset)
}
# reorder data
f.frame <- f.frame[order(f.frame$strata, f.frame$time), ]
# rename row.names
rownames(f.frame) <- NULL
}
# return frame
return(f.frame)
}
qplot_survival <- function(f.frame, f.CI="default", f.shape=3, myTitle){
  # ggplot2 Kaplan-Meier plot from a frame built by createSurvivalFrame().
  #
  # Args:
  #   f.frame: data frame with time/surv/upper/lower/n.censor (+ optional strata)
  #   f.CI:    "default" draws CIs for single-stratum data and omits them for
  #            stratified data; TRUE/FALSE force the choice
  #   f.shape: plotting symbol for censoring marks
  #   myTitle: plot title (used only in the stratified, no-CI branch, as in
  #            the original)
  # Returns: a ggplot object.
  #
  # BUG FIX: the original passed `directions="hv"` (a typo for `direction`)
  # to two geom_step() calls, so those steps silently fell back to the
  # default direction.
  if("strata" %in% names(f.frame) == FALSE){
    # single stratum; CIs drawn unless explicitly disabled
    if(f.CI=="default" || f.CI==TRUE ){
      # step curve plus dashed upper/lower CI steps; censored points overlaid
      # (multiple censorings at one timepoint overplot, unlike plot.survfit)
      ggplot(data=f.frame) + geom_step(aes(x=time, y=surv), direction="hv") + geom_step(aes(x=time,
      y=upper), direction="hv", linetype=2) + geom_step(aes(x=time,y=lower), direction="hv", linetype=2) +
      geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)+scale_y_continuous(limits = c(0, 1))
    }
    else {
      # single stratum without confidence intervals
      ggplot(data=f.frame) + geom_step(aes(x=time, y=surv), direction="hv") +
      geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)+scale_y_continuous(limits = c(0, 1))
    }
  }
  else {
    if(f.CI=="default" || f.CI==FALSE){
      # stratified without CI; one coloured step per stratum
      ggplot(data=f.frame, aes(group=strata, colour=strata, shape=strata)) + geom_step(aes(x=time, y=surv),
      direction="hv") + geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)+ggtitle(myTitle)+scale_y_continuous(limits = c(0, 1));
    }
    else {
      # stratified with CI (alpha keeps overlapping bands readable)
      ggplot(data=f.frame, aes(colour=strata, group=strata)) + geom_step(aes(x=time, y=surv),
      direction="hv") + geom_step(aes(x=time, y=upper), direction="hv", linetype=2, alpha=0.5) +
      geom_step(aes(x=time,y=lower), direction="hv", linetype=2, alpha=0.5) +
      geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)+scale_y_continuous(limits = c(0, 1))
    }
  }
}
|
a69f30d599b893294c118e252ac6f1855923d11f
|
a3ff80775cf1ec854dd5c2a8a99d72f2a66a75d6
|
/Missing Values.R
|
c1f05dea8ee8bd4dd76ab15f435292cd14f4122d
|
[] |
no_license
|
ked66/RepData_PeerAssessment1
|
3731c9ba6c1098a62a6dac4ec9de4e1bf857fd83
|
e440e6f72e85c8f83cdb30d26f8462f31493c627
|
refs/heads/master
| 2022-12-03T01:39:15.353390
| 2020-08-24T17:38:10
| 2020-08-24T17:38:10
| 274,181,797
| 0
| 0
| null | 2020-06-22T15:56:47
| 2020-06-22T15:56:46
| null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
Missing Values.R
|
## Calculate and report the total number of missing values in the dataset
## (i.e. the total number of rows with NAs)
## Relies on globals `data` (activity data with steps/date/interval) and
## `by_interval` (per-interval means) created earlier in the analysis.
sum(is.na(data)) ## 2304
## Devise a strategy for filling in all of the missing values in the dataset.
## The strategy does not need to be sophisticated. For example, you could use the
## mean/median for that day, or the mean for that 5-minute interval, etc.
## Replace each missing value with the mean value for that 5-minute interval
## NOTE(review): this per-row loop could be replaced with a single
## match()-based vectorised assignment.
complete_data <- data
for(i in 1:length(complete_data$steps)){
if(is.na(complete_data$steps[i])){
complete_data$steps[i] <- by_interval[which(by_interval$interval == data$interval[i]), 2]
}
}
## Create a new dataset that is equal to the original dataset but with the missing data filled in.
## Make a histogram of the total number of steps taken each day and
## Calculate and report the mean and median total number of steps taken per day.
## Do these values differ from the estimates from the first part of the assignment?
## What is the impact of imputing missing data on the estimates of the total daily number of steps?
complete_per_day <- tapply(complete_data$steps, complete_data$date, sum)
hist(complete_per_day, main = "Histogram of Total Number of Steps per Day",
xlab = "Total Number of Steps", col = "maroon")
## Or with ggplot
library(ggplot2)
qplot(complete_per_day,
geom = "histogram",
binwidth = 5000,
main = "Histogram of Total Number of Steps per Day",
xlab = "Total Number of Steps",
ylab = "Frequency",
fill = I("maroon"), col = I("black"))
mean(complete_per_day) ## 10766.19
median(complete_per_day) ## 10766.19
c15fd4fb0bf0e83dfbc45ae2de4a29ea359a16f1
|
1b82da8fb3c5ecafd4b63a8a19368b87c64b681b
|
/R/Lynch_IntroAppliedBayesian/p286.R
|
997a907acd731e1c86de7ec55e35737f8298fad4
|
[] |
no_license
|
wmmurrah/BayesianAnalysis
|
032485eae0df9ff4e95fb691d48e6591ee11982a
|
b000b99d3f13c274c353d983004f99bb10a60f45
|
refs/heads/master
| 2021-01-20T21:59:40.988140
| 2020-12-04T01:28:23
| 2020-12-04T01:28:23
| 22,969,346
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
r
|
p286.R
|
# Simulate 2000 draws from a bivariate normal (correlation 0.5) truncated
# to the positive quadrant, two ways:
#   z: naive rejection sampling (redraw until both components > 0)
#   q: conditional simulation via the decomposition into a truncated
#      marginal and a truncated conditional
covmat=diag(2)
covmat[1,2]=covmat[2,1]=.5
z=matrix(0,2000,2)
q=matrix(0,2000,2)
count=0
for(i in 1:2000){
#naive simulation: rejection sampling; count tracks total attempts
z[i,]=0
while(z[i,1]<=0 | z[i,2]<=0)
{count=count+1;
z[i,]=rnorm(2,0,1)%*%(chol(covmat))
}
#conditional simulation based on decomposition:
# q1 from the standard normal truncated to (0, Inf) via inverse-CDF,
# then q2 | q1 from the truncated conditional normal
q[i,1]=qnorm(runif(1,min=.5,max=1),0,1)
mm=covmat[1,2]*q[i,1]
ss=1-.5^2
q[i,2]=qnorm(runif(1,min=pnorm(0,mm,sqrt(ss)),max=1),mm,sqrt(ss))
}
|
4c0365b3436c10fb66f3431298111241587f244a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nLTT/examples/nLTTstat_exact.Rd.R
|
ccd76ec11737607c40e1247bd06455cb5f297077
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
nLTTstat_exact.Rd.R
|
# Generated example file for nLTT::nLTTstat_exact (runs the Rd examples).
library(nLTT)
### Name: nLTTstat_exact
### Title: Calculate the exact difference between two normalized
### Lineage-Through-Time curves, given two phylogenetic trees.
### Aliases: nLTTstat_exact
### ** Examples
data(exampleTrees)
# plot the first tree's nLTT curve and overlay the second (dashed)
nltt_plot(exampleTrees[[1]])
nltt_lines(exampleTrees[[2]], lty = 2)
# exact absolute-difference statistic between the two curves
nLTTstat_exact(
exampleTrees[[1]],
exampleTrees[[2]],
distance_method = "abs",
ignore_stem = TRUE
)
|
8277d56331a84dcfe9f8e6369573afdce48eab4c
|
f697cbbb0da988fd43c07652d5955fbb82e25e38
|
/David/profileViewer/R/plotGeneDist.r
|
8554d9ce32d1de13b1fb38ca7311a9f360261ca6
|
[] |
no_license
|
aidanmacnamara/epiView
|
eec75c81b8c7d6b38c8b41aece3e67ae3053fd1c
|
b3356f6361fcda6d43bf3acce16b2436840d1047
|
refs/heads/master
| 2021-05-07T21:22:27.885143
| 2020-06-22T13:08:51
| 2020-06-22T13:08:51
| 109,008,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,757
|
r
|
plotGeneDist.r
|
#' plots the distribution of the genes
#'
#' @param x the measured gene data
#' @param type the type of plot required: currently only does a simple summary plot
#' @param log if TRUE, the density is drawn on the log10 scale
#' @param guides if given, red reference lines are shown for the specified values
#' @param guides2 if given, blue reference lines are shown for the specified values
#' @param wrap if true, separate distributions are plotted for each cell type. Redundant for
#' quantile normalised data where they are forced to be the same
#' @param no.guides if selected, all guide lines are suppressed
#' @return a ggplot object
#'
#' @export
#'
#' @examples
#' plotGeneDist(newProfiles$counts)
#' plotGeneDist(newProfiles$counts,guides=c(10,16))
plotGeneDist=function(x,
                      type="summary",
                      log=FALSE,
                      guides=NULL,
                      guides2=NULL,
                      wrap=FALSE,
                      no.guides=FALSE){
  # NOTE(review): prefer library()/package Imports over require() here;
  # kept for compatibility with the rest of the package.
  require(tidyverse)
  # one facet panel per state/cell-type combination
  x$panel=paste(x$state,x$cell_type)
  if (log){
    g = ggplot(x,aes(log10(value))) +
      geom_density(fill="skyblue") +
      xlab("log10 response")
    # guide values are log-transformed to match the plotted scale
    if (!is.null(guides) && !no.guides){
      g=g+geom_vline(xintercept = log10(guides), colour="red")
    }
    if (!is.null(guides2) && !no.guides){
      g=g+geom_vline(xintercept = log10(guides2), colour="blue")
    }
  } else {
    g = ggplot(x,aes(value)) +
      geom_density(fill="skyblue")
    # BUG FIX: no.guides was silently ignored on the linear scale;
    # honour it here too, consistent with the log branch.
    if (!is.null(guides) && !no.guides){
      g=g+geom_vline(xintercept = guides, colour="red")
    }
    if (!is.null(guides2) && !no.guides){
      g=g+geom_vline(xintercept = guides2, colour="blue")
    }
  }
  if (wrap){
    g = g + facet_wrap(~panel)
  }
  g
}
|
f96d64e8a5df0de98b573fc9ad4a356e3e242c36
|
10f047c7631b3aad90c7410c567c588993bfa647
|
/EcuRCode/WeightVsNestSize/PaperCode/Statistics_Output/Old/ModelSelectionPlaying.R
|
46f189d279a48e0b25f9796253ba9db9bc5a4239
|
[] |
no_license
|
ruthubc/ruthubc
|
ee5bc4aa2b3509986e8471f049b320e1b93ce1d5
|
efa8a29fcff863a2419319b3d156b293a398c3a9
|
refs/heads/master
| 2021-01-24T08:05:40.590243
| 2017-08-30T01:37:56
| 2017-08-30T01:37:56
| 34,295,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
ModelSelectionPlaying.R
|
# TODO: Add comment
#
# Author: user
###############################################################################
### From here http://rpubs.com/kaz_yos/exhaustive
## Create vectors for outcome and predictors
# Exhaustive mixed-model comparison: for every non-empty subset of
# `predictors`, build the formula
#   outcome ~ <subset> + Instar + (1|NestID)
# fit it by ML with lme4::lmer, and return the fits ordered by AIC
# (worst first).  Requires lme4 (lmer) and doBy (orderBy) to be loaded.
allModelsAIC <- function(outcome, predictors, dataset) {
# one formula string per subset size n, all size-n combinations at once
list.of.models <- lapply(seq_along((predictors)), function(n) {
left.hand.side <- outcome
right.hand.side <- apply(X = combn(predictors, n), MARGIN = 2, paste, collapse = " + ")
right.hand.side <- paste(right.hand.side, "+ Instar + (1|NestID)")
paste(left.hand.side, right.hand.side, sep = " ~ ")
})
## Convert to a vector
vector.of.models <- unlist(list.of.models)
## Fit lmer (ML) to all models and record edf/AIC from extractAIC
## NOTE(review): `num.predictors` actually stores the equivalent degrees
## of freedom returned by extractAIC, not the predictor count.
list.of.fits <- lapply(vector.of.models, function(x) {
formula <- as.formula(x)
fit <- lmer(formula, data = dataset, REML = FALSE)
result.AIC <- extractAIC(fit)
data.frame(num.predictors = result.AIC[1],
AIC = result.AIC[2],
model = x)
})
## Collapse to a data frame
result <- do.call(rbind, list.of.fits)
## De-duplicate models with identical (edf, AIC), keeping the longest
## formula string of each duplicate set
result$modLen <- nchar(as.character(result$model))
result <- orderBy(~ -modLen, result)
result$dup <- duplicated(result[,c('num.predictors', 'AIC')])
result <- orderBy(~ -AIC, result)
## NOTE(review): comparing a logical to the string "FALSE" works only via
## implicit coercion; `!result$dup` would be the explicit form.
result <- result[(result$dup == "FALSE"), ]
result <- result[, c('AIC', 'model', 'num.predictors')]
return(result)
}
## The lines below should not need modification.
## Create list of models
#print(result)
#Sys.setenv(RSTUDIO_PANDOC = "C:/Users/user/AppData/Local/Pandoc")
#Sys.getenv("RSTUDIO_PANDOC")
#Sys.setenv(pdflatex = "C:/Program Files (x86)/MiKTeX 2.9/miktex/bin")
#rmarkdown::render()
|
a01f735fae5ec3846debc337581433699bf5f898
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mgpd/R/ml_mix.R
|
12869ebaa69ed448c2837b60da3c5ba2334455df
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,118
|
r
|
ml_mix.R
|
# Negative log-likelihood of the bivariate "mix" model for multivariate
# generalized Pareto data, intended as an objective for optim().
# param = c(mux, sigx, gamx, muy, sigy, gamy): GPD-style location/scale/
# shape for the x and y margins; dat is a 2-column matrix of observations.
# Returns `mlmax` (a large penalty) whenever the parameters are
# inadmissible (negative scale, or support constraints violated).
ml_mix <-
function(param,dat,mlmax=1e+15,fixed=FALSE,...)
{
loglik = mlmax
hxy = NA
x = dat[,1]
y = dat[,2]
error = FALSE
mux = param[1]; muy = param[4]
sigx = param[2]; sigy = param[5]
gamx = param[3]; gamy = param[6]
# dependence parameter is fixed at 1 in this model variant
alpha = 1
# mu: exponent-measure density ingredient; dxdymu: its mixed derivative
mu = function(x,y) 1/x+1/y-alpha/(x+y)
dxdymu = function(x,y) -2*alpha/(x+y)^3
# negative scales are inadmissible
if(sigx<0 | sigy<0 ) error=TRUE
# optionally pin the x-location at 0
if(fixed==TRUE) {mux=0}
if(error) loglik = mlmax
if(!error)
{
# marginal transformations to the standard (Frechet-type) scale,
# evaluated at the data and at the origin
tx = (1+gamx*(x-mux)/sigx)^(1/gamx)
ty = (1+gamy*(y-muy)/sigy)^(1/gamy)
tx0 = (1+gamx*(-mux)/sigx)^(1/gamx)
ty0 = (1+gamy*(-muy)/sigy)^(1/gamy)
# Jacobians of the transformations; pmax guards against negative bases
dtx = (1/sigx)*pmax((1+gamx*(x-mux)/sigx),0)^(1/gamx-1)
dty = (1/sigy)*pmax((1+gamy*(y-muy)/sigy),0)^(1/gamy-1)
# normalising constant and bivariate density; points in the third
# quadrant (x<0 and y<0) get density 0
c0 = -mu(tx0,ty0)
hxy = 1/c0*dxdymu(tx,ty)*dtx*dty
hxy = as.numeric(hxy*(1-((x<0)*(y<0))))
loglik = -sum(log(hxy))
}
# support constraints: every observation must satisfy 1 + gam*(v-mu)/sig > 0
if(min(1+gamx*(x-mux)/sigx)<0) loglik=mlmax
if(min(1+gamy*(y-muy)/sigy)<0) loglik=mlmax
loglik
}
|
861e24cb0f66b5f1dcfb6d7a94cc7dec054a7c24
|
1d76f7728e21ac36d51ea764b526d82030d1219a
|
/hard.R
|
1d73d34c58f8a232eab7f4c40a88dd3d36bda6cf
|
[] |
no_license
|
jatinrajani/BiodiversityDataCleaning
|
ff0c874780da0e1b6241880ba5b91b0019166c59
|
4b95502d8dc5c3b4f974a6ad3e647d786b6418b3
|
refs/heads/master
| 2020-05-25T08:38:13.946046
| 2017-03-14T09:37:36
| 2017-03-14T09:37:36
| 84,927,531
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,467
|
r
|
hard.R
|
library(rgbif)
library(spocc)
library(geosphere)
# NOTE(review): setwd() in a script breaks portability; consider relative
# paths or here::here() instead.
setwd("~/BiodiversityDataCleaning")
# using a csv file for getting coordinates of each country's centroid
# (tab-separated despite the .csv extension)
countries=read.csv("country_centroids_all.csv",stringsAsFactors = FALSE,sep="\t")
# `range` below is specified in kilometres
nearesttoCentroid<-function(country,range,species)
{
  # Count GBIF occurrence records of `species` lying within `range` km of
  # `country`'s centroid.  Uses the global `countries` table (LAT/LONG
  # columns) loaded above.
  #
  # Args:
  #   country: SHORT_NAME value in the `countries` table
  #   range:   radius in kilometres
  #   species: scientific name passed to rgbif::occ_data
  # Returns: number of occurrences strictly within `range` km.
  # centroid coordinates (latitude/longitude) for the country
  coordinates <- countries[countries$SHORT_NAME == country, c(1, 2)]
  # occurrence records with coordinates for the species (up to 1000)
  spec <- occ_data(scientificName = species, hasCoordinate = TRUE, limit = 1000)
  speccordinates <- spec$data[, c(3, 4)]
  # distm() is vectorised over rows, so a single call gives the distance of
  # every occurrence to the centroid.  (BUG FIX: the original looped over
  # the two *columns* of the coordinate frame, resetting `distance` each
  # pass, so it recomputed the identical full matrix and kept only the last
  # copy.)
  distance <- distm(speccordinates[, c("decimalLongitude", "decimalLatitude")],
                    coordinates[, c("LONG", "LAT")],
                    fun = distHaversine)
  # convert metres to kilometres
  distance <- as.numeric(distance) / 1000
  # occurrences within range; NA distances are ignored, matching the
  # original length(which(...)) behaviour
  sum(distance < range, na.rm = TRUE)
}
# Example: one-horned rhinoceros occurrences near India's centroid
nearesttoCentroid("India",1000,"Rhinoceros unicornis")
# returning 9 within 1000 km
|
9bfe27f1a5883a58e0db8a18598b07c9600ceeb0
|
e679a30bf9a406ad781f3913735d7ae8f33f78e7
|
/R/mdca.R
|
aec3e2a5ac2f3893bd8bcd13db17650bf47f09da
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
gederajeg/happyr
|
b40fd1e1008eac240385f7842df397ec49972fa5
|
0ae2fe64a32716905307aa8133eafeccef1f3375
|
refs/heads/master
| 2021-12-12T22:24:36.112108
| 2021-12-10T23:29:58
| 2021-12-10T23:29:58
| 142,245,688
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,721
|
r
|
mdca.R
|
#' Multiple distinctive collexeme analysis (MDCA)
#'
#' @description Function to perform \emph{Multiple Distinctive Collexeme Analysis} (MDCA) in Rajeg (2019, Chapter 7).
#' @param df the data frame for the thesis (\code{phd_data_metaphor}) included in the package.
#' @param cxn_var character strings for the column name for the \code{constructions} variable, in this case, the "\code{synonyms}" column.
#' @param coll_var character strings for the column name for the \code{collocates} variable, in this case, the "\code{metaphors}" column.
#' @param already_count_table logical; the default is \code{FALSE} indicating \code{mdca} takes raw input data frame for observation-per-row format as in the case of \code{phd_data_metaphor}.
#' When it is \code{TRUE}, it expects tidy co-occurrence count between values of \code{var_cxn} and \code{var_coll} with three columns:
#' \tabular{rrr}{
#' synonyms \tab metaphors \tab n\cr
#' kesenangan \tab happiness is a possessable object \tab 182\cr
#' kebahagiaan \tab happiness is a possessable object \tab 181\cr
#' ... \tab ... \tab ...
#' }
#' @param assocstr_digits integer for the floating points/digits of the \emph{Association Strength}. The default is \code{3L}.
#' @param correct_holm logical; the default is \code{TRUE} for performing Holm's correction method of the \emph{p}-value (cf. Gries, 2009, p. 249).
#' @param concise_output logical; if \code{TRUE} (the default), \code{mdca} outputs the following columns:
#' \itemize{
#' \item \code{metaphors}
#' \item \code{synonyms}
#' \item \code{n} (for the \emph{observed} co-occurrence frequency between \code{metaphors} and the \code{synonyms}).
#' \item \code{exp} (for the \emph{expected} co-occurrence frequency between \code{metaphors} and the \code{synonyms}).
#' \item \code{p_binomial} (the one-tailed \emph{p}-value of the \emph{Binomial Test}).
#' \item \code{assocstr} (the log10 transformed values of the \emph{p}-value of the \emph{Binomial Test}. The \code{assocstr} values are \emph{positive} when \code{n} is higher than the \code{exp} frequency, and they are \emph{negative} when otherwise.).
#' \item \code{p_holm} (when \code{correct_holm} is \code{TRUE})
#' \item \code{dec} (significance decision after Holm's correction) (when \code{correct_holm} is \code{TRUE})
#' }
#'
#' If \code{concise_output} is \code{FALSE}, \code{mdca} returns the total tokens in the data, total frequency of each collexeme/collocate, total frequency of each construction, the sum of absolute deviation of the collexeme/collocate, the construction name, showing the largest deviation from the expected, co-occurrence frequency with the collexeme, expected probability of the co-occurrence, and the direction of the deviation from the expected frequency (i.e., whether a collexeme is attracted or repelled by a construction).
#' @details The \code{mdca} function is built on top of the core members of the \code{tidyverse} suit of packages.
#' The computation of the \emph{Association Strength} is based on the \code{\link[stats]{dbinom}} function (Gries, 2009, pp. 41-42; cf. Hilpert, 2006). The computation of the corrected \emph{p}-value of the one-tailed Binomial Test with Holm's method is performed using \code{\link[stats]{p.adjust}}.
#'
#' There is a well-known interactive R script to perform MDCA by Stefan Th. Gries that is called \href{http://www.linguistics.ucsb.edu/faculty/stgries/teaching/groningen/index.html}{\emph{Coll.analysis 3.5}} (Gries, 2014). The script includes the other codes to compute the family of methods of \emph{Collostructional Analyses}. The \code{mdca} function in happyr aims to achieve the same analytical goal as that in \emph{Coll.analysis 3.5}, but is designed differently in terms of its usage and the internal codes, as it is based on the \href{https://www.tidyverse.org/}{tidyverse}.
#'
#' \code{mdca} allows users to have input and output data frame directly in the R environment, primarily enabling them to write interactive document in R Markdown in relation to MDCA. Moreover, happyr provides two functions dedicated to handle the output of \code{mdca} to retrieve the \emph{distinctive/attracted} and \emph{repelled} collexemes/collocates for a given construction. In contrast, Stefan Gries' script has two options to either print the output into (i) terminal or (ii) into external plain text, which requires post-processing of the results, mostly on a spreadsheet.
#'
#' @return A tbl_df (cf. the \code{concise_output}).
#' @examples
#' # for distinctive metaphors
#' mdca_res <- mdca(df = phd_data_metaphor,
#' cxn_var = "synonyms",
#' coll_var = "metaphors",
#' correct_holm = TRUE,
#' concise_output = TRUE,
#' already_count_table = FALSE,
#' assocstr_digits = 3L)
#'
#' # for distinctive 4-window span collocates
#' data("colloc_input_data")
#' mdca_colloc <- mdca(df = colloc_input_data,
#' cxn_var = "synonyms",
#' coll_var = "collocates",
#' correct_holm = TRUE,
#' concise_output = TRUE,
#' already_count_table = FALSE,
#' assocstr_digits = 3L)
#' @importFrom dplyr group_by_
#' @importFrom dplyr if_else
#' @importFrom tidyr complete_
#' @importFrom stats p.adjust
#' @importFrom stats dbinom
#' @importFrom purrr pmap
#' @importFrom purrr map_dbl
#' @importFrom dplyr summarise
#' @importFrom dplyr left_join
#' @importFrom dplyr mutate
#' @importFrom stringr str_c
#' @importFrom purrr map
#' @importFrom purrr map_df
#' @importFrom dplyr rename_
#' @importFrom dplyr select_
#' @importFrom dplyr filter
#' @importFrom dplyr select
#' @importFrom dplyr matches
#' @importFrom rlang :=
#' @importFrom rlang .data
#' @importFrom tidyr nest
#' @importFrom tidyr unnest
#' @references
#' \itemize{
#' \item Gries, S. T. (2009). \emph{Statistics for linguistics with R: A practical introduction}. Berlin: Mouton de Gruyter.
#' \item Gries, S. T. (2014). Coll.analysis 3.5. A script for R to perform collostructional analyses. \url{http://www.linguistics.ucsb.edu/faculty/stgries/teaching/groningen/index.html}.
#' \item Hilpert, M. (2006). Distinctive collexeme analysis and diachrony. \emph{Corpus Linguistics and Linguistic Theory}, \emph{2}(2), 243–256.
#' \item Rajeg, G. P. W. (2019). \emph{Metaphorical profiles and near-synonyms: A corpus-based study of Indonesian words for HAPPINESS} (PhD Thesis). Monash University. Melbourne, Australia. \url{https://doi.org/10.26180/5cac231a97fb1}.
#' }
#' @export
mdca <- function(df = NULL,
cxn_var = "synonyms",
coll_var = "metaphors",
already_count_table = FALSE,
assocstr_digits = 3L,
correct_holm = TRUE,
concise_output = TRUE) {
assertthat::assert_that(!is.null(df), msg = "The `df` argument is NULL; please specify it with the data frame input!")
# columns names for tidyeval
cxn_var <- rlang::sym(cxn_var)
coll_var <- rlang::sym(coll_var)
cxn_sum <- dplyr::quo(cxn_sum)
colloc_sum <- dplyr::quo(colloc_sum)
dbase_token <- dplyr::quo(dbase_token)
p_binomial <- dplyr::quo(p_binomial)
p_holm <- dplyr::quo(p_holm)
dec <- dplyr::quo(dec)
assocstr <- dplyr::quo(assocstr)
exp <- dplyr::quo(exp)
exp_prob <- dplyr::quo(exp_prob)
alt <- dplyr::quo(alt)
obs_exp <- dplyr::quo(obs_exp)
abs_assocstr <- dplyr::quo(abs_assocstr)
# function IF MDCA starts here
# cross-tab the relevant variables
if (already_count_table == FALSE) {
co_occ_tb <- dplyr::count(df, !!cxn_var, !!coll_var)
co_occ_tb <- tidyr::complete(co_occ_tb, !!coll_var, !!cxn_var, fill = list(n = 0L))
} else {
co_occ_tb <- tidyr::complete(df, !!coll_var, !!cxn_var, fill = list(n = 0L))
}
# get the total database token/sum of the database
if (already_count_table == FALSE) {
co_occ_tb <- dplyr::mutate(co_occ_tb, !!dplyr::quo_name(dbase_token) := dim(df)[1])
} else {
co_occ_tb <- dplyr::mutate(co_occ_tb, !!dplyr::quo_name(dbase_token) := sum(.data$n))
}
# get the total freq. of the construction/node word
co_occ_tb <- dplyr::mutate(dplyr::group_by(co_occ_tb, !!cxn_var),
!!dplyr::quo_name(cxn_sum) := sum(.data$n))
# get the total freq. of the collocates/collexemes/context words
co_occ_tb <- dplyr::mutate(dplyr::group_by(co_occ_tb, !!coll_var),
!!dplyr::quo_name(colloc_sum) := sum(.data$n))
# get the exp.freq and exp.prob
co_occ_tb <- dplyr::mutate(dplyr::ungroup(co_occ_tb),
!!dplyr::quo_name(exp) := (cxn_sum * colloc_sum)/.data$dbase_token,
!!dplyr::quo_name(exp_prob) := exp/colloc_sum,
!!dplyr::quo_name(obs_exp) := '=',
obs_exp = dplyr::if_else(n > exp, '>', .data$obs_exp), # obs_exp diff.
obs_exp = dplyr::if_else(n < exp, '<', .data$obs_exp), # obs_exp diff.
!!dplyr::quo_name(alt) := dplyr::if_else(n >= exp, 'greater', 'less'))
# binomial test function
binomial_test <- function(n, colloc_sum, exp_prob, alt) {
if (alt == "greater") {
pbin <- sum(dbinom(n:colloc_sum, colloc_sum, exp_prob))
} else {
pbin <- sum(dbinom(0:n, colloc_sum, exp_prob))
}
return(pbin)
}
# association strength function
assoc_strength <- function(n, exp, p_binomial, assocstr_digit = assocstr_digit) {
assocstr <- dplyr::if_else(n >= exp,
round(-log10(p_binomial), assocstr_digit),
round(log10(p_binomial), assocstr_digit))
return(assocstr)
}
# run binomial test, association strength computation, and Holm's adjustment
# cf. http://rcompanion.org/rcompanion/f_01.html for example with `p.adjust()`
co_occ_tb <- dplyr::mutate(co_occ_tb,
!!dplyr::quo_name(p_binomial) := purrr::pmap_dbl(list(n, colloc_sum, exp_prob, alt),
binomial_test),
!!dplyr::quo_name(assocstr) := purrr::pmap_dbl(list(n, exp, p_binomial),
assoc_strength, assocstr_digits),
!!dplyr::quo_name(abs_assocstr) := abs(.data$assocstr),
!!dplyr::quo_name(p_holm) := stats::p.adjust(p_binomial, "holm"),
!!dplyr::quo_name(dec) := "ns", # from Gries' (2004) HCFA script
dec = dplyr::if_else(p_holm < 0.1, "ms", dec), # from Gries' (2004) HCFA script
dec = dplyr::if_else(p_holm < 0.05, "*", dec), # from Gries' (2004) HCFA script
dec = dplyr::if_else(p_holm < 0.01, "**", dec), # from Gries' (2004) HCFA script
dec = dplyr::if_else(p_holm < 0.001, "***", dec))
# Gries, Stefan Th. 2004. HCFA 3.2. A program for R. URL: <http://www.linguistics.ucsb.edu/faculty/stgries/>
# Gries' HCFA script is available from the following book:
# Gries, Stefan Th. (2009). Statistics for linguistics with R: A practical introduction. Berlin: Mouton de Gruyter.
# get the sum of absolute deviation
dbase_to_left_join <- dplyr::group_by(co_occ_tb, !!coll_var)
## generate a sum_abs_dev for the COLLOCATES
dbase_to_left_join <- dplyr::summarise(dbase_to_left_join, sum_abs_dev = sum(.data$abs_assocstr))
dbase_to_left_join <- dplyr::ungroup(dbase_to_left_join)
co_occ_tb <- dplyr::left_join(co_occ_tb, dbase_to_left_join, by = dplyr::quo_name(coll_var))
## get the CxN with the largest deviation
df_for_largest_dev <- split(co_occ_tb, co_occ_tb[, 1])
df_for_largest_dev_res <- purrr::map_df(df_for_largest_dev, function(lrg_dev) dplyr::filter(dplyr::ungroup(lrg_dev), lrg_dev$abs_assocstr == max(lrg_dev$abs_assocstr)))
# df_for_largest_dev_res <- purrr::map_df(df_for_largest_dev_res, function(lrg_dev) dplyr::ungroup(lrg_dev))
vars_to_select <- stringr::str_c(dplyr::quo_name(coll_var), dplyr::quo_name(cxn_var), 'abs_assocstr', sep = '|')
df_for_largest_dev_res <- dplyr::select(df_for_largest_dev_res,
dplyr::matches(vars_to_select))
df_for_largest_dev_res <- dplyr::rename(df_for_largest_dev_res, largest_dev = !!cxn_var)
df_for_largest_dev_res <- dplyr::select(df_for_largest_dev_res, -!!abs_assocstr)
rm(df_for_largest_dev)
## left_join the largest dev. CxN
co_occ_tb <- dplyr::ungroup(co_occ_tb)
co_occ_tb <- dplyr::select(co_occ_tb, -!!abs_assocstr)
co_occ_tb <- dplyr::left_join(co_occ_tb, df_for_largest_dev_res, by = dplyr::quo_name(coll_var))
# outputting the results
if (concise_output == TRUE) {
if (correct_holm == FALSE) {
x <- dplyr::select(co_occ_tb,
!!coll_var,
!!cxn_var,
!!rlang::sym('n'),
!!rlang::sym('exp'),
!!rlang::sym('assocstr'),
!!rlang::sym('p_binomial'))
x <- dplyr::mutate(x,
p_binomial = format(.data$p_binomial, digits = assocstr_digits + 1L))
return(x)
} else {
x <- dplyr::select(co_occ_tb,
!!coll_var,
!!cxn_var,
!!rlang::sym('n'),
!!rlang::sym('exp'),
!!rlang::sym('assocstr'),
!!rlang::sym('p_binomial'),
!!rlang::sym('p_holm'),
!!rlang::sym('dec'))
x <- dplyr::mutate(x, p_holm = format(.data$p_holm, digits = assocstr_digits + 1L),
p_binomial = format(.data$p_binomial, digits = assocstr_digits + 1L))
return(x)
}
} else {
x <- dplyr::select(co_occ_tb,
!!coll_var,
!!cxn_var,
!!rlang::sym('n'),
!!rlang::sym('exp'),
!!rlang::sym('assocstr'),
!!rlang::sym('p_binomial'),
!!rlang::sym('p_holm'),
!!rlang::sym('dec'),
!!rlang::sym('sum_abs_dev'),
!!rlang::sym('largest_dev'),
dplyr::everything()
)
return(x)
}
}
|
9c727c435f861f1d10ed9a5294136291f582ddaf
|
450cf51141602b88597d17dc8daa0170f3f1dba2
|
/R/data.R
|
2d009c1e4d2acdc980cb25751942807a5f93b0be
|
[] |
no_license
|
jacob-ogre/ecosscraper
|
6b16b133738076cb75e1336c28dfc932e1823496
|
63bafcc0213c52a2d2620cc1d17ef290d150d13b
|
refs/heads/master
| 2021-04-30T23:24:42.375356
| 2018-01-23T16:07:03
| 2018-01-23T16:07:03
| 61,709,830
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
data.R
|
#' Data.frame of base information for all ECOS species
#'
#' @details The data is the table returned using the URL:
#'
#' \url{https://goo.gl/tZRu5o}
#'
#' By default, \code{ecosscraper} checks this URL \code{.onAttach} to ensure -
#' assuming the user restarts their sessions regularly - that the source is
#' up-to-date. If the URL throws a \link[httr]{http_error} then this built-in
#' data.frame will be loaded.
#'
#' @note Also notice that \code{ecosscraper} includes \link{get_TECP_table},
#' which can be used to 'override' the built-in data.
#'
#' @note The source URL can be changed using options()$TE_list. The current data
#' was scraped on \code{29 Nov 2016}
#'
#' @seealso \link{get_TECP_table}
#'
#' @format A data frame with 2445 rows and 10 variables
#' \describe{
#' \item{Scientific_Name}{The scientific name, as recorded in ECOS}
#' \item{Common_Name}{The common name, as recorded in ECOS}
#' \item{Species_Code}{The four-character code assigned to species in ECOS}
#' \item{Critical_Habitat}{CFR section under which CH was declared}
#' \item{Species_Group}{Taxonomic group of species, as recorded in ECOS}
#' \item{Lead_Region}{FWS region responsible for recovery}
#' \item{Federal_Listing_Status}{At time of scraping}
#' \item{Special_Rules}{CFR section under which any special rules were made}
#' \item{U_S__or_ForeignListed}{One of US, US/Foreign, Foreign}
#' \item{Where_Listed}{Geographic extent of listed entity}
#' \item{Species_Page}{URL dedicated to the species on ECOS}
#' }
#' @source \url{http://ecos.fws.gov/tess_public}
"TECP_table"
|
1c80f6c27ae38ba5a1841035090506cfe8d94d79
|
e508870d7b82ca065aff9b7bf33bc34d5a6c0c1c
|
/pkg/man/GeneralInFlux.Rd
|
32309bda31ea4ccdcf47a945a6ce571f024ca33d
|
[] |
no_license
|
Dong-po/SoilR-exp
|
596be0e6c5d291f00c6e08c348952ee23803e15e
|
c10d34e035deac8af4912c55382012dfc247eedc
|
refs/heads/master
| 2021-09-03T11:12:49.199268
| 2018-01-08T15:52:17
| 2018-01-08T15:52:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 452
|
rd
|
GeneralInFlux.Rd
|
\description{S4 generic \code{GeneralInFlux}; see the methods listed below for the supported argument classes.}
\name{GeneralInFlux}
\alias{GeneralInFlux}
\usage{GeneralInFlux(object)}
\title{GeneralInFlux S4 generic}
\arguments{
\item{object}{see the method arguments for details}
}
\section{Methods}{ \code{\link{GeneralInFlux,function-method}}\cr
\code{\link{GeneralInFlux,InFlux-method}}\cr
\code{\link{GeneralInFlux,list-method}}\cr
\code{\link{GeneralInFlux,numeric-method}}\cr
\code{\link{GeneralInFlux,TimeMap-method}}\cr}
|
56c37e981c7bb4d50134ccb8cd1b49e0064ae741
|
f902e30a623b9587cf693a2daf5da83c2bf24204
|
/man/remove_before.Rd
|
586d54705d7e3a8c65894e2354a6a04546f080a3
|
[] |
no_license
|
ormoses/TVscriptsR
|
e9ba1c378617d50d92b7f8e0bef41cc309055df9
|
075dd38fd28b2d4db40287024b5c1c6c0e4e8196
|
refs/heads/master
| 2021-01-10T22:37:10.328132
| 2016-10-13T14:16:54
| 2016-10-13T14:16:54
| 70,378,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
remove_before.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/string_helper_functions.R
\name{remove_before}
\alias{remove_before}
\title{Remove everything before a pattern}
\usage{
remove_before(string, pattern, occur = "first")
}
\arguments{
\item{string}{a string to remove pattern from}
\item{pattern}{a regular expression that indicates the pattern to remove}
\item{occur}{string indicating whether to remove from/till the "first" or "last" occurrence}
}
\value{
a string after removing everything before the pattern
}
\description{
Remove everything before a pattern in a string
}
|
ef91f25034491b1f77333053d4a8ba6d1010bdb9
|
54a2150db5408c04b39788a23a79843aaa95dbec
|
/plot2.R
|
0c863e4ef97b19e113fe4f0a0d1e66b4d6a301d5
|
[] |
no_license
|
kennybob/ExData_Plotting1
|
cd697fe67ed0af46d2ba123649ef3915d98e11eb
|
b535e9d36edc155605d2b86adf0ac92be22c30bc
|
refs/heads/master
| 2021-01-17T18:16:35.522682
| 2014-11-08T16:22:40
| 2014-11-08T16:22:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,114
|
r
|
plot2.R
|
## Exploratory_data_analysis_project_week1
## plot2.R
## Usage: source("https://raw.githubusercontent.com/kennybob/ExData_Plotting1/master/plot2.R")
## Local variables
FetchDataAgain <- TRUE
## FUNCTION: getData
## This function downloads the dataset from the web
## ... inserts data into a data frame (hpc)
## ... filters data for dates 1/2/2007 and 2/2/2007 results stored in xhpc
## ... reformats the Date and Time columns
##
## NOTE: If we need to re-download the file from the web
## ......set the input parameter to TRUE
## FUNCTION: getData
## Downloads (once) and unzips the UCI household power consumption archive,
## reads it into a data frame, filters to the dates 1/2/2007 and 2/2/2007,
## and converts the Date/Time columns to proper date-time classes.
## Returns: the filtered, type-converted data frame.
getData <- function() {
  fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  ## Only fetch the archive if it is not already present locally.
  if (!file.exists("wk1.zip")) {
    download.file(fileUrl, destfile = "wk1.zip", mode = "wb")
    unzip("wk1.zip")
  }
  ## Read in Household Power Consumption data (hpc)
  print("Reading data into data frame. Please wait...")
  flush.console()
  hpc <- read.table("household_power_consumption.txt",
                    sep = ";",
                    header = TRUE,
                    colClasses =
                      c(rep("character", 2), ## first 2 columns: Date, Time
                        rep("numeric", 7)    ## remaining 7: measurements
                      ),
                    na.strings = "?")
  print("Reading data complete")
  flush.console()
  ## Filter data by date column
  xhpc <- hpc[hpc$Date %in% c("1/2/2007", "2/2/2007"), ]
  ## Combine Date + Time into one timestamp.
  ## BUGFIX: strptime() returns POSIXlt, a list-like class that misbehaves
  ## as a data.frame column; wrap it in as.POSIXct() so the column is a
  ## proper atomic date-time vector (plots identically).
  xhpc$Time <- as.POSIXct(strptime(paste(xhpc$Date, xhpc$Time),
                                   format = "%d/%m/%Y%H:%M:%S"))
  ## Convert the "Date" column using as.Date
  xhpc$Date <- as.Date(xhpc$Date, format = "%d/%m/%Y")
  ## return the dataset to the caller
  xhpc
}
## Main Processing
## Fetch the data##
if (FetchDataAgain == TRUE) {
xhpc <- getData()
}
## Initialise plot2.png
png(filename = "plot2.png"
, width = 480, height = 480, units = "px", bg = "white")
## Plot using lines
plot(xhpc$Time, xhpc$Global_active_power
, type = "l"
, xlab = ""
, ylab = "Global Active Power (kilowatts)"
)
## Complete writing to PNG device
dev.off()
print("done")
|
a85ea3c170a255ad03e66a8404839e713ced3d9d
|
5deadc6dcd320ead5503ae79d4492451aa5eef68
|
/tests/testthat/test-fitPM.R
|
0f6f208dec02739f55a071b751d4285d73133e71
|
[] |
no_license
|
GeoBosh/pcts
|
1a739a65efd7486c6f4a5e8702b206825bad27b2
|
8d6d83677e1f1d447680d562991a97eb288529fe
|
refs/heads/master
| 2022-05-28T07:33:58.784092
| 2022-05-18T10:02:16
| 2022-05-18T10:02:16
| 190,918,136
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,615
|
r
|
test-fitPM.R
|
context("fitPM")
test_that("test fitPM()",
{
set.seed(1234)
x <- arima.sim(list(ar = 0.9), n = 1000)
mx <- matrix(x, nrow = 4)
x_pcts <- pcts(as.numeric(x), nseasons = 4)
expect_error(fitPM(c(1.5, 2, 3, 1), x), "The PAR orders must be non-negative integer numbers")
expect_error(fitPM("dummy", x), "unable to find an inherited method for function 'fitPM'")
expect_error(fitPM(c(3,2,2,2), mx), "multivariate PAR fitting not implemented yet")
proba1 <- fitPM(c(3, 2, 2, 2), as.numeric(mx))
expect_equal_to_reference(proba1, "proba1.RDS")
expect_output(show(proba1))
expect_output(summary(proba1))
expect_error(fitPM(2, mx),
## "unable to find an inherited method for function [.]nSeasons[.] for signature [.]\"matrix\"[.]"
"unable to find an inherited method for function"
)
expect_identical(fitPM(2, x_pcts), fitPM(c(2, 2, 2, 2), as.numeric(mx)))
eps.proba1 <- residuals(proba1)
expect_identical(eps.proba1, pcts:::.whiten(proba1))
eps.proba1[1:2] <- NA
dim(eps.proba1) <- dim(mx)
pc.cconesidedsum(mx, eps.proba1, maxlag = 4)
## estimate h_{t,i}, see Boshnakov (1996)
pc.hat.h(mx, eps.proba1, maxlag = 4)
## replacing with equivalent code using the new Fraser2017
## data(Fraser, package = "pear")
Fraser <- window(Fraser2017, start = c(1912, 3), end = c(1990, 12))
logFraser <- log(Fraser)
## TODO: for now I need whole years;
## !!! However note the typo 'logfraser', the following use 'logFraser'!
logfraser <- ts(logFraser[1:936], frequency = 12)
#### co1_pear <- pear::pear(logFraser, 1)[["phi"]]
# fitPM(as.numeric(logFraser), order = rep(1, 12), period = 12, seasonof1st = 3)
az1 <- fitPM(model = rep(1, 12), as.numeric(logFraser), seasonof1st = 3)
az2 <- fitPM(model = rep(1, 12), as.numeric(logFraser))
#### expect_true(all.equal(as.vector(az1@ar@coef[ , 1]), as.vector(co1_pear[ , 1])))
#### expect_true(all.equal(as.vector(az2@ar@coef[ , 1]), as.vector(co1_pear[ , 1])[c(3:12, 1:2)]))
## pcfr2 <- pcts(dataFranses1996[ , 2 ])
pcfr23 <- pcts(dataFranses1996[ , 2:3])
expect_error(fitPM(model = rep(1, 4), pcfr23), "Multivariate case not implemented yet")
## fitPM(model = rep(1, 4), pcfr23[1]) # tests the method for PeriodicMTS ([] keep MTS class)
## fitPM(model = rep(1, 4), pcfr23[[1]]) # tests the method for PeriodicTS ('[[' drops the 'M')
expect_identical(fitPM(model = rep(1, 4), pcfr23[1]),
fitPM(model = rep(1, 4), pcfr23[[1]]))
x <- arima.sim(list(ar = 0.9), n = 960)
pcx <- pcts(x, nseasons = 4)
mx <- matrix(x, nrow = 4)
pfm1 <- PeriodicArModel(matrix(1:12, nrow = 4), order = rep(3,4), sigma2 = 1)
sipfm1 <- new("SiPeriodicArModel", iorder = 1, siorder = 1, pcmodel = pfm1)
fitPM(sipfm1, mx)
expect_output(show(sipfm1))
d4piar2 <- rbind(c(1,0.5,-0.06), c(1, 0.6, -0.08), c(1, 0.7, -0.1), c(1, 0.2, 0.15))
picoef1 <- c(0.8, 1.25, 2, 0.5)
parcoef1 <- d4piar2[, 2:3]
coef1 <- pi1ar2par(picoef1, parcoef1)
tmpval <- PeriodicArModel(parcoef1)
##pipfm <- PiParModel(piorder = 1, picoef = picoef1, par = tmpval)
pipfm <- new("PiPeriodicArModel", piorder = 1,
picoef = matrix(picoef1, ncol = 1), pcmodel = tmpval)
expect_output(show(pipfm))
perunit <- sim_pc(list(phi = coef1, p = 3, q = 0, period = 4),500)
fitPM(pipfm, perunit)
## temporary
proba1x <- new("FittedPeriodicArmaModel", as(proba1, "PeriodicArmaModel"),
theTS = proba1@theTS, ns = proba1@ns, asyCov = proba1@asyCov)
expect_identical(residuals(proba1x), residuals(proba1))
expect_error(as_pcarma_list(1:10),
"unable to find an inherited method for function 'as_pcarma_list'")
expect_output(show(proba1x))
fitted(proba1x)
predict(proba1x, 1)
predict(proba1x, 8)
n <- 100
x <- arima.sim(list(ar=0.9), n = n)
proba1 <- fitPM(c(3,2,2,2), x)
meancovmat(proba1, n/10)
meancovmat(proba1, n/10, cor = TRUE)
meancovmat(proba1, n/10, result = "")
meancovmat(proba1, n/10, cor = TRUE, result = "")
meanvarcheck(proba1, 100)
})
test_that("test mC.ss() works",
{
pcts_exdata()
## examples from mC.ss.Rd
# test0 roots
spec.coz2 <- mcompanion::mcSpec(dim = 5, mo = 4, root1 = c(1,1), order = rep(2,4))
spec.coz2
xxcoz2a <- mC.ss(spec.coz2)
## test0 roots
spec.coz4 <- mcompanion::mcSpec(dim = 5, mo = 4, root1 = c(1,1), order = rep(3,4))
xxcoz4a <- mC.ss(spec.coz4)
## excerpt from
## ~/Documents/Rwork/pctsExperiments/Rsessions/combined upto 2013-12-31 17h36m.Rhistory
spec.co2 <- mcompanion::mcSpec(dim = 5, mo = 4, siorder = 1)
tmp2 <- mC.ss(spec.co2)
## only two iters for testthat
expect_output(mc.res1ssenv2b <- tmp2$env$minimBB(nsaauto, control=list(maxit=2)))
expect_output(tmp2$env$minimBB(nsaauto, control=list(maxit=2)))
expect_output(tmp2$env$minimBBlu(nsaauto, control=list(maxit=2)))
expect_output(tmp2$env$minimBB(nsaauto, control=list(maxit=2), CONDLIK = FALSE))
tmp2$env$minim(nsaauto, control=list(maxit=2))
tmp2$env$minim(nsaauto, control=list(maxit=2), CONDLIK = FALSE)
expect_output(tmp2$env$minimBB(nsaauto, control=list(maxit=2), CONDLIK = FALSE))
mC.ss(spec.co2, generators = TRUE)
tmp2$env$mcparam2optparam()
tmp2$env$mcsigma2(nsaauto)
tmp2$env$mcsigma2(nsaauto, tmp2$env$mcparam2optparam())
mC.ss(spec.co2, init = tmp2$env$mcparam2optparam())
## this chunk was commented out in mC.ss.Rd, old testing with it.
## > xxco.1 <- mC.ss(m1.new, generators = TRUE)
##
## > datansa <- read.csv("nsadata.csv")
## > nsaauto <- ts(datansa$AUTOMOTIVEPRODNSA[113:328], start=c(1947, 1), frequency=4)
##
## > res.xxco.1 <- xxco.1$env$minimBB(nsaauto, control=list(maxit=1000))
##
## condlik is: 32.85753 persd is: 16.96771 10.40725 3.567698 7.426556
## iter: 0 f-value: 32.85753 pgrad: 14.83674
## iter: 10 f-value: 30.21297 pgrad: 0.0007615952
## Successful convergence.
##
## > res.xxco.1$value
## [1] 30.21297
## > res.xxco.1$par
## co.r1 co.r2 co.r3 co.r4
## -0.4069477 -0.5093360 -0.6026860 -0.5174826
## > res.xxco.1
## $par
## co.r1 co.r2 co.r3 co.r4
## -0.4069477 -0.5093360 -0.6026860 -0.5174826
##
## $value
## [1] 30.21297
##
## $gradient
## [1] 9.023893e-06
##
## $fn.reduction
## [1] 2.644559
##
## $iter
## [1] 14
##
## $feval
## [1] 16
##
## $convergence
## [1] 0
##
## $message
## [1] "Successful convergence"
##
## $cpar
## method M
## 2 50
##
## > with(xxco.1$env, model)
## $period
## [1] 4
##
## $p
## [1] 5
##
## $q
## [1] 0
##
## $phi
## [,1] [,2] [,3] [,4] [,5]
## [1,] 1.1646497 -1.165471e-16 -4.254923e-17 1 -1.1646497
## [2,] 0.8451102 -2.220446e-16 -5.456035e-17 1 -0.8451102
## [3,] 0.7989768 0.000000e+00 2.220446e-16 1 -0.7989768
## [4,] 1.2716195 -1.110223e-16 -6.058867e-17 1 -1.2716195
##
## > with(xxco.1$env, zapsmall(model$phi))
## [,1] [,2] [,3] [,4] [,5]
## [1,] 1.1646497 0 0 1 -1.1646497
## [2,] 0.8451102 0 0 1 -0.8451102
## [3,] 0.7989768 0 0 1 -0.7989768
## [4,] 1.2716195 0 0 1 -1.2716195
set.seed(1234)
## This prints something like:
## condlik is: 18.09375 persd is: 1.651785 1.714789 3.041003 1.577415
fitPM(spec.coz4, rnorm(100), control = list(maxit = 1))
})
|
62dae08bfac5533c05dedaeeda70eca3dd43b94f
|
d481473c7bf59ef07fb2f0f7f6353e0beff5fa48
|
/data-raw/ph.R
|
29d4965fbe00b1db22edc7ceb95177973e420efd
|
[] |
no_license
|
crumplecup/muddier
|
92e1d4845db3d13e1297060d50d0244d5b00064f
|
c4d67a17377e45a35426cbb11ace342afaed6806
|
refs/heads/master
| 2021-11-28T03:41:33.262356
| 2021-08-13T03:11:39
| 2021-08-13T03:11:39
| 175,301,894
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 624
|
r
|
ph.R
|
## Build package datasets for inherited-age distributions.
## Reads precomputed CDF rows from inher_ph_cdfs.csv, derives the matching
## PMFs via to_pmf() (defined elsewhere in this package -- TODO confirm),
## and stores each object in the package's data/ directory with usethis.
library(data.table)
library(magrittr)
setwd('/home/crumplecup/work/')
# Each row of the CSV holds one cumulative distribution function.
ph_cdfs <- fread('inher_ph_cdfs.csv')
df_ph_cdf <- ph_cdfs[1,]
ff_wbl_cdf <- ph_cdfs[2,]
fg_wbl_cdf <- ph_cdfs[3,]
# Convert each CDF row to the corresponding probability mass function.
df_ph_pmf <- to_pmf(unlist(df_ph_cdf))
ff_wbl_pmf <- to_pmf(unlist(ff_wbl_cdf))
fg_wbl_pmf <- to_pmf(unlist(fg_wbl_cdf))
# Switch into the package source tree before writing data/ objects.
setwd('/home/crumplecup/work/muddier/')
usethis::use_data(df_ph_cdf, overwrite = T)
usethis::use_data(ff_wbl_cdf, overwrite = T)
usethis::use_data(fg_wbl_cdf, overwrite = T)
usethis::use_data(df_ph_pmf, overwrite = T)
usethis::use_data(ff_wbl_pmf, overwrite = T)
usethis::use_data(fg_wbl_pmf, overwrite = T)
|
b209c1386759c791e0ed646d1cf44b765953fdee
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/surveillance/inst/doc/glrnb.R
|
679f9c25276e55a3ba21001772643bf98b205900
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,764
|
r
|
glrnb.R
|
### R code from vignette source 'glrnb.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: setup
###################################################
library("surveillance")
options(SweaveHooks=list(fig=function() par(mar=c(4,4,2,0)+.5)))
options(width=70)
set.seed(247)
## create directory for plots
dir.create("plots", showWarnings=FALSE)
###################################################
### code chunk number 2: glrnb.Rnw:93-95
###################################################
getOption("SweaveHooks")[["fig"]]()
data(shadar)
plot(shadar,main="Number of salmonella hadar cases in Germany 2001-2006")
###################################################
### code chunk number 3: glrnb.Rnw:102-104
###################################################
# Simulate data
simData <- sim.pointSource(length=300,K=0.5,r=0.6,p=0.95)
###################################################
### code chunk number 4: glrnb.Rnw:107-108
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(simData)
###################################################
### code chunk number 5: glrnb.Rnw:141-143
###################################################
getOption("SweaveHooks")[["fig"]]()
survObj <- algo.glrnb(shadar,control=list(range=105:295,alpha=0))
plot(survObj,startyear=2003)
###################################################
### code chunk number 6: glrnb.Rnw:162-165 (eval = FALSE)
###################################################
## control=list(range=range,c.ARL=5,
## mu0=NULL, alpha=0, Mtilde=1, M=-1, change="intercept",theta=NULL,
## dir=c("inc","dec"),ret=c("cases","value"))
###################################################
### code chunk number 7: glrnb.Rnw:174-176 (eval = FALSE)
###################################################
## control=list(range=105:length(shadar$observed))
## algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 8: glrnb.Rnw:182-184 (eval = FALSE)
###################################################
## control=list(range=105:295,alpha=3)
## algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 9: glrnb.Rnw:192-195
###################################################
control=list(range=105:295,alpha=NULL)
surv <- algo.glrnb(shadar,control=control)
surv$control$alpha
###################################################
### code chunk number 10: glrnb.Rnw:206-208 (eval = FALSE)
###################################################
## control=list(range=105:295,mu0=list(S=2,trend=FALSE))
## algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 11: glrnb.Rnw:211-213
###################################################
control=list(range=105:295,mu0=list(S=2,trend=F,refit=T))
surv <- algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 12: glrnb.Rnw:218-220
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(shadar)
with(surv$control,lines(mu0~range,lty=2,lwd=4,col=4))
###################################################
### code chunk number 13: glrnb.Rnw:226-227 (eval = FALSE)
###################################################
## surv$control$mu0Model
###################################################
### code chunk number 14: glrnb.Rnw:234-235
###################################################
estimateGLRNbHook
###################################################
### code chunk number 15: glrnb.Rnw:275-276
###################################################
coef(surv$control$mu0Model$fitted[[1]])
###################################################
### code chunk number 16: glrnb.Rnw:284-287
###################################################
control=list(range=105:295,alpha=0)
surv <- algo.glrnb(disProgObj=shadar,control=control)
table(surv$alarm)
###################################################
### code chunk number 17: glrnb.Rnw:292-296
###################################################
num <- rep(NA)
for (i in 1:6){
num[i] <- table(algo.glrnb(disProgObj=shadar,control=c(control,c.ARL=i))$alarm)[2]
}
###################################################
### code chunk number 18: glrnb.Rnw:320-322 (eval = FALSE)
###################################################
## control=list(range=105:295,theta=0.4)
## algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 19: glrnb.Rnw:327-329 (eval = FALSE)
###################################################
## control=list(range=105:295,theta=NULL)
## algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 20: glrnb.Rnw:337-339
###################################################
control=list(range=105:295,ret="cases",alpha=0)
surv2 <- algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 21: glrnb.Rnw:342-343
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(surv2,startyear=2003)
###################################################
### code chunk number 22: glrnb.Rnw:353-355
###################################################
control=list(range=105:295,ret="cases",dir="dec",alpha=0)
surv3 <- algo.glrnb(disProgObj=shadar,control=control)
###################################################
### code chunk number 23: glrnb.Rnw:358-359
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(surv3,startyear=2003)
|
f5c7d3810b2b1355a66bc5f3d4c3e05653435486
|
31d2d467030565c44f4d28d42c0e4d225dececaa
|
/R/start.val.ltm.R
|
525bb178f110dca5930c6a7a5f4f7e154da7e12c
|
[] |
no_license
|
cran/ltm
|
84fd858915db9fe1506a40628f61e6500a21ed1c
|
dbbabfa99fa09ad94113856a6a5ae1535e7b817f
|
refs/heads/master
| 2022-02-25T01:10:01.747125
| 2022-02-18T08:40:02
| 2022-02-18T08:40:02
| 17,697,218
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
start.val.ltm.R
|
## Compute starting values for the latent trait model fitted by ltm().
##
## Args:
##   start.val: NULL (compute from the data), the string "random" (compute
##              from random latent scores), or a numeric vector/matrix of
##              length p * q. supplying explicit starting values.
##   data:      0/1 response matrix (n subjects x p items).
##   factors:   number of latent factors (a second factor adds column z2).
##   formula:   the ltm model formula; its RHS defines the linear predictor.
## Returns:
##   A p x q. matrix of starting values: the user-supplied values when a
##   complete set was given, otherwise per-item logistic-regression
##   coefficients on provisional latent scores.
start.val.ltm <-
function (start.val, data, factors, formula) {
    n <- nrow(data)
    p <- ncol(data)
    ## Rebuild the per-item formula "y ~ <RHS of the model formula>".
    cf <- paste(formula[3])
    form <- as.formula(paste("y ~ ", cf))
    ## q. = number of coefficients per item (terms + intercept).
    q. <- length(attr(terms(form), "term.labels")) + 1
    randStrVal <- length(start.val) == 1 && start.val == "random"
    ## Starting values must be computed unless the user supplied a full set.
    cmptStrVal <- is.null(start.val) || randStrVal || length(start.val) != p * q.
    if (cmptStrVal) {
        if (randStrVal) {
            ## Random provisional latent scores.
            Z <- data.frame(z1 = rnorm(n))
            if (factors > 1)
                Z$z2 <- rnorm(n)
        } else {
            ## Deterministic provisional scores: map each subject's total
            ## score onto an equally spaced grid over [-3, 3].
            rs <- as.vector(rowSums(data, na.rm = TRUE))
            len.uni <- length(unique(rs))
            rs <- factor(rs, labels = 1:len.uni)
            rs <- as.numeric(levels(rs))[as.integer(rs)]
            Z <- data.frame(z1 = seq(-3, 3, len = len.uni)[rs])
            if (factors > 1)
                Z$z2 <- seq(3, -3, len = n)
        }
        ## Escalate warnings to errors so ill-behaved glm() fits (e.g.
        ## perfect separation) are caught by try() and replaced by the
        ## fallback below; restore the caller's setting on exit.
        old <- options(warn = 2)
        on.exit(options(old))
        coefs <- matrix(0, p, q.)
        for (i in seq_len(p)) {
            Z$y <- data[, i]
            fm <- try(glm(form, family = binomial(), data = Z), silent = TRUE)
            coefs[i, ] <- if (!inherits(fm, "try-error")) {
                fm$coefficients
            } else {
                ## Fallback when the fit fails: zero intercept, unit slopes.
                c(0, rep(1, q. - 1))
            }
        }
        dimnames(coefs) <- NULL
        coefs
    } else
        start.val
}
|
56a3c2c207f38c2299c54e0ea0c45becceb1d1af
|
08c48f2627281810fe2a4a37bb1e9bc5c03eeb68
|
/Huan_link_all_script/All_result_ICGC/gene_network_merge_repurposing_model/Gene-based_data/side_effect_repo_data/test_repo_cancer_model.R
|
8ea1c4b6361cbc2adcbbbb70d5f9fb6d3d81266e
|
[] |
no_license
|
Lhhuan/drug_repurposing
|
48e7ee9a10ef6735ffcdda88b0f2d73d54f3b36c
|
4dd42b35e47976cf1e82ba308b8c89fe78f2699f
|
refs/heads/master
| 2020-04-08T11:00:30.392445
| 2019-08-07T08:58:25
| 2019-08-07T08:58:25
| 159,290,095
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,518
|
r
|
test_repo_cancer_model.R
|
library(ggplot2)
library(Rcpp)
library(readxl)
library(dplyr)
setwd("/f/mulinlab/huan/All_result_ICGC/merge_SV_CNV_repurposing_model_both/Gene-based_drug_R_or_S_optimization/side_effect_repo_data/")
#1. 测试集和训练集3、7分组
#---------------------------------------------------------------------------------------------------------
org<-read.table("11_drug_primary_calculate_for_gene_based_repo_logistic_regression_data.txt",header = T,sep = "\t") %>% as.data.frame()
#org1<-org %>% dplyr::select(Drug_claim_primary_name,oncotree_main_ID,average_drug_score,averge_gene_mutation_frequency,average_gene_CADD_score,average_mutation_map_to_gene_level_score,drug_repurposing)
org1<-org %>% dplyr::select(average_drug_score,averge_gene_mutation_frequency,average_gene_CADD_score,average_mutation_map_to_gene_level_score,
averge_gene_num_in_del_hotspot,averge_gene_num_in_dup_hotspot,averge_gene_num_in_cnv_hotspot,averge_gene_num_in_inv_hotspot,
averge_gene_num_in_tra_hotspot,repo_info)
N = length(org1$repo_info)
#ind=1的是0.7概率出现的行,ind=2是0.3概率出现的行
ind=sample(2,N,replace=TRUE,prob=c(0.7,0.3))
#生成训练集(这里训练集和测试集随机设置为原数据集的70%,30%)
aus_train <- org1[ind==1,]
#生成测试集
aus_test <- org1[ind==2,]
#----------------------------------------------------------------------------------------------------
#生成logis模型,用glm函数
#用训练集数据生成logis模型,用glm函数
#family:每一种响应分布(指数分布族)允许各种关联函数将均值和线性预测器关联起来。常用的family:binomal(link='logit')--响应变量服从二项分布,连接函数为logit,即logistic回归
#----------------------------------------------------------------------
#测试集的真实值
pre <- glm(repo_info ~.,family=binomial(link = "logit"),data = aus_train)
summary(pre)
#测试集的真实值
real <- aus_test$repo_info
#predict函数可以获得模型的预测值。这里预测所需的模型对象为pre,预测对象newdata为测试集,预测所需类型type选择response,对响应变量的区间进行调整
predict. <- predict.glm(pre,type='response',newdata=aus_test)
#按照预测值为1的概率,>0.5的返回1,其余返回0
predict =ifelse(predict.>0.5,1,0)
#数据中加入预测值一列
aus_test$predict = predict
#------------------------------------------------
#3.模型检验
##模型检验
res <- data.frame(real,predict)
#训练数据的行数,也就是样本数量
n = nrow(aus_train)
#计算Cox-Snell拟合优度
R2 <- 1-exp((pre$deviance-pre$null.deviance)/n)
cat("Cox-Snell R2=",R2,"\n")
#-------------------------------------------------------
#4.准确率和精度
true_value=aus_test[,10]
predict_value=aus_test[,11]
#计算模型精确度
error = predict_value-true_value
accuracy = (nrow(aus_test)-sum(abs(error)))/nrow(aus_test) #精确度--判断正确的数量占总数的比例
#计算Precision,Recall和F-measure
#一般来说,Precision就是检索出来的条目(比如:文档、网页等)有多少是准确的,Recall就是所有准确的条目有多少被检索出来了
#和混淆矩阵结合,Precision计算的是所有被检索到的item(TP+FP)中,"应该被检索到的item(TP)”占的比例;Recall计算的是所有检索到的item(TP)占所有"应该被检索到的item(TP+FN)"的比例。
precision=sum(true_value & predict_value)/sum(predict_value) #真实值预测值全为1 / 预测值全为1 --- 提取出的正确信息条数/提取出的信息条数
recall=sum(predict_value & true_value)/sum(true_value) #真实值预测值全为1 / 真实值全为1 --- 提取出的正确信息条数 /样本中的信息条数
#P和R指标有时候会出现的矛盾的情况,这样就需要综合考虑他们,最常见的方法就是F-Measure(又称为F-Score)
F_measure=2*precision*recall/(precision+recall) #F-Measure是Precision和Recall加权调和平均,是一个综合评价指标
#输出以上各结果
print(accuracy)
print(precision)
print(recall)
print(F_measure)
#混淆矩阵,显示结果依次为TP、FN、FP、TN
table(true_value,predict_value)
#------------------------------------------------------
#5.ROC曲线的几个方法
#------------------------------
library(ROCR)
pred <- prediction(predict.,true_value) #预测值(0.5二分类之前的预测值)和真实值
performance(pred,'auc')@y.values #AUC值
perf <- performance(pred,'tpr','fpr')
plot(perf)
#方法2
#install.packages("pROC")
library(pROC)
modelroc <- roc(true_value,predict.)
plot(modelroc, print.auc=TRUE, auc.polygon=TRUE,legacy.axes=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE) #画出ROC曲线,标出坐标,并标出AUC的值
#方法3,按ROC定义
TPR=rep(0,1000)
FPR=rep(0,1000)
p=predict.
for(i in 1:1000)
{
p0=i/1000;
ypred<-1*(p>p0)
TPR[i]=sum(ypred*true_value)/sum(true_value)
FPR[i]=sum(ypred*(1-true_value))/sum(1-true_value)
}
plot(FPR,TPR,type="l",col=2)
points(c(0,1),c(0,1),type="l",lty=2)
#---------------------
#6.更换测试集和训练集的选取方式,采用五折交叉验证
#---------------------
org<-read.table("11_drug_primary_calculate_for_gene_based_repo_logistic_regression_data.txt",header = T,sep = "\t") %>% as.data.frame()
#org1<-org %>% dplyr::select(Drug_claim_primary_name,oncotree_main_ID,average_drug_score,averge_gene_mutation_frequency,average_gene_CADD_score,average_mutation_map_to_gene_level_score,drug_repurposing)
org1<-org %>% dplyr::select(average_drug_score,averge_gene_mutation_frequency,average_gene_CADD_score,average_mutation_map_to_gene_level_score,
averge_gene_num_in_del_hotspot,averge_gene_num_in_dup_hotspot,averge_gene_num_in_cnv_hotspot,averge_gene_num_in_inv_hotspot,
averge_gene_num_in_tra_hotspot,repo_info)
#将org1数据分成随机十等分
#install.packages("caret")
#固定folds函数的分组
set.seed(7)
require(caret)
folds <- createFolds(y=org1$repo_info,k=5)
#构建for循环,得5次交叉验证的测试集精确度、训练集精确度
## Five-fold cross-validation of the logistic model; tracks the fold with
## the best test-set accuracy and pools every out-of-fold prediction in
## `rs` for a single ROC curve afterwards.
## NOTE(review): `max` shadows base::max for the rest of this script.
max=0   # best test-set accuracy seen so far
num=0   # fold index that achieved it
rs <- data.frame()  # pooled table of (truth, predicted probability)
for(i in 1:5){
fold_test <- org1[folds[[i]],]   # fold i is the test set
fold_train <- org1[-folds[[i]],] # remaining data is the training set
print("***组号***")  # "fold number"
fold_pre <- glm(repo_info ~.,family=binomial(link='logit'),data=fold_train)
fold_predict <- predict(fold_pre,type='response',newdata=fold_test)
# threshold predicted probabilities at 0.5 -> 0/1 labels
fold_predict1 =ifelse(fold_predict>0.5,1,0)
fold_test$predict = fold_predict1
#----------------------------------------------
true_value1 =fold_test[,10]  # column 10 = repo_info (the truth)
# collect this fold's truths and raw probabilities for the pooled ROC
tmp<-data.frame(true_value1= fold_test[,10],predict_value1=fold_predict)
rs <- bind_rows(rs,tmp)
#---------------------------
# test-set accuracy: column 11 is the thresholded prediction added above
fold_error = fold_test[,11]-fold_test[,10]
fold_accuracy = (nrow(fold_test)-sum(abs(fold_error)))/nrow(fold_test)
print(i)
print("***测试集精确度***")  # "test-set accuracy"
print(fold_accuracy)
print("***训练集精确度***")  # "training-set accuracy"
fold_predict2 <- predict(fold_pre,type='response',newdata=fold_train)
fold_predict2 =ifelse(fold_predict2>0.5,1,0)
fold_train$predict = fold_predict2
fold_error2 = fold_train[,11]-fold_train[,10]
fold_accuracy2 = (nrow(fold_train)-sum(abs(fold_error2)))/nrow(fold_train)
print(fold_accuracy2)
# remember the best-performing fold
if(fold_accuracy>max)
{
max=fold_accuracy
num=i
}
}
print(max)  # best test-set accuracy across folds
print(num)  # fold that achieved it
#----------------------------
library(pROC)
modelroc <- roc(rs$true_value1,rs$predict_value)
plot(modelroc, print.auc=TRUE, auc.polygon=TRUE,legacy.axes=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE) #画出ROC曲线,标出坐标,并标出AUC的值
#-----------------------------
library(ROCR)
pred <- prediction(rs$predict_value,rs$true_value1) #预测值(0.5二分类之前的预测值)和真实值
performance(pred,'auc')@y.values #AUC值
perf <- performance(pred,'tpr','fpr')
plot(perf)
#-----------------------------
#-----------------------------------------------------------------------------------------------
##结果可以看到,精确度accuracy最大的一次为max,取folds[[num]]作为测试集,其余作为训练集。
|
fb6fc76beb20d021d672d919a83e3d1e08bdd77f
|
0684d492915ae9997046ede472e23972d68b0889
|
/task_reference/svn/trunk/octSugar.r
|
26810986c7c9b985394d68ed0695ca8ad0ddeb81
|
[] |
no_license
|
githubfun/R
|
34206d40375b6c9aacb98780240a38904937cd1b
|
48b9c8306968c59b6acdfce5ae4a0910d9d212b6
|
refs/heads/master
| 2018-05-18T22:33:29.876793
| 2011-06-30T16:40:34
| 2011-06-30T16:40:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,401
|
r
|
octSugar.r
|
## getXts:getSection:getManyplots
## Plot the October slice of the sugar price series for each year, one
## panel per year, into a multi-panel png.
require(quantmod)
# Daily price series; read.zoo parses the Date column, as.xts indexes it.
sugar <- as.xts(read.zoo("~/Dropbox/DATA/IDXDATA/$SUGAR.CSV", sep = ",",
                         format = "%m/%d/%Y", header = TRUE))
# Years shown: 1990-2009 fills the 4x5 grid exactly (20 panels).
# NOTE(review): the original also built sugarOCT2010 but never plotted it
# (a 4x5 layout holds only 20 panels); widen the grid if 2010 is wanted.
years <- 1990:2009
## turn on png output device
png("octSugar.png")
## create multipane plot
par(mfrow = c(4, 5))
for (yr in years) {
  # "YYYY10" is xts date-range subsetting for October of that year; `main`
  # keeps the panel titles of the original one-plot-per-line version.
  plot(sugar[paste0(yr, "10")], main = paste0("sugarOCT", yr))
}
# Close the png device — missing in the original, so octSugar.png was
# never finalized.
dev.off()
|
04e36323489777418376afe832bdd0d2a7f48280
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/AlphaSimR/man/selIndex.Rd
|
9061138e5220d9529e151460ee310254b301ba8f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,213
|
rd
|
selIndex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{selIndex}
\alias{selIndex}
\title{Selection index}
\usage{
selIndex(Y, b, scale = FALSE)
}
\arguments{
\item{Y}{a matrix of trait values}
\item{b}{a vector of weights}
\item{scale}{should Y be scaled and centered}
}
\description{
Calculates values of a selection index given trait values and
weights. This function is intended to be used in combination with
selection functions working on populations such as
\code{\link{selectInd}}.
}
\examples{
#Create founder haplotypes
founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#Set simulation parameters
SP = SimParam$new(founderPop)
#Model two genetically correlated traits
G = 1.5*diag(2)-0.5 #Genetic correlation matrix
SP$addTraitA(10, mean=c(0,0), var=c(1,1), corA=G)
SP$setVarE(h2=c(0.5,0.5))
#Create population
pop = newPop(founderPop, simParam=SP)
#Calculate Smith-Hazel weights
econWt = c(1, 1)
b = smithHazel(econWt, varG(pop), varP(pop))
#Selection 2 best individuals using Smith-Hazel index
#selIndex is used as a trait
pop2 = selectInd(pop, nInd=2, trait=selIndex,
simParam=SP, b=b)
}
|
6ee9214252ef2854c4e9557af99ad3181a48b5f9
|
9ea8bb629f9f4762ce4737d6e2dbafa9d05b1636
|
/Step0_Splicing_junctions_loading.R
|
41faf4050452115622ec6e7daa9d7173a25e6b06
|
[] |
no_license
|
FlameHuang/ASSC
|
f85943468888a480cbdcae96f60baaa14c63b56f
|
fe9436e559f83ef0c18ab44cf8833c2e4cef2d25
|
refs/heads/main
| 2023-03-23T22:24:39.093756
| 2021-03-25T08:33:43
| 2021-03-25T08:33:43
| 321,023,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,215
|
r
|
Step0_Splicing_junctions_loading.R
|
#### AS site read ---------------------------
# personal function
# Ensure package `p` is usable: install it (with dependencies) when it is
# not yet present in the library, then attach it. Returns require()'s
# TRUE/FALSE attachment status, exactly as the original did.
usePackage <- function(p) {
  is_installed <- p %in% installed.packages()[, 1]
  if (!is_installed) {
    install.packages(p, dependencies = TRUE)
  }
  require(p, character.only = TRUE)
}
# function for replace NA with 0 in data.table
## Replace every NA in data.table `x` with 0, modifying `x` in place.
## Uses data.table::set(), so the caller's object is updated by reference —
## there is no copy, and the return value is not meaningful.
## NOTE(review): assumes every column can hold a 0; behavior on character
## columns follows data.table's coercion rules — confirm against callers.
DT.replace.NA <- function(x) {
  # For each column j, overwrite exactly the rows where that column is NA.
  for (j in seq_len(ncol(x))) set(x,which(is.na(x[[j]])),j,0)
}
## Read a splice-junction count table and drop low-coverage junctions.
##
## file:                path to a delimited count table readable by
##                      data.table::fread(); must contain a `junctions`
##                      column plus one count column per sample.
## input.rowsum.cutoff: minimum total read count (summed over the count
##                      columns) a junction must exceed to be kept; when
##                      NULL a data-driven default is derived below.
##
## Returns the filtered table as a data.table keyed on `junctions`.
readSJ <- function(file, input.rowsum.cutoff = NULL){
  if(!file.exists(file)) {
    stop("Input file does not exist!")
  }
  # required package ------------------------
  usePackage("data.table")
  # read files ------------------------------
  input <- fread(file)
  # replace missing values with 0 (in place) -
  DT.replace.NA(input)
  # filter by the rowsum (column 1 is the junction id, so it is excluded)
  input.rs <- rowSums(input[, -1])
  if (is.null(input.rowsum.cutoff)) {
    # Default cutoff: the larger of the second distinct 5%-quantile value
    # of the row sums and 2 * (number of count columns).
    # Plain if/else instead of the original scalar ifelse(), which always
    # evaluated the quantile branch even when a cutoff was supplied.
    input.rowsum.cutoff <- max(
      unique(quantile(input.rs, probs = seq(0, 1, .05)))[2],
      2 * (ncol(input) - 1)
    )
  }
  junc <- input[input.rs > input.rowsum.cutoff, ]
  setkey(junc, junctions)
  # Progress report. The original nested print(Sys.time()) inside paste(),
  # which printed the timestamp on its own line as well, and passed
  # collapse = "" (a no-op for multiple scalar arguments); paste0 builds
  # the whole message in one pass.
  print(paste0("Before filtering ", nrow(input), " After filtering ",
               nrow(junc), " at ", format(Sys.time())))
  return(junc)
}
|
f940ba98959bd3f7bb4708730da7914978d1b096
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/extremis/examples/angdensity.Rd.R
|
17300a3a7f24f5352a222c601d324c3042fd2115
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
angdensity.Rd.R
|
## Auto-generated example script for extremis::angdensity (extracted from
## the package's Rd examples).
library(extremis)
### Name: angdensity
### Title: Empirical-Likelihood Based Inference for the Angular Density
### Aliases: angdensity angdensity.default
### ** Examples
## de Carvalho et al (2013, Fig. 7)
# Load the `beatenberg` dataset shipped with extremis.
data(beatenberg)
# NOTE(review): attach() places beatenberg's components on the search path,
# but the calls below reference `beatenberg` and `fit` directly — confirm
# the attach is actually needed before removing it.
attach(beatenberg)
# Fit the angular-density estimate with tau = 0.98 and nu = 163 — the
# settings used for the cited figure.
fit <- angdensity(beatenberg, tau = 0.98, nu = 163, raw = FALSE)
plot(fit)
# Mark the locations stored in fit$w along the x-axis.
rug(fit$w)
|
fba95784b867b4f1f8b18c104a647136ce01b8fd
|
b3fc678096d3fa31c4289b1e1c414fb0f1b2eeab
|
/man/growth_parameters.Rd
|
7e0e2330b79acb2c951707e8f9bd99b3cd4ea016
|
[] |
no_license
|
EricBryantPhD/screenmill
|
d59402d6fff819b120f2d7b2149cc57eae3e80ec
|
4319a2ff90a2b8dbf2bf9a27bb695b443827ef82
|
refs/heads/master
| 2023-03-05T11:06:16.676866
| 2020-01-11T19:44:06
| 2020-01-11T19:44:06
| 233,282,823
| 0
| 1
| null | 2023-02-26T19:47:15
| 2020-01-11T19:04:06
|
R
|
UTF-8
|
R
| false
| true
| 1,323
|
rd
|
growth_parameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/growth-parameters.R
\name{growth_parameters}
\alias{growth_parameters}
\title{Calculate growth parameters}
\usage{
growth_parameters(time, size, ...)
}
\arguments{
\item{time}{Numeric - time of growth.}
\item{size}{Numeric - size of colony.}
\item{...}{Further arguments passed to \code{\link[stats]{smooth.spline}}.}
}
\value{
Returns a data frame with the following parameters derived from the
fit curve:
\tabular{ll}{
\bold{A} \tab Maximum growth. \cr
\bold{A_t} \tab Time at maximum growth. \cr
\bold{mu} \tab Maximum growth rate. \cr
\bold{mu_t} \tab Time at maximum growth rate. \cr
\bold{mu_y} \tab Growth at maximum growth rate. \cr
\bold{lambda} \tab Lag phase (x-intercept of line tangent to max growth
rate). \cr
\bold{b} \tab y-intercept of line tangent to max growth rate. \cr
\bold{integral} \tab Area under growth curve. \cr
\bold{spar} \tab Smoothing parameter used to generate curve - can be
set by passing a \code{spar} argument to
\code{\link[stats]{smooth.spline}}.
}
}
\description{
Calculates colony growth parameters by fitting a smooth growth curve using
\code{\link[stats]{smooth.spline}}.
}
|
000d5b0436c8be436e71f0baa62dcd5589c9bb44
|
b35203fec891bdcd1a25ae2e73a444dbb79e6a44
|
/R/z%dim0%.R
|
c2412ca87cc3831ac5bad4ff38102d59a05e54fc
|
[] |
no_license
|
cran/FactoInvestigate
|
de865c9e6931b3aadc53edf35ae9b971a0748709
|
c0eb72ddaf923d1e75a8350b6de76b218b83ee18
|
refs/heads/master
| 2022-05-23T15:46:43.763570
| 2022-04-26T19:50:14
| 2022-04-26T19:50:14
| 87,923,417
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
z%dim0%.R
|
# Zero-length coalescing operator: return `e1` unless it is empty
# (length 0), in which case fall back to `e2`.
`%dim0%` <- function(e1, e2) {
  if (length(e1) > 0) {
    e1
  } else {
    e2
  }
}
|
f942fd0fbcfe1b81a80758d82000d20aae055541
|
c9a17e301097741ccf4d4cebd8fb4d0e3ebcaf40
|
/shiny.R
|
05ec0839abd7037cab976a5fa579e51c3e405554
|
[] |
no_license
|
ebfreel/potpourri
|
660a3203e42f654d5e2678e8f73acac621e22a5e
|
4813e93e4adf35fd31bc43452f3f100465832f81
|
refs/heads/master
| 2023-08-31T19:44:28.815826
| 2018-04-24T20:49:58
| 2018-04-24T20:49:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,794
|
r
|
shiny.R
|
## 4/3: Collaboration mini project I: shiny
## shiny tutorial adapted from various sites, primarily http://shiny.rstudio.com/tutorial/ and https://www.r-bloggers.com/building-shiny-apps-an-interactive-tutorial/
## Package developed by: Winston Chang, Joe Cheng, JJ Allaire, Yihui Xie and Jonathan McPherson (2017).
## shiny: Web Application Framework for R. R package version 1.0.5. https://CRAN.R-project.org/package=shiny
### PART I ###
### Installation ###
install.packages('shiny')
library(shiny)
# Launch a pre-built example app in R Studio and (if you want) your web browser
runExample('11_timer') # All good? Keep going...
### Create a shiny app template (an empty shiny app), method #1 ###
# DO NOT!! run the following script- instead, copy and paste into a new R script.
# It should be saved as: "appP2.R" or "appP3.R" or "blah.R", whatever.
# It's critical that app.R is saved in its own special folder separate from other .R files
# (unless those other files are used by your app). Don't confuse Shiny!
library(shiny)
ui <- fluidPage()
server <- function(input, output, session) {}
shinyApp(ui = ui, server = server)
# After saving this ^, R Studio will recognize that this is a Shiny app.
# Green triangle + Run will appear on the righthand side of your app.R script in R Studio. Click it.
# Not much happens, as it's an empty app, but you'll see something like "Listening on...".
# Click the red button or Escape to exit.
# You can also run the app with runApp("appP3.R").
### Create a shiny app template, method #2 ###
# You can also create a new Shiny app using R Studio's menu.
# File > New Project > New Directory > Shiny Web Application
# RStudio will create a new folder and initialize a simple Shiny app there.
# There won't be an "app.R" file, but you will see ui.R (all code assigned to the ui object, as in the script above, goes here) and server.R (same, pertaining to the server object).
# I prefer the single-file method #1, but you do you.
# From Shiny's developer site, here is a list of the types of built-in examples:
runExample("01_hello") # a histogram
runExample("02_text") # tables and data frames
runExample("03_reactivity") # a reactive expression
runExample("04_mpg") # global variables
runExample("05_sliders") # slider bars
runExample("06_tabsets") # tabbed panels
runExample("07_widgets") # help text and submit buttons
runExample("08_html") # Shiny app built from HTML
runExample("09_upload") # file upload wizard
runExample("10_download") # file download wizard
runExample("11_timer") # an automated timer
### PART II ###
# We will also be using these libraries:
library(dplyr)
library(ggplot2)
bcl <- read.csv('bclData.csv') # Briefly explore the data in your R Studio console.
# Don't forget to set your wd so R can find this file.
## Read in the data
# In 'appP2.R' (or whatever you named it), type the following code, do NOT just run it here- won't work:
# Place this line in your app as the second line, just after calling the library.
bcl <- read.csv('bclData.csv', stringsAsFactors=F) # Hint: Style rule #5
## Ensure the app can read your data
# Replace the entire server object assignment line with the following:
server = function(input, output, session) { # Hint: Style rule #8
print(str(bcl))
}
## Add elements to the UI
# Here, we'll render the text by placing some strings inside fluidPage().
fluidPage("BC Liquor Store", "prices")
# Save and run the app. Looking good? Add a few more strings (aka, column headers from the worksheet).
# Run the app again.
## Format the text in the UI
fluidPage(
h1("BC liquor product app"), "BC", "Liquor", br(), "Store", strong("prices") # Hint: Style rule #4
)
?builder # Experiment with some of the tags listed here (i.e., replace h1 with h4, etc.) and run the app.
# Overwrite your fluidPage() text with the following:
fluidPage(
titlePanel("BC Liquor Store prices")
)
## Add some structure
# At this point, you may notice the app window looks a little messy. sidebarLayout() will help.
# Inputs will be on the left, results will be in the main panel.
# Paste this inside fluidPage(), after titlePanel(). Don't forget a , after titlePanel().
sidebarLayout(
sidebarPanel("our inputs will go here"),
mainPanel("the results will go here")
)
# You can also control the minutiae of the display in fluidPage().
?column
# Replace fluidPage() with the following:
fluidRow(
titlePanel("BC Liquor Store prices"),
column(width = 4,
column(width = 3, offset = 2,
"Another formatting example"
)
)
)
# Add some UI into sidebarPanel and mainPanel, run the app, notice the changes.
# All UI functions are HTML wrappers in disguise. Shiny lets you run these without prior knowledge of HTML.
# In your R console, run the following:
ui <- fluidPage(
titlePanel("BC Liquor Store prices"),
sidebarLayout(
sidebarPanel("our inputs will go here"),
mainPanel("the output will go here")
)
)
print (ui) # Hint: Style rule #5
# See how much uglier HTML is compared to R?
# Go ahead and paste this ^ for ui, overwriting the previous code.
## Add a numbers widget
?sliderInput
# Replace your sidebarPanel() with the following code. Keep mainPanel() as-is.
# These values are from the price data in the .csv.
sidebarPanel(sliderInput("priceInput", "Price",
min = 0, max = 100;value = c(25, 40), pre = "$"), # Hint: Style rule #9
# Hold off on running the app as we continue to add stuff here- it won't work yet.
## Add buttons
?radioButtons
# Add this input code inside sidebarPanel(), after the previous input:
radioButtons("typeInput", "Product type",
choices = c("BEER", "REFRESHMENT", "SPIRITS", "WINE"),
selected = "WINE"),
# Now we want to choose the data we display in the app.
# After the radioButtons() code, paste:
uiOutput("countryOutput")), # This helps set up for reactive programming. See ?uiOutput
## Output a plot and a table summary of results
# Replace mainPanel with the following text:
mainPanel(
plotOutput("cool_plot"), # Hint: Style rule #2
br(), br(), # Coding in some breaks
tableOutput("results")
)
)
)
# ^ What did we do here? We want to show results in a plot and a table.
# Now that we've coded two output types, we need to tell Shiny what kind of graphics to display.
?renderPlot()
## Reactivity in Shiny
# Create a list of selected inputs
# Replace your server assignment line with this:
server <- function(input, output, session) {
output$countryOutput <- renderUI({
selectInput("countryInput", "Country",
sort(unique(bcl$Country)), selected = "CANADA")
})
# Why did we do ^this?
# We have to 1) assign our output object to a list (output$countryOutput) and
# 2) build the object with render* (<- a render function).
# You don't have to deeply understand these rules, but Shiny requires you to follow this protocol.
# Using library(dplyr) to filter the data based on price, type, country (our inputs)
filtered <- reactive({
if (is.null(input$countryInput)) {
return(NULL) # If country input doesn't = CANADA, don't include
}
bcl %>%
filter(Price >= input$priceInput[1], # our minimum value
Price <= input$priceInput[2], # our maximum value
Type == input$typeInput,
Country == input$countryInput
)
})
# Time to add in ggplot():
output$coolplot <- renderPlot({
if (is.null(filtered())) {
return() # If our filtered list has nulls, don't include those
}
ggplot(filtered(), aes(Alcohol_Content)) +
geom_histogram()
})
# We also want to add code for our table.
# Paste the following after the previous code:
output$results <- renderTable({
filtered <-
bcl %>%
filter(Price >= input$priceInput[1],
Price <= input$priceInput[2],
Type == input$typeInput,
Country == input$countryInput)
filtered
})
}
# Run the app and see how it looks*
# We've just created a dependency tree.
# This tells Shiny where to look/when to react to variable value changes.
# * If it fails to run, check your code against mine:
library(dplyr)
library(ggplot2)
library(shiny)
bcl <- read.csv("bclData.csv", stringsAsFactors = FALSE)
ui <- fluidPage(
titlePanel("BC Liquor Store prices"),
sidebarLayout(
sidebarPanel(sliderInput("priceInput", "Price",
min = 0, max = 100, value = c(25, 40), pre = "$"),
radioButtons("typeInput", "Product type",
choices = c("BEER", "REFRESHMENT", "SPIRITS", "WINE"),
selected = "WINE"),
uiOutput("countryOutput")),
mainPanel(
plotOutput("coolplot"),
br(), br(),
tableOutput("results")
)
)
)
server <- function(input, output, session) {
output$countryOutput <- renderUI({
selectInput("countryInput", "Country",
sort(unique(bcl$Country)), selected = "CANADA")
})
filtered <- reactive({
if (is.null(input$countryInput)) {
return(NULL)
} # Hint: Style rule #6
bcl %>%
filter(Price >= input$priceInput[1],
Price <= input$priceInput[2],
Type == input$typeInput,
Country == input$countryInput
)
})
output$coolplot <- renderPlot({
if (is.null(filtered())) {
return()
}
ggplot(filtered(), aes(Alcohol_Content)) +
geom_histogram()
})
output$results <- renderTable({
filtered <-
bcl %>%
filter(Price >= input$priceInput[1],
Price <= input$priceInput[2],
Type == input$typeInput,
Country == input$countryInput)
filtered
})
}
shinyApp(ui = ui, server = server)
# ^ All of this should be in "app.R".
# Great job- return to the worksheet.
### PART III ###
data("faithful") # Built-in eruption dataset
head(faithful)
## Add to the ui (the user interface object)
# Let's start by making the ui variable fancier. Replace ui <- fluidPage() with:
ui = fluidPage( # Hint: Style rule #8
titlePanel("Old Faithful Eruptions"), # The title of our app goes here
sidebarLayout( # Sidebar layout with spaces for inputs and outputs
sidebarPanel( # for inputs
sliderInput(inputId = "bins", # Bins for our inputs
label = "Number of bins:",
min = 5,
max = 20,
value = 10)
),
mainPanel( # for outputs
plotOutput(outputId = "distPlot") # We want to output a histogram, eventually
)
)
)
# Run the app. Ooh, nice slider.
# Now paste this code ^ into your R console, and then paste this into the console (not the app):
print (ui) # Hint: Style rule #5
# By printing the ui here, we can see how ugly HTML would be to type raw. Thanks, Shiny!
## Reactivity in Shiny: add to the server function
# Let's feed the histogram our data.
# Replace server <- function(input, output, session) {} with the following code:
server <- function(input, output) {
# Define server logic required to draw a histogram
# Calling renderPlot tells Shiny this plot is
# 1. reactive, and will auto re-execute when inputs (input$bins) change
# 2. the output type we want
output$distPlot <- renderPlot({
x <- faithful$waiting
bins <-seq(min(x), max(x), length.out = input$bins + 1) # Hint: Style rule #5
hist(x, breaks = bins, col = heat.colors(10, alpha = 1), border = "white",
xlab = "Waiting time to next eruption (mins)",
main = "Histogram of waiting times")
})} # Hint: Style rule #6
# Make your edits, save, and run the app again. Play with your slider. Pretty.
# heat.colors() can be adjusted to make the outcome more sensible. Try a different number.
# Hint for above = type ?heat.colors
## Improving the ui
# Let's mess with the text. Replace your mainPanel() assignment with:
mainPanel(
plotOutput(outputId = "distPlot"),
p("p creates a paragraph of text."),
p("A new p() command starts a new paragraph. Supply a style attribute to change the format of the entire paragraph.", style = "font-family: 'times'; font-si16pt"),
strong("strong() bolds your text."),
em("em() creates italicized (i.e, EMphasized) text.")
# Run the app, see how your commands added and formatted your text.
## Add an image
# To add an image, Shiny requires that you
# 1. Create a folder in your app repo named "www"
# 2. Place the image in this folder with nothing else added
# I am using a scenic bison-filled photo for this tutorial.
# Replace your mainPanel() with:
mainPanel(
plotOutput(outputId = "distPlot"), # We want to output a histogram
img(src = "bison.jpg", height = 170, width = 296),
p("bison beware")
## Check your code against mine:
library(shiny)
ui <- fluidPage(
titlePanel("Old Faithful Eruptions"),
sidebarLayout( # Sidebar layout with spaces for inputs and outputs
sidebarPanel( # for inputs
sliderInput(inputId = "bins",
label = "Number of bins:",
min = 5, # Hint: Style rule #4
max = 20,
value = 10)
),
mainPanel( # for outputs
plotOutput(outputId = "distPlot"),
img(src = "bison.jpg", height = 170, width = 296),
p("bison beware")
)
)
)
server <- function(input, output, session) {
output$distPlot <- renderPlot({
x <- faithful$waiting
bins <- seq(min(x), max(x), length.out = input$bins + 1)
hist(x, breaks = bins, col = heat.colors(20, alpha = 1), border = "white",
xlab = "Waiting time to next eruption (mins)",
main = "Histogram of waiting times")
})
}
shinyApp(ui = ui, server = server)
# ^ All of this should be in your "app.R" file.
# Return to the worksheet.
|
3db4a4813074fa28b221667ab4131b73c2979ce4
|
290a33cabaf69415e9b5137028fdf42b3286b27d
|
/GO_SpermAtogensis_p.R
|
1c8084585f2fa3ef110e287bba02750af490dc4e
|
[] |
no_license
|
liulihe954/Sheep-Transcriptome0327
|
2b1925d6e68053ad8878c39a0b76bc65aeeccac9
|
33ba5eb9c6b868b79e4eaeecfdf7621f73f91e2b
|
refs/heads/master
| 2021-07-08T08:25:06.904917
| 2020-10-09T07:15:24
| 2020-10-09T07:15:24
| 198,678,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46,788
|
r
|
GO_SpermAtogensis_p.R
|
##################################################################################################
### Set Up and Data prep #################
################################################################################################
#setwd("/Users/liulihe95/Desktop/GO_data")
library(WGCNA);library(ppcor);library(igraph)
library(ggplot2);library(ggpubr);library(ggridges)
require(cowplot);
library(extrafont)
library(dplyr)
library(plotly)
library(geomnet)
# theme_set(theme_ridges())
## library("corrplot");library("qgraph")
## setwd('/Users/liulihe95/Desktop/Network_GO_0327');getwd()
options(stringsAsFactors = FALSE)
networkData_sperm = read.table("Spermatogenesis_GO.txt");
# reroder
a = c(2,4,5,6,9,13,16,18,19); b = c(2:19)[!c(2:19) %in% a]
networkData_sperm = networkData_sperm[,c(1,a,b,20:22)]
table(networkData_sperm$Significant) # 13 significant and 132 non-signif
datExpr_sperm <- as.data.frame(t(networkData_sperm[,c(2:19)]));names(datExpr_sperm) = networkData_sperm$Gene; rownames(datExpr_sperm) = names(networkData_sperm)[c(2:19)]
datExprGO <- datExpr_sperm[c(1:9),];datExprGT <- datExpr_sperm[c(10:18),]
### <datExprGO> data set ready to use <datExprGT> ####
###### check for missing value ##########
gsg_all_sperm = goodSamplesGenes(datExpr_sperm, verbose = 3);
gsg_all_sperm$allOK
#### distance between samples / outliers
sample_Tree_GO = hclust(dist(datExprGO), method = "average")
sample_Tree_GT = hclust(dist(datExprGT), method = "average")
plot(sample_Tree_GO, main = "Sample clustering to detect outliers GO", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
plot(sample_Tree_GT, main = "Sample clustering to detect outliers GT", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
##### NO NAs or missing values were detected. but distances may be susceptible (?)
##################################################################################################
##### 1. Unweighted network analysis - NC + ranking ######
################################################################################################
### 1.Define calculation function - get_adjmat(dataset, rthreshold, pthreshold) --- get unweighted adj matrx --- use PearsonCor
require(ppcor)
# Build unweighted (0/1) co-expression networks for a reference and a test
# expression set using Pearson correlation, then compare their fundamental
# network concepts via WGCNA::conformityBasedNetworkConcepts().
#
# FIX: the function header was commented out while the body was left in
# place, which made `return(Results)` and the closing `}` invalid at the top
# level (the script could not be sourced). The header is restored; the name
# matches the commented call `Results_sperm_cor = get_NC_cor(...)` below.
#
# Args:
#   datExpr1 - reference expression data (samples x genes)
#   datExpr2 - test expression data (samples x genes, same gene columns)
#   r_thres  - absolute correlation cutoff for keeping an edge
#   p_thres  - p-value cutoff (see NOTE inside: only adjacent column pairs
#              are tested)
# Returns: list(NC1, NC2, cormatr_ref, cormatr_test, adjmatr_ref,
#   adjmatr_test, basic, change).
get_NC_cor = function(datExpr1,datExpr2,r_thres,p_thres){
  # network 1 adjacency: edge kept when |cor| >= r_thres
  cormatr1 <- cor(datExpr1)
  adjmatr1 = matrix(1,ncol(datExpr1),ncol(datExpr1))
  colnames(adjmatr1) = colnames(datExpr1)
  rownames(adjmatr1) = colnames(datExpr1)
  adjmatr1[abs(cormatr1) < r_thres] = 0
  # network 2 adjacency
  cormatr2 <- cor(datExpr2)
  adjmatr2 = matrix(1,ncol(datExpr2),ncol(datExpr2))
  colnames(adjmatr2) = colnames(datExpr2)
  rownames(adjmatr2) = colnames(datExpr2)
  adjmatr2[abs(cormatr2) < r_thres] = 0
  # NOTE(review): this loop applies the p-value filter only to consecutive
  # column pairs (i-1, i), not to every gene pair -- confirm this is the
  # intended behavior before relying on p_thres.
  for(i in 2:ncol(datExpr1)){
    r1 = cor.test(datExpr1[,i-1],datExpr1[,i])
    r2 = cor.test(datExpr2[,i-1],datExpr2[,i])
    if(r1$p.value >= p_thres){adjmatr1[i-1,i] = adjmatr1[i,i-1] = 0}
    if(r2$p.value >= p_thres){adjmatr2[i-1,i] = adjmatr2[i,i-1] = 0}
  }
  # fundamental network concepts (WGCNA) for both 0/1 networks
  NC1 = conformityBasedNetworkConcepts(adjmatr1)
  NC2 = conformityBasedNetworkConcepts(adjmatr2)
  # whole-network summary statistics, side by side
  basic_results = data.frame(density1 = NC1$fundamentalNCs$Density,
                             density2 = NC2$fundamentalNCs$Density,
                             centralization1 = NC1$fundamentalNCs$Centralization,
                             centralization2 = NC2$fundamentalNCs$Centralization,
                             heterogeneity1 = NC1$fundamentalNCs$Heterogeneity,
                             heterogeneity2 = NC2$fundamentalNCs$Heterogeneity)
  # per-gene connectivity / cluster-coefficient change and rank
  change_results = data.frame(#gene = colnames(datExpr1),
                              con_1 = NC1$fundamentalNCs$Connectivity,
                              scl_con_1 = NC1$fundamentalNCs$ScaledConnectivity,
                              con_2 = NC2$fundamentalNCs$Connectivity,
                              scl_con_2 = NC2$fundamentalNCs$ScaledConnectivity,
                              con_change = -(NC1$fundamentalNCs$Connectivity - NC2$fundamentalNCs$Connectivity),
                              scl_con_change = -(NC1$fundamentalNCs$ScaledConnectivity - NC2$fundamentalNCs$ScaledConnectivity),
                              rank_scl_con = rank(-abs(NC1$fundamentalNCs$ScaledConnectivity-NC2$fundamentalNCs$ScaledConnectivity)),
                              cls_coef_1 = NC1$fundamentalNCs$ClusterCoef,
                              cls_coef_2 = NC2$fundamentalNCs$ClusterCoef,
                              clst_coef_change = c(NC1$fundamentalNCs$ClusterCoef - NC2$fundamentalNCs$ClusterCoef),
                              rank_clstcoef = rank(-abs(NC1$fundamentalNCs$ClusterCoef-NC2$fundamentalNCs$ClusterCoef)))
  Results = list(NC1 = NC1, NC2 = NC2, cormatr_ref = cormatr1, cormatr_test = cormatr2, adjmatr_ref = adjmatr1, adjmatr_test = adjmatr2, basic = basic_results,change = change_results)
  return(Results)
}
# Partial-correlation version of get_NC_cor(): an edge between two genes is
# kept when |partial cor| >= r_thres AND its pcor p-value <= p_thres.
#
# Improvement: pcor() was previously invoked twice per data set (once for
# $estimate and once for $p.value); each result is now computed once and
# reused, halving the most expensive step.
#
# Args:
#   datExpr1 - reference expression data (samples x genes)
#   datExpr2 - test expression data (samples x genes, same gene columns)
#   r_thres  - absolute partial-correlation cutoff
#   p_thres  - p-value cutoff
# Returns: list(NC1, NC2, cormatr_ref, cormatr_test, adjmatr_ref,
#   adjmatr_test, basic, change) -- same shape as get_NC_cor().
get_NC_pcor = function(datExpr1,datExpr2,r_thres,p_thres){
  ### network 1: partial correlations + p-values (single pcor() call)
  pc1 <- pcor(datExpr1)
  cormatr1 <- pc1$estimate
  pval_matr1 <- pc1$p.value
  #
  adjmatr1 = matrix(1,ncol(datExpr1),ncol(datExpr1))
  colnames(adjmatr1) = colnames(datExpr1)
  rownames(adjmatr1) = colnames(datExpr1)
  # drop weak or non-significant edges
  adjmatr1[abs(cormatr1) < r_thres] = 0
  adjmatr1[abs(pval_matr1) > p_thres] = 0
  ### network 2: same procedure
  pc2 <- pcor(datExpr2)
  cormatr2 <- pc2$estimate
  pval_matr2 <- pc2$p.value
  #
  adjmatr2 = matrix(1,ncol(datExpr2),ncol(datExpr2))
  colnames(adjmatr2) = colnames(datExpr2)
  rownames(adjmatr2) = colnames(datExpr2)
  #
  adjmatr2[abs(cormatr2) < r_thres] = 0
  adjmatr2[abs(pval_matr2) > p_thres] = 0
  # (adjacent-pair p-value loop from get_NC_cor kept here for reference)
  # for(i in 2:ncol(datExpr1)){
  #  r1 = pcor(datExpr1[,i-1],datExpr1[,i])$p.value
  #  r2 = pcor(datExpr2[,i-1],datExpr2[,i])$p.value
  #  if(r1 >= p_thres){adjmatr1[i-1,i] = adjmatr1[i,i-1] = 0}
  #  if(r2 >= p_thres){adjmatr2[i-1,i] = adjmatr2[i,i-1] = 0}
  # }
  # fundamental network concepts (WGCNA) for both 0/1 networks
  NC1 = conformityBasedNetworkConcepts(adjmatr1)
  NC2 = conformityBasedNetworkConcepts(adjmatr2)
  # whole-network summary statistics, side by side
  basic_results = data.frame(density1 = NC1$fundamentalNCs$Density,
                             density2 = NC2$fundamentalNCs$Density,
                             centralization1 = NC1$fundamentalNCs$Centralization,
                             centralization2 = NC2$fundamentalNCs$Centralization,
                             heterogeneity1 = NC1$fundamentalNCs$Heterogeneity,
                             heterogeneity2 = NC2$fundamentalNCs$Heterogeneity)
  # per-gene connectivity / cluster-coefficient change and rank
  change_results = data.frame(#gene = colnames(datExpr1),
                              con_1 = NC1$fundamentalNCs$Connectivity,
                              scl_con_1 = NC1$fundamentalNCs$ScaledConnectivity,
                              con_2 = NC2$fundamentalNCs$Connectivity,
                              scl_con_2 = NC2$fundamentalNCs$ScaledConnectivity,
                              con_change = -(NC1$fundamentalNCs$Connectivity - NC2$fundamentalNCs$Connectivity),
                              scl_con_change = -(NC1$fundamentalNCs$ScaledConnectivity - NC2$fundamentalNCs$ScaledConnectivity),
                              rank_scl_con = rank(-abs(NC1$fundamentalNCs$ScaledConnectivity-NC2$fundamentalNCs$ScaledConnectivity)),
                              cls_coef_1 = NC1$fundamentalNCs$ClusterCoef,
                              cls_coef_2 = NC2$fundamentalNCs$ClusterCoef,
                              clst_coef_change = c(NC1$fundamentalNCs$ClusterCoef - NC2$fundamentalNCs$ClusterCoef),
                              rank_clstcoef = rank(-abs(NC1$fundamentalNCs$ClusterCoef-NC2$fundamentalNCs$ClusterCoef)))
  Results = list(NC1 = NC1, NC2 = NC2, cormatr_ref = cormatr1, cormatr_test = cormatr2, adjmatr_ref = adjmatr1, adjmatr_test = adjmatr2, basic = basic_results,change = change_results)
  return(Results)
}
###### apply function to the dataset datExprGT is reference and datExprGO is the test testset #####
###### For the results we have
###### NC (network concept) corcatr (corcoef matrix) adjmatr ( 0 or 1) and basics(network basic) change (change of the basics)
### results
# Results_sperm_cor = get_NC_cor(datExprGT,datExprGO,0.5,0.01)
Results_sperm_pcor = get_NC_pcor(datExprGT,datExprGO,0.5,0.01)
#table(Results_sperm_cor$cormatr_test > Results_sperm_pcor$cormatr_test)
### 2.connectivity and cluster coef
###### connectivity (of each node) vector ##################
# Node degree = row sums of the 0/1 adjacency minus the diagonal self-edge
Con_ref_pcor= rowSums(Results_sperm_pcor$adjmatr_ref) - 1
Con_test_pcor= rowSums(Results_sperm_pcor$adjmatr_test) - 1
#table(Con_ref_pcor < Con_ref);table(Con_test_pcor < Con_test)
###### mean connectivity ##############
meanCon_ref_pcor = sum(Con_ref_pcor)/ncol(datExpr_sperm);meanCon_ref_pcor
meanCon_test_pcor = sum(Con_test_pcor)/ncol(datExpr_sperm);meanCon_test_pcor
###
# Side-by-side comparison of plain vs partial correlations.
# FIX: "Results_sperm_por" was a typo for "Results_sperm_pcor".
# NOTE(review): Results_sperm_cor only exists if the commented
# get_NC_cor() call above has been run -- confirm before executing.
test_cor_plot = data.frame(cor = vectorizeMatrix(Results_sperm_cor$cormatr_ref), pcor = vectorizeMatrix(Results_sperm_pcor$cormatr_ref))
length(vectorizeMatrix(Results_sperm_pcor$cormatr_ref)) == length(vectorizeMatrix(Results_sperm_cor$cormatr_ref))
table(vectorizeMatrix(Results_sperm_cor$cormatr_ref) < vectorizeMatrix(Results_sperm_pcor$cormatr_ref))
length(vectorizeMatrix(Results_sperm_pcor$cormatr_ref))
length(vectorizeMatrix(Results_sperm_cor$cormatr_ref))
isSymmetric((Results_sperm_pcor$cormatr_ref))
?isSymmetric()
####
###### density #################
# Network density = edges present / edges possible (vectorizeMatrix takes
# the upper triangle of the 0/1 adjacency).
# NOTE(review): this section reads Results_sperm_cor, which requires the
# commented get_NC_cor() call above to have been run -- confirm.
density_ref = sum(vectorizeMatrix(Results_sperm_cor$adjmatr_ref))/(0.5*ncol(datExpr_sperm)*(ncol(datExpr_sperm)-1));density_ref
density_test = sum(vectorizeMatrix(Results_sperm_cor$adjmatr_test))/(0.5*ncol(datExpr_sperm)*(ncol(datExpr_sperm)-1));density_test
###### coef (of each node) vector ##################
clstcoef_ref= Results_sperm_cor$NC1$fundamentalNCs$ClusterCoef
clstcoef_test= Results_sperm_cor$NC2$fundamentalNCs$ClusterCoef
###### mean coef ##################
meanClstcoef_ref = sum(clstcoef_ref)/ncol(datExpr_sperm);meanClstcoef_ref
meanClstcoef_test = sum(clstcoef_test)/ncol(datExpr_sperm);meanClstcoef_test
### 3.top 10 of connectivity/clustercoeffcient ###
########## assemble dataset 1 _ con
# Per-gene connectivity in both networks, the change, and min-ranked
# positions (rank 1 = highest connectivity / largest absolute change).
topgene_sperm_con = data.frame(
Con_ref = Results_sperm_cor$change$con_1,
Con_rank_ref = rank(-Results_sperm_cor$change$con_1,ties.method = "min"),
#Con_ref_scl = Results_sperm_cor$change$scl_con_1,
Con_test = Results_sperm_cor$change$con_2,
#Con_ref_scl = Results_sperm_cor$change$scl_con_1,
Con_rank_test = rank(-Results_sperm_cor$change$con_2,ties.method = "min"),
Con_Change = Results_sperm_cor$change$con_change,
ConChange_rank =rank(-(abs(Results_sperm_cor$change$con_1 - Results_sperm_cor$change$con_2)),ties.method = "min")
)
rownames(topgene_sperm_con) = colnames(datExprGT)
######### assemble dataset 2 _ clscoef
# Same layout for the clustering coefficient.
topgene_sperm_clscoef = data.frame(
Clscoef_ref = Results_sperm_cor$change$cls_coef_1 ,
Clscoef_rank_ref = rank(-Results_sperm_cor$change$cls_coef_1,ties.method = "min"),
Clscoef_test = Results_sperm_cor$change$cls_coef_2,
Clscoef_rank_test = rank(-Results_sperm_cor$change$cls_coef_2,ties.method = "min"),
Clscoef_Change = Results_sperm_cor$change$clst_coef_change,
ConChange_rank = rank(-(abs(Results_sperm_cor$change$cls_coef_1 - Results_sperm_cor$change$cls_coef_2)),ties.method = "min")
)
rownames(topgene_sperm_clscoef) = colnames(datExprGT)
#### define function
# Select the top-ranked genes from a ranking table assembled above.
# `dataset` must carry rank columns at positions 2 (reference-network rank),
# 4 (test-network rank) and 6 (change rank); rows are genes.
# Returns a list of three data.frames (ref / test / change), each restricted
# to genes ranked 1..topnumber and sorted by the corresponding rank column.
#
# FIX: uses seq_len(topnumber) instead of c(1:topnumber) so that
# topnumber = 0 selects nothing (1:0 would wrongly match ranks 1 and 0).
SelectGene_un_cor = function(dataset,topnumber){
  top_ranks = seq_len(topnumber)
  index1 = dataset[,2] %in% top_ranks
  index2 = dataset[,4] %in% top_ranks
  index3 = dataset[,6] %in% top_ranks
  # subset, then order each table by its own rank column (ascending)
  summary1 = dataset[index1,];summary1 = summary1[order(summary1[,2]),]
  summary2 = dataset[index2,];summary2 = summary2[order(summary2[,4]),]
  summary3 = dataset[index3,];summary3 = summary3[order(summary3[,6]),]
  summary = list(
    ref = summary1,
    test = summary2,
    change =summary3
  )
  return(summary)
}
##### example result
# Top-5 genes by each ranking (printed for inspection)
SelectGene_un_cor(topgene_sperm_con,5)
SelectGene_un_cor(topgene_sperm_clscoef,5)
##################################################################################################
###    2. Plotting  ######
################################################################################################
######## 1. generating dataset ####################
##### con --- test_combine_dataset
# Long-format table: one row per gene per network, for ggplot histograms.
# NOTE(review): depends on Results_sperm_cor (see earlier note).
ref = data.frame(
connectivity = as.numeric(Results_sperm_cor$NC1$fundamentalNCs$Connectivity),
category = rep("ref",ncol(datExprGT)))
test = data.frame(
connectivity = as.numeric(Results_sperm_cor$NC2$fundamentalNCs$Connectivity),
category = rep("test",ncol(datExprGT)))
test_combine_dataset <- do.call('rbind', list(ref,test))
str(test_combine_dataset)
table(test_combine_dataset$category)
### clst coef --- test_combine_dataset_clstcoef
# Same long format for the clustering coefficient.
ref_clscoef = data.frame(
clstcoef = as.numeric(Results_sperm_cor$NC1$fundamentalNCs$ClusterCoef),
category = rep("ref",ncol(datExprGT)))
test_clscoef = data.frame(
clstcoef = as.numeric(Results_sperm_cor$NC2$fundamentalNCs$ClusterCoef),
category = rep("test",ncol(datExprGT)))
test_combine_dataset_clstcoef <- do.call('rbind', list(ref_clscoef,test_clscoef))
str(test_combine_dataset_clstcoef)
table(test_combine_dataset$category)
######### 2. plotting ##########################
#############################################################################################
### 1. type one ---- ridge
# (kept commented: earlier ridge-plot version of the distribution figures)
#plot1 = ggplot(test_combine_dataset, aes(x = connectivity, y = category)) +
#  geom_density_ridges(aes(fill = category),scale = 3) +
#  scale_fill_manual(values = c("#00AFBB", "#FC4E07"))+
#  theme_gray()+
#  theme(legend.position="None")+
#  labs(title="Distribution of Connectivity", x="Connectivity", y = "Density")+
#  theme(plot.title = element_text(hjust = 0.5))
#print(plot1)
####
#plot2 =
#ggplot(test_combine_dataset_clstcoef, aes(x = clstcoef, y = category)) +
#  geom_density_ridges(aes(fill = category),scale = 3) +
#  scale_fill_manual(values = c("#00AFBB", "#FC4E07"))+
#  theme_gray()+
#  theme(legend.position="None")+
#  labs(title="Distribution of Cluster Coefficient", x="Connectivity", y = "Density")+
#  theme(plot.title = element_text(hjust = 0.5))
# tiff("Figure1_connectivity_ridge.tiff", width = 14,nrow = 2, height = 12, units = 'in', res = 300)
# plot_grid(plot1, plot2, align = c("h"),labels = c("A","B"), label_size= 20, label_colour = "darkgreen")
# dev.off()
#############################################################################################
#### 2. type two --- normal (ggplot)
#install.packages("extrafont")
#font_import(pattern="[C/c]omic")
#font_import(pattern="[A/a]rial")
#font_import(pattern="[C/c]alibri")
# NOTE(review): font_import() with no pattern scans every system font and
# prompts interactively -- very slow; usually only needed once per machine.
font_import()
loadfonts()
fonts()
# Histogram of per-gene connectivity, reference vs test, with dashed lines
# at the group means.
# NOTE(review): meanCon_ref / meanCon_test are not defined in this file
# (only meanCon_ref_pcor / meanCon_test_pcor are) -- confirm which means
# these vlines should mark.
plot3 =ggplot(test_combine_dataset, aes(x=connectivity, fill=category)) +
geom_histogram(binwidth=1,alpha=0.6, position="identity", aes(y = ..count..), color="black") +
# geom_density(alpha=0.6,trim = F) +
xlim(0,100)+
geom_vline(aes(xintercept=meanCon_ref), color="black", linetype="dashed", size=1) +
geom_vline(aes(xintercept=meanCon_test), color="blue4", linetype="dashed", size=1) +
theme_gray()+
theme(legend.position="top")+
labs(title="Distribution of Connectivity", x="Connectivity", y = "Frequency")+
theme(axis.text.x = element_text(size = 15, family = "Microsoft Sans Serif",color = "black", vjust = 0.5, hjust = 0.5))+
theme(axis.text.y = element_text(size = 15,family = "Microsoft Sans Serif",color = "black", vjust = 0.5, hjust = 0.5))+
theme(axis.title.x = element_text(size = 15,family = "Microsoft Sans Serif",color = "black",vjust = 0.5, hjust = 0.5))+
theme(axis.title.y = element_text(size = 15, color = "black",family = "Microsoft Sans Serif", vjust = 0.5, hjust = 0.5))+
theme(plot.title = element_text(size = 20, family = "Microsoft Sans Serif",color = "black", face = "bold", vjust = 0.5, hjust = 0.5))+
theme(plot.title = element_text(hjust = 0.5))
plot3
# Companion histogram for the clustering coefficient.
plot4 = ggplot(test_combine_dataset_clstcoef, aes(x=clstcoef, fill=category)) +
geom_histogram(binwidth=.01,alpha=0.6, position="identity", aes(y = ..count..), color="black") +
# geom_density(alpha=0.6,trim = F) +
xlim(0,1)+
geom_vline(aes(xintercept=meanClstcoef_ref), color="black", linetype="dashed", size=1) +
geom_vline(aes(xintercept=meanClstcoef_test), color="blue", linetype="dashed", size=1) +
theme_gray()+
theme(legend.position="None")+
labs(title="Distribution of Cluster Coefficient", x="Cluster Coefficient", y = "Frequency")+
theme(axis.text.x = element_text(size = 15, family = "Microsoft Sans Serif",color = "black", vjust = 0.5, hjust = 0.5))+
theme(axis.text.y = element_text(size = 15,family = "Microsoft Sans Serif",color = "black", vjust = 0.5, hjust = 0.5))+
theme(axis.title.x = element_text(size = 15,family = "Microsoft Sans Serif",color = "black",vjust = 0.5, hjust = 0.5))+
theme(axis.title.y = element_text(size = 15, color = "black",family = "Microsoft Sans Serif", vjust = 0.5, hjust = 0.5))+
theme(plot.title = element_text(size = 20, family = "Microsoft Sans Serif",color = "black", face = "bold", vjust = 0.5, hjust = 0.5))+
theme(plot.title = element_text(hjust = 0.5))
# Write the two panels to a 300 dpi TIFF.
tiff("Figure1_DistributionChange.tiff", width = 14, height = 12, units = 'in', res = 300)
plot_grid(plot3, plot4, align = c("v"),labels = c("A","B"), nrow = 2,label_size= 20, label_colour = "darkgreen")
dev.off()
######### 3.differential expresion and differential connected ##########################
##################################################################################################
########## put togehter all the index for selection #############
# One row per gene: logFC, raw and max-scaled connectivity / cluster-coef
# changes, and a Yes/No flag for differential expression.
# NOTE(review): depends on Results_sperm_cor (see earlier note).
ScreenDataset_change = data.frame(
LogFC = round(networkData_sperm$logFC,4),
#scale_FC = round(abs(networkData_sperm$logFC)/(max(abs(networkData_sperm$logFC))),4),
Con_Change = round(Results_sperm_cor$change$con_change,4),
scale_ConChange = round(Results_sperm_cor$change$con_change/(max(abs(Results_sperm_cor$change$con_change))),4),
Clscoef_Change = round(Results_sperm_cor$change$clst_coef_change,4),
scale_ClscoefChange = round(Results_sperm_cor$change$clst_coef_change/(max(abs((Results_sperm_cor$change$clst_coef_change)))),4),
index = rep("No", ncol(datExprGT))
)
rownames(ScreenDataset_change) = colnames(datExprGT)
### get DEs
# Flag the differentially expressed genes from the input annotation.
DE_index = which(networkData_sperm$Significant == "Yes")
ScreenDataset_change[DE_index,"index"] = "Yes"
write.csv(ScreenDataset_change,file = "ScreenDataset_change.csv",row.names = T)
# head(ScreenDataset_change,6)
################ plotly ###################
#library(tidyverse)
#library(plotly)
##########
#str(ScreenDataset_change)
# Interactive 3D scatter: scaled connectivity change vs scaled cluster-coef
# change vs logFC, colored by DE status.
ScreenDataset_change$index = factor(ScreenDataset_change$index)
plot_ly(ScreenDataset_change, x = ~scale_ConChange, y = ~scale_ClscoefChange, z = ~LogFC,
type = "scatter3d", mode = "markers",
marker = list(opacity = 1, size = 3),
color = ~index, colors = c( '#BF382A','#0C4B8E'),
showlegend = T,
alpha = 0.8) %>%
add_markers() %>%
layout(
scene = list(camera = list(eye = list(x = -1.25, y = 1.25, z = .15)),
xaxis = list(title = 'Con_Change',range = c(-1,1)),
yaxis = list(title = 'Clscoef_Change',range = c(-1,1)),
zaxis = list(title = 'Log(FC)',range = c(1.2*min(ScreenDataset_change$LogFC),1.2*max(ScreenDataset_change$LogFC)))),
plot_bgcolor=c('rgb(254, 247, 234)'),
paper_bgcolor=c('rgb(254, 247, 234)'),
showlegend = FALSE
)
######################
#### Module preservation aspect #####
# Correlation of edge patterns (upper-triangle adjacency entries) between
# reference and test networks -- a crude preservation measure.
Con_presv = cor(vectorizeMatrix(as.matrix(Results_sperm_cor$adjmatr_ref)),as.matrix(vectorizeMatrix(Results_sperm_cor$adjmatr_test)))
Con_presv
#Clstcoef_presv = cor(Results_sperm_cor$NC1$fundamentalNCs$ClusterCoef,Results_sperm_cor$NC2$fundamentalNCs$ClusterCoef)
#Clstcoef_presv
### cor.cor ###
# Correlation of the correlation matrices themselves.
cor.cor_sperm = cor(vectorizeMatrix(Results_sperm_cor$cormatr_ref),vectorizeMatrix(Results_sperm_cor$cormatr_test))
cor.cor_sperm
# NOTE(review): the lines below are console scratch -- MP_sperm is not yet
# defined at this point (it is created further down), and
# modulePreservation(Results_sperm_cor) is not a valid call signature
# (modulePreservation expects multiData + multiColor); confirm/remove.
?modulePreservation()
MP_sperm$observed$sperm_GT$intra
table(Results_sperm_cor$cormatr_test == Results_sperm_cor$cormatr_ref)
modulePreservation(Results_sperm_cor)
names(Results_sperm_cor)
dim(as.matrix(Results_sperm_cor$adjmatr1))
###############################
#####differences between network properties #######
###############################
#######change of con general ---overall plotting#####
####show the change in hist: distribution #########
par(mfrow=c(3,1))
(Results_sperm_cor$NC1$fundamentalNCs$Connectivity)
Results_sperm_cor$NC1$fundamentalNCs$Density
mean(sum(vectorizeMatrix(Results_sperm_cor$adjmatr_ref)))
sum(vectorizeMatrix(Results_sperm_cor$adjmatr_ref))/145
sum(Results_sperm_cor$NC1$fundamentalNCs$ClusterCoef)/145
sum(Results_sperm_cor$NC2$fundamentalNCs$ClusterCoef)/145
head(Results_sperm_cor$adjmatr_ref)
#hist_C1 = hist(genCon_C1,breaks = 25)
#hist_M1 = hist(genCon_M1,breaks = 25)
(vectorizeMatrix(Results_sperm_cor$adjmatr_ref))
(colnames(Results_sperm_cor$adjmatr_ref))
# Node degrees from the adjacencies (minus the self-edge) and their
# cross-network correlation.
genCon_GT = rowSums(Results_sperm_cor$adjmatr_ref) - 1
genCon_GO = rowSums(Results_sperm_cor$adjmatr_test) - 1
cor(genCon_GT,genCon_GO)
### rest
# Overlaid histograms of cluster coefficient / connectivity distributions.
hist_ref = hist(Results_sperm_cor$NC1$fundamentalNCs$ClusterCoef,breaks = 20)
hist_test = hist(Results_sperm_cor$NC2$fundamentalNCs$ClusterCoef,breaks = 20)
plot( hist_ref, col=rgb(0,1/2,1,1/4), xlim=c(0,1),ylim = c(1,100), main = paste("clstcoef distbt of control/treatment"));plot( hist_test, col=rgb(1,0,0,1/4), xlim=c(0,1),ylim = c(1,150),add=T);legend("topright", c("Control", "treatment"), col=c(rgb(0,1/2,1,1/4), rgb(1,0,0,1/4)), lwd=10)
# at=seq(0,1,.1),
hist_ref = hist(Results_sperm_cor$NC1$fundamentalNCs$ClusterCoef,breaks = 20)
hist_test = hist(Results_sperm_cor$NC2$fundamentalNCs$ClusterCoef,breaks = 20)
plot( hist_ref, col=rgb(0,1/2,1,1/4), xlim=c(0,150),ylim = c(1,40), at=seq(1,110,1),main = paste("Con distbt of C3/M3 _S"));plot( hist_test, col=rgb(1,0,0,1/4), xlim=c(0,150) ,ylim = c(1,100),add=T);legend("topright", c("Control", "treatment"), col=c(rgb(0,1/2,1,1/4), rgb(1,0,0,1/4)), lwd=10)
cor(vectorizeMatrix(Results_sperm_cor$adjmatr_ref,diag = F),vectorizeMatrix(Results_sperm_cor$adjmatr_test,diag = F))
cor(Results_sperm_cor$NC1$fundamentalNCs$Connectivity,Results_sperm_cor$NC2$fundamentalNCs$Connectivity)
vectorizeMatrix(Results_sperm_cor$adjmatr_ref)
length(vectorizeMatrix(Results_sperm_cor$adjmatr_ref))
####### now we have function to get basics (change of the basics) using cor and pcor ########
###### basic (basic NC) - change of connectivity and clustercoef (by ranking) #########
###### declare the parameters you want to use in the fucntion parameters - e.g.cor CUTOFF 0.5 ##
######################################################################################################
# Re-run of the partial-correlation comparison (same arguments as above).
Results_sperm_pcor = get_NC_pcor(datExprGT,datExprGO,0.5,0.01)
###### Results_sperm_cor$ change has the need for plotting in cyto ######################
########### take the "basics" and do formating for the Cytoscape input ###############
############ add "rank" for plotting ###############
### 2.Define formating function - use package "igraph" ###################
###NEED
###TO
###BE
###DONE
# NOTE(review): plot1..plot6 are not defined in this file -- this tiff()
# call appears to belong to another figure script; confirm before running.
tiff("Figure_int_slo_6_14chr_20190313.tiff", width = 14, height = 12, units = 'in', res = 300)
plot_grid(plot1, plot2,plot3,plot4, plot5,plot6, align = c("hv"), nrow = 3,
labels = c("A", "B","C","D","E","F"), label_size= 20, label_colour = "darkgreen")
dev.off()
################
# FIX: "{r plot single gene}" was a leftover R Markdown chunk header; it is
# not valid R syntax and breaks sourcing of this script, so it is kept only
# as a comment.
# {r plot single gene}
library(igraph)
# NOTE(review): adjmatr_C1 / adjmatr_M1 are not created in this file --
# presumably 0/1 adjacency matrices from a companion script; confirm.
diag(adjmatr_C1) = 0
diag(adjmatr_M1) = 0
# Undirected, unweighted graphs from the two adjacency matrices
Net1 = graph.adjacency(adjmatr_C1,mode = "undirected",weighted = NULL)
Net2 = graph.adjacency(adjmatr_M1,mode = "undirected",weighted = NULL)
?graph.adjacency()
#E(Net);V(Net)
##plotting
# Shared circular layout so the two networks are directly comparable
l <- layout_in_circle(Net1)
par(mfrow=c(1,2))
plot(Net1, layout=l);plot(Net2, layout=l)
##
# Rotate the circular layout so that node `target` sits first.
target = 36
store = c((vcount(Net1)-target+2):vcount(Net1),1:(vcount(Net1)+1-target))
l_one <- cbind(1:vcount(Net1), c(1, vcount(Net1):2));l_one = l_one[store,]
par(mfrow=c(1,2))
plot(Net1, layout=l_one);plot(Net2, layout=l_one);
?write_graph()
# Export both graphs as edge lists for external tools (e.g. Cytoscape).
write_graph(Net1, "Net1.text", format = "edgelist")
write_graph(Net2, "Net2.text", format = "edgelist")
net1_text = read.table("Net1.text")
head(net1_text,16)
Net2_NeedRank = read.csv("Net2_NeedRank.csv")
str(Net2_NeedRank)
# NOTE(review): the lines below run out of order -- Net2_raw is used here
# but only defined a few lines down, and Net1_NeedRank is never read in
# this file; confirm the intended execution order.
length(table(Net2_raw$V1))
Net1_add_Rank = cbind(Net1_NeedRank[order(Net1_NeedRank$degree.layout,decreasing = T),],rev(seq(69:1)))
Net1_add_Rank
#Net1_test = Net1_add_Rank[,c("name","rev(seq(69:1))")]
Net1_test = Net1_add_Rank[,c("name","degree.layout")]
rownames(Net1_test) = Net1_test$name
Net2_raw = read.table("Net2.text")
#Net2_new = merge(Net2_raw,Net1_test,by.x ="V1",by.y="name")
# Attach the old (Net1) degree/rank to each Net2 edge by source node name.
Net2_new = merge(Net2_raw,Net1_test,by.x ="V1",by.y="name")
Net2_new
table(Net2_raw$V1)
table(Net2_raw$V2)
table(Net2_new$degree.layout)
write.csv(Net2_new,"Net2.new_add_old_con.csv")
?merge(Net1_raw,Net1_test,by.x ="V1",by.y="name")
?join()
?match()
names(Net1_add_Rank)
test_rank = read.csv("rank_testing_5216.csv",sep = " ")
?read.csv()
getwd()
######################################################################################################################
##### overviews of the plots, specific nodes(maybe hub nodes) may be selected for plotting?
##### basic stats : density/centralization/heterogeneity;
##### the change of connectivity (scaled by the mixmum), as well as the corresponding rank (of abs value); also, the same rationale for clustering coefficient (the density of neighboors-conncection of a node)
##### connectivity and clustering coefficient may not necessary to be conformative, potential composite stats maybe selected/proposed?
##########################################################################################
# Weighted graph from the TOM dissimilarity matrix.
# NOTE(review): dissTOMGO is only computed further down in this file, and
# `circle` on the first plot() call is undefined (probably meant `l` or
# layout_in_circle) -- TODO confirm before running.
Net = graph.adjacency(dissTOMGO,mode = "undirected",weighted = TRUE)
l <- layout_in_circle(Net)
#par(mfrow=c(1,2))
plot(Net, layout=circle)
plot(Net, layout=l)
#summary(dissTOMC1)
##########################################################################################
################################################################################################################
################################################################################################################
#################################################################################################################
#=====================================================================================
#  Code chunk 5 - weighted correlation network ---set up (automatic)
#=====================================================================================
##########################################################################################
############ r weighted #####################
# Candidate soft-thresholding powers for the scale-free topology fit.
powers = c(c(1:10), seq(from = 12, to=20, by=1))
# Call the network topology analysis function
sft_GT = pickSoftThreshold(datExprGT, networkType = "unsigned",powerVector = powers, verbose = 5)
# Plot the results:
sizeGrWindow(9, 5)
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft_GT$fitIndices[,1], -sign(sft_GT$fitIndices[,3])*sft_GT$fitIndices[,2],
xlab="Soft Threshold (power) GT",ylab="Scale Free Topology Model Fit,signed R^2, GT",type="n",
main = paste("Scale independence GT"));text(sft_GT$fitIndices[,1], -sign(sft_GT$fitIndices[,3])*sft_GT$fitIndices[,2],
labels=powers,cex=cex1,col="red");abline(h=0.80,col="red")# this line corresponds to using an R^2 cut-off of h
# Mean connectivity as a function of the soft-thresholding power
plot(sft_GT$fitIndices[,1], sft_GT$fitIndices[,5],xlab="Soft Threshold (power) GT",ylab="Mean Connectivity GT", type="n",main = paste("Mean connectivity GT"));text(sft_GT$fitIndices[,1], sft_GT$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
####### TAKE power 8, fitting index reach 0.8 ##########################
##############calculate the critical values that will be used for analysis ##########################
# Soft-thresholded adjacency -> TOM dissimilarity for both data sets,
# then average-linkage gene trees on the dissimilarities.
softPower = 8
adjacencyGT = adjacency(datExprGT,power=softPower,type="unsigned");
diag(adjacencyGT)=0
dissTOMGT   = 1-TOMsimilarity(adjacencyGT, TOMType="unsigned")
adjacencyGO = adjacency(datExprGO,power=softPower,type="unsigned");
diag(adjacencyGO)=0
dissTOMGO   = 1-TOMsimilarity(adjacencyGO, TOMType="unsigned")
geneTreeGT  = hclust(as.dist(dissTOMGT), method="average")
geneTreeGO  = hclust(as.dist(dissTOMGO), method="average")
############### visualization ##############################
#pdf("dendrogram.pdf",height=6,width=16)
par(mfrow=c(1,2))
plot(geneTreeGT,xlab="",sub="",main="Gene clustering on TOM-based dissimilarity (GT)",
labels=FALSE,hang=0.04);
plot(geneTreeGO,xlab="",sub="",main="Gene clustering on TOM-based dissimilarity (GO)",
labels=FALSE,hang=0.04);
dev.off()
########### for now we DO NOT need module assignment, all genes in this go is in one module #######
#### Next we will determine modules based on dataset GT ###########
########## Color --- all gray (only one module) ##########
mColorhGT=NULL
# Hybrid tree cut: with cutHeight ~ 0 and deepSplit 0 this is intended to
# yield essentially a single module for this GO term.
treeGT = cutreeHybrid(dendro = geneTreeGT, pamStage=FALSE,
                      minClusterSize = 5, cutHeight = 0.0001,
                      deepSplit = 0, distM = dissTOMGT)
mColorhGT=cbind(mColorhGT,labels2colors(treeGT$labels))
# FIX: cutreeHybrid() returns a list, so table(treeGT) errored;
# tabulate the module labels instead.
table(treeGT$labels)
#for (ds in 0:1){
#  treeC3 = cutreeHybrid(dendro = geneTreeC3, pamStage=FALSE,#
#                        minClusterSize = (30-1*ds), cutHeight = 0.99,
#                        deepSplit = ds, distM = dissTOMC3)
#  mColorhC3=cbind(mColorhC3,labels2colors(treeC3$labels));
#}
# pdf("Module_choices.pdf", height=10,width=25);
plotDendroAndColors(geneTreeGT, mColorhGT, main = "",dendroLabels=F);
dev.off()
# Chosen module assignment (first/only cut)
modulesGT = mColorhGT[,1] # (Chosen based on plot below)
#length(modulesGT)
###################################################################################################
#### NC based on weighted measure #####
# Weighted-network analogue of get_NC_cor(): builds soft-thresholded
# adjacencies, converts them to TOM similarity, and compares fundamental
# network concepts of the two data sets.
#
# FIX: `similarity_test` in the returned list was assigned TOMsimilarity1
# (the reference matrix) instead of TOMsimilarity2 -- a copy-paste bug that
# made the "test" similarity identical to the reference. Also removed the
# unused dissTOM locals (computed but never read or returned).
#
# Args:
#   datExpr1     - reference expression data (samples x genes)
#   datExpr2     - test expression data (samples x genes, same gene columns)
#   softPower    - soft-thresholding power for adjacency()
#   signORunsign - network type string, e.g. "unsigned" or "signed"
# Returns: list(basic, change, similarity_ref, similarity_test,
#   adjmatr1, adjmatr2).
get_NC_wt = function(datExpr1,datExpr2,softPower,signORunsign){
  # network 1: soft-thresholded adjacency -> TOM similarity
  adjacency1 = adjacency(datExpr1,power=softPower,type=as.character(signORunsign));
  diag(adjacency1)=0
  TOMsimilarity1 = TOMsimilarity(adjacency1, TOMType=as.character(signORunsign))
  # network 2
  adjacency2 = adjacency(datExpr2,power=softPower,type=as.character(signORunsign));
  diag(adjacency2)=0
  TOMsimilarity2 = TOMsimilarity(adjacency2, TOMType=as.character(signORunsign))
  # fundamental network concepts (WGCNA) on the TOM similarity matrices
  NC1 = conformityBasedNetworkConcepts(TOMsimilarity1)
  NC2 = conformityBasedNetworkConcepts(TOMsimilarity2)
  # whole-network summary statistics, side by side
  basic_results = data.frame(density1 = NC1$fundamentalNCs$Density,
                             density2 = NC2$fundamentalNCs$Density,
                             centralization1 = NC1$fundamentalNCs$Centralization,
                             centralization2 = NC2$fundamentalNCs$Centralization,
                             heterogeneity1 = NC1$fundamentalNCs$Heterogeneity,
                             heterogeneity2 = NC2$fundamentalNCs$Heterogeneity)
  # per-gene scaled connectivity / cluster-coefficient change and rank
  change_results = data.frame(gene = colnames(datExpr1),
                              scl_con_1 = NC1$fundamentalNCs$ScaledConnectivity,
                              scl_con_2 = NC2$fundamentalNCs$ScaledConnectivity,
                              scl_con_change =NC1$fundamentalNCs$ScaledConnectivity - NC2$fundamentalNCs$ScaledConnectivity,
                              rank_scl_con = rank(-abs(NC1$fundamentalNCs$ScaledConnectivity-NC2$fundamentalNCs$ScaledConnectivity)),
                              cls_coef_1 = NC1$fundamentalNCs$ClusterCoef,
                              cls_coef_2 = NC2$fundamentalNCs$ClusterCoef,
                              clst_coef_change = c(NC1$fundamentalNCs$ClusterCoef - NC2$fundamentalNCs$ClusterCoef),
                              rank_clstcoef = rank(-abs(NC1$fundamentalNCs$ClusterCoef-NC2$fundamentalNCs$ClusterCoef)))
  Results = list(basic = basic_results,change = change_results, similarity_ref = TOMsimilarity1, similarity_test = TOMsimilarity2, adjmatr1=adjacency1,adjmatr2=adjacency2 )
  return(Results)
}
# Run the weighted comparison: GT as reference, GO as test, power 8.
Results_sperm = get_NC_wt(datExprGT,datExprGO,8,"unsigned")
names(Results_sperm)
# (kept commented: manual equivalent of the function internals)
# TOMsimilarity_GO = TOMsimilarity(adjacencyGO, TOMType="unsigned")
# TOMsimilarity_GT = TOMsimilarity(adjacencyGT, TOMType="unsigned")
# NC_GO_w=conformityBasedNetworkConcepts(TOMsimilarity_GO)
# NC_GT_w=conformityBasedNetworkConcepts(TOMsimilarity_GT)
#### steps for detail -- although included in function ######
# adjacencyGT = adjacency(datExprGT,power=softPower,type="unsigned");
# diag(adjacencyGT)=0
# dissTOMGT = 1-TOMsimilarity(adjacencyGT, TOMType="unsigned")
# adjacencyGO = adjacency(datExprGO,power=softPower,type="unsigned");
# diag(adjacencyGO)=0
# dissTOMGO = 1-TOMsimilarity(adjacencyGO, TOMType="unsigned")
geneTreeGT = hclust(as.dist(dissTOMGT), method="average")
geneTreeGO = hclust(as.dist(dissTOMGO), method="average")
###############################################################
###### now!!! do the preservatino statistics ################
########## To quantify this result module preservation statistics ############################
## substitiue gray with blue
mColorhGT = gsub("grey", "blue", mColorhGT)
rownames(mColorhGT) = networkData_sperm$Gene
#dim(mColorhGT) ; str(mColorhGT)
multiExpr_sperm = list(GT=list(data=datExprGT),GO=list(data=datExprGO))
#multiColor_sperm = list(GT = modulesGT)
multiColor_sperm = list(GT = mColorhGT,GO = mColorhGT)
# NOTE(review): the second modulePreservation() call below overwrites the
# first (nPermutations=0 -> observed statistics only); keep whichever is
# intended.
MP_sperm=modulePreservation(multiExpr_sperm,multiColor_sperm,referenceNetworks=1,verbose=3,networkType="unsigned",
                            nPermutations=30,maxGoldModuleSize=30,maxModuleSize=145)
MP_sperm=modulePreservation(multiExpr_sperm,multiColor_sperm,referenceNetworks=1,verbose=3,networkType="unsigned",
                            nPermutations=0)
# FIX: "????(MP_sperm$observed)" was an invalid placeholder (not valid R);
# inspect the structure of the observed statistics instead.
str(MP_sperm$observed)
?modulePreservation()
names(MP_sperm)
# NOTE(review): `mp1` is not defined in this file -- presumably from an
# earlier session; confirm before running the two lines below.
stats = mp1$preservation$Z$ref.C1$inColumnsAlsoPresentIn.M1
stats[order(-stats[,2]),c(1:2)]
#write.csv(stats[order(-stats[,2]),c(1:2)],"module size and pres")
#############################################################################################################################################
# Automatic one-step network construction + module detection on the GT set.
# minModuleSize = 145 (all genes) forces a single module here.
mergingThresh = 0.25
net = blockwiseModules(datExprGT,corType="pearson",
maxBlockSize=5000,networkType="unsigned",power=8,minModuleSize=145,
mergeCutHeight=0.0001,numericLabels=TRUE,saveTOMs = F,
pamRespectsDendro=FALSE,saveTOMFileBase="TOM_sperm_GT")
moduleLabelsAutomatic=net$colors
# Relabel module 0 (unassigned) as module 1
moduleLabelsAutomatic = gsub(0,1,moduleLabelsAutomatic)
# Convert labels to colors for plotting
moduleColorsAutomatic = labels2colors(moduleLabelsAutomatic)
# A data frame with module eigengenes can be obtained as follows
# MEsAutomatic=net$MEs
#this is the body weight
# NOTE(review): datTraits and datExprFemale are not defined in this file --
# these lines look copied from the WGCNA mouse-weight tutorial; confirm
# they are meant to run here.
weight = as.data.frame(datTraits$weight_g)
names(weight)="weight"
# Next use this trait to define a gene significance variable
GS.weight=as.numeric(cor(datExprFemale,weight,use="p"))
# This translates the numeric values into colors
GS.weightColor=numbers2colors(GS.weight,signed=T)
blocknumber=1
datColors=data.frame(moduleColorsAutomatic,GS.weightColor)[net$blockGenes[[blocknumber]],]
# Plot the dendrogram and the module colors underneath
plotDendroAndColors(net$dendrograms[[blocknumber]],colors=datColors,
groupLabels=c("Module colors","GS.weight"),dendroLabels=FALSE,
hang=0.03,addGuide=TRUE,guideHang=0.05)
#############################################################################################################################################
# Circle plots of the weighted adjacency, nodes ordered by scaled
# connectivity in each network.
adjmatrx_sperm1 = Results_sperm$adjmatr1
diag(adjmatrx_sperm1) = 1
change = as.data.frame(Results_sperm$change)
# NOTE(review): the substr() positions are taken from the FIRST gene name
# and applied to all genes -- only correct if every gene name has the same
# length; confirm.
labels_sperm = substr(Results_sperm$change$gene,nchar(Results_sperm$change$gene[1])-2,nchar(Results_sperm$change$gene[1]))
#nchar(Results_sperm$change$gene[1])
change$scl_con_1[36]
change$gene[]
# Node orderings: rank 1 = highest scaled connectivity
order1 = rank(-change$scl_con_1)
order2 = rank(-change$scl_con_2)
circlePlot(adjmatrx_sperm1, labels_sperm, order1, startNewPlot = T,
variable.cex.labels = FALSE, center = c(0.5, 0.5),
radii = c(0.35, 0.35))
circlePlot(Results_sperm$adjmatr1, labels_sperm, order1, startNewPlot = T,
variable.cex.labels = FALSE, center = c(0.5, 0.5),
radii = c(0.55, 0.55))
dev.off()
#############################################################################################################################################
# We now set up the multi-set expression data
# and corresponding module colors:
setLabels = c("sperm_GT", "sperm_GO")
datExprGT_mtrx = as.matrix(datExprGT)
datExprGO_mtrx = as.matrix(datExprGO)
# FIX: a bare str() call (no argument) is an error; inspect the matrix
# that was presumably intended.
str(datExprGT_mtrx)
multiExpr_sperm=list(sperm_GT=list(data=datExprGT_mtrx),
                     sperm_GO=list(data=datExprGO_mtrx))
#moduleColorsGT=moduleColorsAutomatic
# Label every gene 1..145 (each gene its own "module" label vector)
moduleColorsGT = c(1:145); moduleColorsGO = c(1:145)
multiColor_sperm=list(sperm_GT=moduleColorsGT,sperm_GO=moduleColorsGO)
names(multiExpr_sperm); names(multiColor_sperm)
# The number of permutations drives the computation time
# of the module preservation function. For a publication use 200 permutations.
# But for brevity, let's use a small number
nPermutations1=10
# Set it to a low number (e.g. 3) if only the medianRank statistic
# and other observed statistics are needed.
# Permutations are only needed for calculating Zsummary
# and other permutation test statistics.
# set the random seed of the permutation test analysis
set.seed(1)
system.time({
  MP_sperm = modulePreservation(multiExpr_sperm, multiColor_sperm,
                                referenceNetworks = c(1:2),
                                nPermutations = nPermutations1,
                                randomSeed = 1,
                                quickCor = 0,
                                verbose = 3)
})
# Save the results of the module preservation analysis
# FIX: the object created above is MP_sperm, not mp; save(mp, ...) would
# fail with "object 'mp' not found".
save(MP_sperm, file = "modulePreservation.RData")
# If needed, reload the data:
load(file = "modulePreservation.RData")
# specify the reference and the test networks
ref=1; test = 2
Obs.PreservationStats= MP_sperm$preservation$observed[[ref]][[test]]
Z.PreservationStats=MP_sperm$preservation$Z[[ref]][[test]]
# Look at the observed preservation statistics
Obs.PreservationStats
########################################################################################################################
###############################################################
# The first PC is referred to as the module eigengene (ME), and is a single value that
# represents the highest percent of variance for all genes in a module.
#
# Compute module eigengenes for condition C1 and summarise module similarity
# via 1 - |correlation| of the eigengenes.
PCs_C1 = moduleEigengenes(datExprC1, colors=modulesC1)
ME_C1 = PCs_C1$eigengenes
distPCC1 = 1-abs(cor(ME_C1,use="p"))
distPCC1 = ifelse(is.na(distPCC1), 0, distPCC1)  # undefined correlations -> distance 0
pcTreeC1 = hclust(as.dist(distPCC1),method="a")  # "a" = average linkage
MDS_C1 = cmdscale(as.dist(distPCC1),2)           # 2-D multidimensional scaling of modules
colorsC1 = names(table(modulesC1))
#####
# PCs_M3 = moduleEigengenes(datExprM3, colors=modulesM3)
# ME_M3 = PCs_M3$eigengenes
# distPCM3 = 1-abs(cor(ME_M3,use="p"))
# distPCM3 = ifelse(is.na(distPCM3), 0, distPCM3)
# pcTreeM3 = hclust(as.dist(distPCM3),method="a")
# MDS_M3 = cmdscale(as.dist(distPCM3),2)
# colorsM3 = names(table(modulesM3))
### series of plots #### maybe not useful in our analysis
#save.image("tutorial.RData")
#pdf("ModuleEigengeneVisualizations.pdf",height=6,width=6)
par(mfrow=c(1,1), mar=c(0, 3, 1, 1) + 0.1, cex=1)
plot(pcTreeC1, xlab="",ylab="",main="",sub="")
plot(MDS_C1, col= colorsC1, main="MDS plot", cex=2, pch=19)
ordergenesC1 = geneTreeC1$order
# Heatmap of log-scaled expression with genes in dendrogram order
plotMat(scale(log(t(datExprC1)[ordergenesC1,])) , rlabels= modulesC1[ordergenesC1], clabels=
          colnames(t(datExprC1)), rcols=modulesC1[ordergenesC1])
# One barplot of eigengene expression per module, across samples
for (which.module in names(table(modulesC1))){
  ME = ME_C1[, paste("ME",which.module, sep="")]
  barplot(ME, col=which.module, main="", cex.main=2,
          ylab="eigengene expression",xlab="array sample")
};
dev.off()
#####################################################################################################
##### Step 4: Qualitatively and quantitatively measure network preservation at the module level #####
#####
# pdf("Final_modules.pdf",height=8,width=12)
# Side-by-side dendrograms: the C1 module assignment painted under both the
# C1 and M1 gene trees, to eyeball module preservation across conditions.
par(mfrow=c(3,1))
plotDendroAndColors(geneTreeC1, modulesC1, "Modules", dendroLabels=F, hang=0.03, addGuide=TRUE,
                    guideHang=0.05, main="Gene dendrogram and module colors (C1)")
plotDendroAndColors(geneTreeM1, modulesC1, "Modules", dendroLabels=FALSE, hang=0.03, addGuide=TRUE,
                    guideHang=0.05, main="Gene dendrogram and module colors (M1)")
dev.off()
#### The "grey" module contains uncharacterized genes while the gold module contains random genes.
### We first will get the kME values, along with their associated p-values for A1
### and will then output the resulting
### table to a file ("kMEtable1.csv").
geneModuleMembershipC1 = signedKME(datExprC1, ME_C1)
colnames(geneModuleMembershipC1)=paste("PC",colorsC1,".cor",sep="");
MMPvalueC1=corPvalueStudent(as.matrix(geneModuleMembershipC1),dim(t(datExprC1))[[2]]);
colnames(MMPvalueC1)=paste("PC",colorsC1,".pval",sep="");
Gene = rownames(t(datExprC1))
# Interleave kME correlations and p-values, one pair of columns per module
kMEtableC1 = cbind(Gene,Gene,modulesC1)
for (i in 1:length(colorsC1))
  kMEtableC1 = cbind(kMEtableC1, geneModuleMembershipC1[,i], MMPvalueC1[,i])
colnames(kMEtableC1)=c("PSID","Gene","Module",sort(c(colnames(geneModuleMembershipC1),
                                                     colnames(MMPvalueC1))))
write.csv(kMEtableC1,"kMEtableC1.csv",row.names=FALSE)
### Now repeat for A2, using the module assignments from A1 to determine kME values.
### First calculate MEs for A2, since we haven't done that yet
PCsM1 = moduleEigengenes(datExprM1, colors=modulesC1)
ME_M1 = PCsM1$eigengenes
geneModuleMembershipM1 = signedKME(datExprM1, ME_M1)
colnames(geneModuleMembershipM1)=paste("PC",colorsC1,".cor",sep="");
MMPvalueM1=corPvalueStudent(as.matrix(geneModuleMembershipM1),dim(t(datExprM1))[[2]]);
colnames(MMPvalueM1)=paste("PC",colorsC1,".pval",sep="");
# Interleave kME correlations and p-values, matching the C1 table layout
kMEtableM1 = cbind(Gene,Gene,modulesC1)
for (i in 1:length(colorsC1))
  kMEtableM1 = cbind(kMEtableM1, geneModuleMembershipM1[,i], MMPvalueM1[,i])
# BUG FIX: the original `colnames(kMEtableM1)=colnames(kMEtableM1)` was a
# no-op that left the appended kME/p-value columns unnamed in the CSV.
# The M1 table has exactly the same column layout as the C1 table (same
# modules via colorsC1), so reuse that header.
colnames(kMEtableM1)=colnames(kMEtableC1)
write.csv(kMEtableM1,"kMEtableM1.csv",row.names=FALSE)
### several ways to put kME into practice #####
#### The first thing we can do is plot the kME values
# One scatterplot per module: each gene's kME in M1 vs its kME in C1.
pdf("all_kMEtable2_vs_kMEtable1.pdf",height=8,width=8)
for (c in 1:length(colorsC1)){
  verboseScatterplot(geneModuleMembershipM1[,c],geneModuleMembershipC1[,c],main=colorsC1[c],
                     xlab="kME in M1",ylab="kME in C1")
}
dev.off()
pdf("inModule_kMEtable2_vs_kMEtable1.pdf",height=8,width=8)
### plots for screening ###
# BUG FIX: the original had `main= "diff kME for screening"???` -- the
# stray "???" was a syntax error; a comma separating the arguments was
# clearly intended.
# NOTE(review): this base plot uses the C3/M3 membership tables while the
# loop below overlays C1/M1 modules -- confirm the mix is intentional.
plot(geneModuleMembershipM3[,1],geneModuleMembershipC3[,1],main= "diff kME for screening",
     col = colorsC3[1], xlim = c(-1,1), ylim = c(-1,1),
     xlab="kME in M3",ylab="kME in C3")
for (c in 2:length(colorsC3)){
  points(geneModuleMembershipM3[,c],geneModuleMembershipC3[,c],col = colorsC3[c],
         xlab="kME in M3",ylab="kME in C3")
}
# Overlay per-module scatter of kME (M1 vs C1), restricted to genes
# assigned to that module.
for (c in 1:length(colorsC1)){
  inMod = modulesC1== colorsC1[c]
  verboseScatterplot(geneModuleMembershipM1[inMod,c],geneModuleMembershipC1[inMod,c],main=colorsC1[c],
                     col = colorsC1[c], add = T,
                     xlab="kME in M1",ylab="kME in C1")
}
dev.off()
#save.image("tutorial.RData") #(optional line of code)
#(Similar
Gene_C1 = colnames(datExprC1)
### The second thing we can do is determine which genes are hubs in both networks ####
### These genes represent the top 10 genes per module based on kME in both networks.
topGenesKME = NULL
for (c in 1:length(colorsC1)){
  # Rank genes by kME within module c, separately in each network; a gene
  # is a shared hub if its *worse* rank across the two networks is <= 10.
  kMErank1 = rank(-geneModuleMembershipC1[,c])
  kMErank2 = rank(-geneModuleMembershipM1[,c])
  # The +.00001 breaks ties in favour of the first network's ranking
  maxKMErank = rank(apply(cbind(kMErank1,kMErank2+.00001),1,max))
  topGenesKME = cbind(topGenesKME,Gene[maxKMErank<=10])
}; colnames(topGenesKME) = colorsC1
topGenesKME
### Rank-change screening: how much each gene's within-module kME rank
### shifts between the C3 and M3 networks.
mostkME_3 = NULL
for (c in 1:length(colorsC3)){
  kMErank1 = rank(-geneModuleMembershipC3[,c])
  kMErank2 = rank(-geneModuleMembershipM3[,c])
  dif = kMErank2-kMErank1  # positive = gene dropped in rank from C3 to M3
  mostkME_3 = cbind(mostkME_3,dif)
  # maxKMErank = rank(apply(cbind(kMErank1,kMErank2+.00001),1,max))
  # topGenesKME = cbind(topGenesKME,Gene[maxKMErank<=10])
};
change = cbind (Gene_C3, mostkME_3); colnames(change) = c("id",colorsC2)
# NOTE(review): networkName rows 350:418 are assumed to align one-to-one
# with Gene_C3 -- confirm this hard-coded slice against the annotation table.
change = data.frame( ID = change[,1], Gene_name = networkName[c(350:418),2], change[,c(2:3)] )
## example sorting genes in weighted ##
order(abs(as.numeric(change[,3])))
##### sort data #####
rank_change = NULL
for (c in c(3:4)){
  # Write genes sorted by magnitude of rank change, one file per module column
  changefinal = change[order(abs(as.numeric(change[,c])), decreasing = T),c(1:2,c)]
  write.csv(changefinal,paste("changefinal2_",c,sep = "_"))
}
##### Third thing is find top genes in each module and compare their changes across condition ###
### i.e. top 10 in C3 change to what in M3
topGenesKMEC1_each = NULL
for( b in 1:length(colorsC1)){
  topGenesKMEC1 = NULL
  # Re-sort both membership tables by the current module's kME (descending)
  geneModuleMembershipC1 = geneModuleMembershipC1[order(geneModuleMembershipC1[,b],decreasing = T),]
  geneModuleMembershipM1 = geneModuleMembershipM1[order(geneModuleMembershipM1[,b],decreasing = T),]
  # Top 10 genes by kME in C1, together with their kME value and rank in M1
  TOP1 = data.frame(geneModuleMembershipC1[c(1:10),b],
                    c(1:10),
                    geneModuleMembershipM1[rownames(geneModuleMembershipC1)[c(1:10)],b],
                    match(rownames(geneModuleMembershipC1)[c(1:10)],rownames(geneModuleMembershipM1)),
                    row.names = rownames(geneModuleMembershipC1)[c(1:10)])
  # Bottom 10 genes by kME in C1.
  # BUG FIX: the original reused the *top-10* row names here, which both
  # mislabelled the rows and made rbind(TOP1, TOP2) fail with
  # "duplicate 'row.names' are not allowed"; use the bottom-10 gene names.
  TOP2 = data.frame(geneModuleMembershipC1[c((length(Gene_C1)-9):length(Gene_C1)),b],
                    c((length(Gene_C1)-9):length(Gene_C1)),
                    geneModuleMembershipM1[rownames(geneModuleMembershipC1)[c((length(Gene_C1)-9):length(Gene_C1))],b],
                    match(rownames(geneModuleMembershipC1)[c((length(Gene_C1)-9):length(Gene_C1))],rownames(geneModuleMembershipM1)),
                    row.names = rownames(geneModuleMembershipC1)[c((length(Gene_C1)-9):length(Gene_C1))])
  names(TOP1) = names(TOP2)= c(paste(colorsC1[b],"C3"),"inmd_rank1",paste(colorsC1[b],"M3"),"inmd_rank2")
  topGenesKMEC1 = rbind(TOP1,TOP2)
  # BUG FIX: list(acc, x) nests the accumulator deeper on every iteration;
  # append the new table instead so the result is a flat list of tables.
  topGenesKMEC1_each = c(topGenesKMEC1_each, list(topGenesKMEC1))
}
topGenesKMEC1_each
|
6aeed56027e3fda428c1ba9af3d9de220c2b2621
|
22de79245ec10fefc510ae021d39bb4b0e6bfd80
|
/code/analysis/lda.R
|
9adf5befc66677fe4b46b3bfd49e6f82cd0045d1
|
[] |
no_license
|
alex-saez/NYT-project
|
9200a176e6b224344ca5f05b7719f8e4c5f53eec
|
906f5fae110ecbfe43a2239ad8393a08bcdd3e09
|
refs/heads/master
| 2020-04-05T21:06:48.219521
| 2019-01-27T16:12:24
| 2019-01-27T16:12:24
| 68,248,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,574
|
r
|
lda.R
|
# Fit an LDA topic model to a document-term matrix, export the top terms per
# topic, then score held-out documents with the fitted model and compare
# held-out perplexities across a family of previously fitted models.
library(topicmodels)
# NOTE(review): hard-coded absolute path -- only works on the author's machine.
setwd("/Users/Alex/Dropbox/NYT project/")
load("./data/dtm.RData")
date()  # wall-clock timestamp before fitting (Gibbs LDA can be slow)
topicmodel = LDA(dtm, k=50, method = "Gibbs")
date()  # timestamp after fitting
terms = terms(topicmodel, 100)  # top 100 terms for each of the 50 topics
terms = as.data.frame(terms)
write.csv(terms,"./data/topic_models/topic_terms_dtm2_100_Gibbs_burn1000.csv", row.names=FALSE)
# use topic model to estimate the topics of new data:------------------------------------
dict = dtm$dimnames$Terms # get dictionary
# NOTE(review): `data` is not defined in this script -- presumably loaded
# interactively or by dtm.RData; confirm before running non-interactively.
doc = VCorpus(VectorSource(data$content[seq(16446,16843,by=2)])) # new data
x = DocumentTermMatrix(doc, control = list(dictionary = dict)) # dtm using training dictionary
a = posterior(topicmodel, newdata = x)
plot(a$topics[1,])                    # topic distribution of the first new document
order(a$topics[1,], decreasing=T)     # its topics, most probable first
# Held-out perplexity for models of varying k (50..200) and method.
# NOTE(review): the topicmodel_Gibbs*/topicmodel_VEM* objects are not
# created in this script -- they must exist in the session already.
p_G50 = perplexity(topicmodel_Gibbs50, newdata = x)
p_V50 = perplexity(topicmodel_VEM50, newdata = x)
p_G75 = perplexity(topicmodel_Gibbs75, newdata = x)
p_V75 = perplexity(topicmodel_VEM75, newdata = x)
p_G100 = perplexity(topicmodel_Gibbs100, newdata = x)
p_V100 = perplexity(topicmodel_VEM100, newdata = x)
p_G125 = perplexity(topicmodel_Gibbs125, newdata = x)
p_V125 = perplexity(topicmodel_VEM125, newdata = x)
p_G150 = perplexity(topicmodel_Gibbs150, newdata = x)
p_V150 = perplexity(topicmodel_VEM150, newdata = x)
p_G200 = perplexity(topicmodel_Gibbs200, newdata = x)
p_V200 = perplexity(topicmodel_VEM200, newdata = x)
# Two columns: Gibbs vs VEM; one row per k
peplexities = matrix(c(p_G50,p_V50,p_G75,p_V75,p_G100,p_V100,p_G125,p_V125,p_G150,p_V150,p_G200,p_V200),ncol=2,byrow=T)
matplot(c(50,75,100,125,150,200), peplexities, type="b",pch='o',lty=1, xlab='Number of topics', ylab = 'Perplexity')
|
530bdb7d988d6e5403e05d1921f518f7326dbdb8
|
a2156e6293cc09bce5cd85d59dc221132fc2c344
|
/R/LogLik.phGH.R
|
54ecfc500db0caf78dc490f0245fd6a7c5f4429e
|
[] |
no_license
|
drizopoulos/JM
|
5ff1c6f14fb5fd1dbe582ac4fa5375c3bc3e7182
|
da53677e5b7b823a75c8f5f3a30738a84778f096
|
refs/heads/master
| 2022-08-17T07:44:41.133947
| 2022-08-08T13:00:20
| 2022-08-08T13:00:20
| 43,431,362
| 33
| 6
| null | 2018-02-12T19:14:27
| 2015-09-30T12:23:35
|
R
|
UTF-8
|
R
| false
| false
| 1,592
|
r
|
LogLik.phGH.R
|
# Negative log-likelihood of a joint model for longitudinal and
# time-to-event data, with the random effects integrated out by
# Gauss-Hermite quadrature.
# NOTE(review): this function reads many objects from its enclosing
# environment (ncx, ncww, ncz, diag.D, X, Xtime, Xtime2, Ztb, Ztime.b,
# Ztime2.b, WW, y, id, n, k, d, ind.T0, ind.L1, indT, unq.indT, b, lis.b,
# wGH, VCdets, control, ...) -- it is only meaningful when called from the
# package's fitting machinery, not standalone.
LogLik.phGH <-
function (thetas, lambda0) {
    # Unpack the packed parameter vector: longitudinal fixed effects,
    # residual SD (estimated on the log scale), survival covariate
    # coefficients, association parameter alpha, and the random-effects
    # covariance (log-diagonal or Cholesky-transformed).
    betas <- thetas[1:ncx]
    sigma <- exp(thetas[ncx + 1])
    gammas <- thetas[seq(ncx + 2, ncx + 1 + ncww)]
    alpha <- thetas[ncx + ncww + 2]
    D <- thetas[seq(ncx + ncww + 3, length(thetas))]
    D <- if (diag.D) exp(D) else chol.transf(D)
    # linear predictors
    eta.yx <- as.vector(X %*% betas)
    eta.yxT <- as.vector(Xtime %*% betas)
    eta.yxT2 <- as.vector(Xtime2 %*% betas)
    Y <- eta.yxT + Ztime.b    # longitudinal value at the event times
    Y2 <- eta.yxT2 + Ztime2.b # longitudinal value at the quadrature times
    eta.tw <- if (!is.null(WW)) as.vector(WW %*% gammas) else rep(0, n)
    eta.t <- eta.tw + alpha * Y
    eta.s <- alpha * Y2
    exp.eta.s <- exp(eta.s)
    mu.y <- eta.yx + Ztb
    # log-density of the longitudinal responses, summed within subject
    logNorm <- dnorm(y, mu.y, sigma, TRUE)
    log.p.yb <- rowsum(logNorm, id); dimnames(log.p.yb) <- NULL
    # log hazard at the observed event times; NA baseline values (e.g.
    # censored subjects) contribute 0
    log.lambda0T <- log(lambda0[ind.T0])
    log.lambda0T[is.na(log.lambda0T)] <- 0
    log.hazard <- log.lambda0T + eta.t
    # cumulative hazard accumulated over the quadrature grid, per subject
    # and per quadrature point
    S <- matrix(0, n, k)
    S[unq.indT, ] <- rowsum(lambda0[ind.L1] * exp.eta.s, indT, reorder = FALSE)
    log.survival <- - exp(eta.tw) * S
    log.p.tb <- d * log.hazard + log.survival
    # log-density of the random effects at the quadrature points
    log.p.b <- if (control$typeGH == "simple") {
        rep(dmvnorm(b, rep(0, ncz), D, TRUE), each = n)
    } else {
        matrix(dmvnorm(do.call(rbind, lis.b), rep(0, ncz), D, TRUE), n, k, byrow = TRUE)
    }
    p.ytb <- exp(log.p.yb + log.p.tb + log.p.b)
    if (control$typeGH != "simple")
        p.ytb <- p.ytb * VCdets
    dimnames(p.ytb) <- NULL
    # integrate over the quadrature points with weights wGH
    p.yt <- c(p.ytb %*% wGH)
    p.byt <- p.ytb / p.yt
    log.p.yt <- log(p.yt)
    # return the *negative* log-likelihood, summing finite terms only
    - sum(log.p.yt[is.finite(log.p.yt)], na.rm = TRUE)
}
|
7dbdb426655ea54a51669ccd56d7ab759fb6a169
|
b97ac4bc1929654b92a4c1dbadc2bbe821cfef2a
|
/gerFc2Yknn.R
|
10cabec5ddf0c8525cf32f3bc495327face8cb11
|
[] |
no_license
|
tambonbon/German-NPFDA
|
a97c5f4bf857fa9cee64bcc1df24bbd79eb9e693
|
5e3c358be8b5a7f3e1ca4c78d6774800a95b7023
|
refs/heads/master
| 2020-06-13T16:49:42.015821
| 2019-07-01T17:57:26
| 2019-07-01T17:57:26
| 194,718,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,576
|
r
|
gerFc2Yknn.R
|
# Rolling one-step-ahead functional kNN prediction of the German 2y yield
# curves (12 columns per observation) for five consecutive forecast
# origins, followed by an RMSE against the realised curves.
#
# REFACTOR: the original repeated the same 12-iteration prediction loop
# five times (s2, s2a, s2b, s2c, s2d) with only the input rows changed.
# The duplication is collapsed into one helper; the result objects keep
# their original names so any downstream code is unaffected.
gerFc2y.past.learn = germanyFc2y[1:35,]
gerFc2y.past.testing = germanyFc2y[36:41,]
yearEndTest = germanyFc2y[41,]
tGer2y = t(germanyFc2y)
# Predict the next curve from the two observations in `rows`, one column
# (series) at a time.  Relies on funopare.knn() and `mu` from the
# surrounding NPFDA session.
predict_knn_window = function(rows) {
  preds = numeric(12)
  for (s in 1:12) {
    futur = germanyFc2y[rows, s]
    fit = funopare.knn(futur, gerFc2y.past.learn, gerFc2y.past.testing,
                       4, kind.of.kernel = "quadratic",
                       semimetric = "pca", q = mu)
    preds[s] = fit$Predicted.values
  }
  preds
}
s2  = predict_knn_window(39:40)
s2a = predict_knn_window(40:41)
s2b = predict_knn_window(41:42)
s2c = predict_knn_window(42:43)
s2d = predict_knn_window(43:44)
# 5 x 12 matrix of predictions, one row per forecast origin
mse2y1mknn = rbind(s2, s2a, s2b, s2c, s2d)
mean(mse2y1mknn)
msetest2y1mknn = 0
dat = germanyFc2y[41:45,]  # realised curves matching the five origins
# Root mean squared error over the 5 x 12 = 60 predicted values
msetest2y1mknn = round(sum((mse2y1mknn - dat)^2)/60, 4)
msetest2y1mknn = sqrt(msetest2y1mknn)
msetest2y1mknn
# cummse2y1m = 0
# for(i in 1:60){
#   cummse2y1m = ((round(((as.vector(t(mse2y1m)) - as.vector(t(dat)))^2)/10,4)))
#   cummse2y1m = cumsum(cummse2y1m)
# }
|
2d3502fe6be230012f267733848d095c8afddb88
|
e8667680eba33a0fd63adca9354a7cbead2aa2fd
|
/man/labelFigure.Rd
|
cc6eef4e10110413e4bac618f1c8e4b4b8da28ba
|
[] |
no_license
|
mkoohafkan/figr
|
5a831d6dbca923296d94b91e7a39d9effeddd4ef
|
1372e1d5b6d632a99e8694ac6c99e4880b862980
|
refs/heads/master
| 2021-01-17T16:35:03.174702
| 2015-01-17T10:47:31
| 2015-01-17T10:47:31
| 20,353,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 462
|
rd
|
labelFigure.Rd
|
\name{labelFigure}
\alias{labelFigure}
\title{Figure caption}
\description{Place a figure caption}
\usage{labelFigure(key, link, punc)}
\arguments{
\item{key}{identifier for the figure being referenced}
\item{link}{logical: If TRUE, relative link to figure is included}
  \item{punc}{separator to use between the reference number and the caption.
    Defaults to ":".}
}
\value{The figure caption}
\author{Michael Koohafkan}
|
ab725a4c4de1ca0ead310099795d750d61413c6b
|
b2581e89c08a72208beb7cd01d1d5b3514c0501b
|
/server.R
|
148cf2f8e451050bdbcade8e4bbbbe64ef2fa7f6
|
[] |
no_license
|
djdhiraj/Shiny_R
|
f1fe9cd6d7b262667eb82118cc52592a516a7bb8
|
cef5527f3ee41cb4c8032e10171d04ea9c134b1a
|
refs/heads/master
| 2020-04-10T18:02:27.213853
| 2018-12-13T11:05:50
| 2018-12-13T11:05:50
| 161,192,162
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 187
|
r
|
server.R
|
library(UsingR)
data("mtcars")
# Shiny server: renders a histogram of horsepower (mtcars$hp).
shinyServer(
  # BUG FIX: "funtion" was a typo for "function" -- the original file did
  # not parse at all.
  function(input, output) {
    output$newHist <- renderPlot(
      {
        # Label fixed: the plotted variable is horsepower, not child height
        # (the original xlab 'cild height' was copied from another example).
        hist(mtcars$hp, xlab = 'Horsepower', main = 'hist')
      }
    )
  }
)
|
c0e42678681540d3dcce90b39689c9ac788771cc
|
1fd16e1779896a3306c11dc36b088acefa179349
|
/R/post_stratified_ITS.R
|
f367f8260ef14885ef35568dc22d5ab6cc651f4e
|
[] |
no_license
|
cran/simITS
|
d57144379ad2d68beab5426799b2791c8f16ae5f
|
9aa0fd95d9ee8957f0a62203e7bd0f40b7ba4b5b
|
refs/heads/master
| 2022-07-21T19:34:19.293734
| 2020-05-20T12:50:02
| 2020-05-20T12:50:02
| 259,620,598
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,288
|
r
|
post_stratified_ITS.R
|
##
## Post-stratified ITS estimator code
##
#' Calculate proportion of subgroups across time
#'
#' Calculate overall proportion of cases in each group that lie within a given
#' interval of time defined by t_min and t_max.
#'
#' @inheritParams aggregate_data
#' @param t_min The start month to aggregate cases over.
#' @param t_max The final month (default is last month).
#' @return Dataframe of each group along with overall average group weight in
#' the specified timespan.
#' @example examples/aggregate_data_etc.R
#' @export
calculate_group_weights = function( groupname, dat, t_min, t_max = max( dat$month ), Nname = "N" ) {
  stopifnot( Nname %in% names( dat ) )
  #groupname.q = quo( groupname )
  # select target months to calibrate averages on
  dat = dplyr::filter( dat, month >= t_min, month <= t_max )
  # calculate the total sizes for each group, summing the count column
  # named by `Nname` (resolved via rlang::sym for tidy evaluation)
  sdat = dat %>% dplyr::group_by_at( groupname ) %>%
    dplyr::summarise( N = sum(!!rlang::sym(Nname)) )
  # pi_star = each group's share of all cases within the time window
  sdat = sdat %>% dplyr::ungroup() %>% dplyr::mutate( pi_star = N / sum(N) )
  sdat
}
#' Aggregate grouped data
#'
#' This will take a dataframe with each row being the outcomes, etc., for a
#' given group for a given month and aggregate those groups for each month.
#'
#' @param dat Dataframe with one row for each time point and group that we are
#' going to post stratify on. This dataframe should also have an column with
#' passed name "Nname" indicating the number of cases that make up each given
#' row. It should have a 'month' column for the time.
#' @param outcomename String name of the outcome variable in dat.
#' @param groupname Name of the column that has the grouping categorical
#' variable
#' @param Nname Name of variable holding the counts (weight) in each group.
#' @param rich If TRUE, add a bunch of extra columns with proportions of the
#' month that are each group and so forth.
#' @param is_count If TRUE the data are counts, and should be aggregated by sum
#' rather than by mean.
#' @param covariates group-invariant covariates to preserve in the augmented
#' rich dataframe. These are not used in this method for any calculations.
#' Pass as list of column names of dat
#' @return Dataframe of aggregated data, one row per month. If rich=TRUE many
#' extra columns with further information.
#' @example examples/aggregate_data_etc.R
#' @export
aggregate_data = function( dat, outcomename, groupname, Nname,
                           is_count=FALSE,
                           rich = TRUE, covariates = NULL ) {
  # Validate that any requested covariates actually exist in dat
  if ( is.null( covariates ) ) {
    covariates = c()
  } else {
    if ( !all( covariates %in% names( dat ) ) ) {
      stop( "Covariates listed that are not in dataframe" )
    }
  }
  if ( is_count ) {
    # Count outcome: aggregate by summing outcome and N within each month
    dd <- dat %>% dplyr::group_by( month ) %>%
      dplyr::summarise( .Y = sum( (!!rlang::sym(outcomename)) ),
                        # .Y.bar = sum( (!!rlang::sym(outcomename)) ) / sum(N),
                        N = sum( (!!rlang::sym(Nname)) ) )
    dd[ outcomename ] = dd$.Y
    # dd[ paste0( outcomename, ".bar" ) ] = dd$.Y.bar
    dd$.Y = dd$.Y.bar = NULL
  } else {
    # Continuous outcome: N-weighted mean of the outcome within each month
    dd <- dat %>% dplyr::group_by( month ) %>%
      dplyr::summarise( .Y = sum( (!!rlang::sym(Nname)) * (!!rlang::sym(outcomename)) ) / sum( (!!rlang::sym(Nname)) ),
                        N = sum( (!!rlang::sym(Nname)) ) )
    dd[ outcomename ] = dd$.Y
    dd$.Y = NULL
  }
  if ( rich ) {
    # calculate group sizes: per-month share pi_<group>, in wide form
    ddwts = dat %>% dplyr::select( "month", groupname, Nname ) %>%
      dplyr::rename( N = {{Nname}} ) %>%
      dplyr::group_by( month ) %>%
      dplyr::mutate( pi = N / sum( N ) ) %>%
      dplyr::select( -N ) %>%
      tidyr::pivot_wider( month,
                          names_from = groupname, values_from = "pi",
                          names_prefix = "pi_" )
    # throw in group baselines and covariates as well in wide form
    ddg = dat[ c( "month", groupname, outcomename, covariates ) ]
    ddg = tidyr::spread_( ddg, groupname, outcomename, sep="_" )
    names(ddg) = gsub( groupname, outcomename, names(ddg) )
    stopifnot(nrow(ddg) == nrow( dd ) ) # possibly covariates varied in spread?
    ddg$month = ddwts$month = NULL
    dd = dplyr::bind_cols( dd, ddg, ddwts )
  }
  dd
}
#' Adjust an outcome time series based on the group weights.
#'
#' Reweight the components of a series to match target weights for several
#' categories. This is a good preprocessing step to adjust for time-varying
#' covariates such as changing mix of case types.
#'
#' @param outcomename Name of column that has the outcome to calculated adjusted
#' values for.
#' @param groupname Name of categorical covariate that determines the groups.
#' @param Nname Name of column in dat that contains total cases (this is the
#' name of the variable used to generate the weights in pi_star).
#' @param include_aggregate Include aggregated (unadjusted) totals in the output
#' as well.
#' @param dat Dataframe of data. Requires an N column of total cases
#' represented in each row.
#' @param pi_star The target weights. Each month will have its groups
#' re-weighted to match these target weights.
#' @param is_count Indicator of whether outcome is count data or a continuous
#' measure (this impacts how aggregation is done).
#' @param covariates Covariates to be passed to aggregation (list of string
#' variable names).
#' @return Dataframe of adjusted data.
#' @example examples/aggregate_data_etc.R
#' @export
adjust_data = function( dat, outcomename, groupname, Nname, pi_star, is_count=FALSE,
                        include_aggregate = FALSE,
                        covariates = NULL ) {
  # add the target subgroup weights to the dataframe
  adat = merge( dat, pi_star[ c( groupname, "pi_star" ) ], by=groupname, all.x = TRUE )
  if ( is_count ) {
    # convert counts to per-case rates so the re-weighting is well defined
    adat[outcomename] = adat[[outcomename]] / adat[[Nname]]
  }
  # calculate adjusted outcomes: weight each group by the fixed target
  # shares pi_star instead of the month's observed group shares
  adj.dat = adat %>% dplyr::group_by( month ) %>%
    dplyr::summarise( #.Y = sum( N * ( !!rlang::sym( outcomename ) ) / sum(N) ),
      .Y.adj = sum( pi_star * !!rlang::sym( outcomename ) ),
      N = sum(!!rlang::sym(Nname) ) )
  if ( is_count ) {
    # scale adjusted rates back up to counts
    adj.dat = dplyr::mutate( adj.dat, #.Y = .Y * N,
                             .Y.adj = .Y.adj * N )
  }
  # store the adjusted series under "<outcomename>.adj"
  oname = paste0( outcomename, ".adj" )
  adj.dat[ oname ] = adj.dat$.Y.adj
  # adj.dat[ outcomename ] = adj.dat$.Y
  adj.dat$.Y.adj = adj.dat$.Y = NULL
  if ( include_aggregate ) {
    # also merge in the unadjusted per-month aggregates for comparison
    sdat = aggregate_data( dat,
                           outcomename=outcomename, groupname=groupname, Nname=Nname,
                           is_count=is_count, covariates = covariates )
    adj.dat = merge( adj.dat, sdat, by=c("N","month"), all=TRUE )
  }
  dplyr::arrange( adj.dat, month )
}
####### For simulation studies and illustration #######
#' A fake DGP with time varying categorical covariate for illustrating the code.
#'
#' This code makes synthetic grouped data that can be used to illustrate
#' benefits of post stratification.
#'
#' @param t_min Index of first month
#' @param t_max Index of last month
#' @param t0 last pre-policy timepoint
#' @param method Type of post-stratification structure to generate (three designs of 'complex', 'linear' and 'jersey' were originally concieved of when designing simulation studies with different types of structure).
#' @return Dataframe of fake data, with one row per group per time period.
#' @examples
#' fdat = generate_fake_grouped_data(t_min=-5,t_max=10, t0 = 0)
#' table( fdat$month )
#' table( fdat$type )
#' @export
generate_fake_grouped_data = function( t_min, t0, t_max, method=c("complex","linear","jersey") ) {
  stopifnot( t_min < t0 )
  stopifnot( t_max > t0 )
  t = t_min:t_max
  method = match.arg(method)
  # number of cases of each type (not impacted by policy)
  # Drug is steadily declining. violent is slowly increasing.
  N.drug = round( (200-800)*(t - t_min)/(t_max-t_min) + 800 )
  N.violent = round( (300-100)*(t - t_min)/(t_max-t_min) + 100 )
  if ( method == "complex" ) {
    # Add a seasonality component
    N.violent = N.violent + 55 * sin( 2 * pi * t / 12)
  }
  if ( method == "jersey" ) {
    # Poisson counts; drug caseload decays after the policy point t0
    N.drug = stats::rpois( length( t ), lambda=700 )
    N.violent = stats::rpois( length( t ), lambda=400 )
    N.property = stats::rpois( length( t ), lambda=500 )
    N.drug = pmax( 0.55, pmin( 1, 1 - (t - t0) / 25 ) ) * N.drug
  }
  if ( method=="linear" || method == "complex") {
    # impact on proportion of cases with outcome
    prop.base = arm::logit( seq( 0.8, 0.4, length.out=length(t) ) )
    prop.violent = arm::invlogit( prop.base - 1.5 + stats::rnorm( length(t), mean=0, sd=0.05 )
                                  + (t>t0) * pmin( 0.3*(t-t0), 1.5 ) )
    prop.drug = arm::invlogit( prop.base + stats::rnorm( length(t), mean=0, sd=0.05 )
                               - (t>t0) * (0.05*(t-t0)) )
  } else {
    # impact on proportion of cases with outcome
    prop.base = arm::logit( seq( 0.5, 0.55, length.out=length(t) ) )
    prop.violent = arm::invlogit( prop.base + 1.5 + stats::rnorm( length(t), mean=0, sd=0.02 )
                                  - (t>t0) * (0.01*(t-t0)) )
    prop.property = arm::invlogit( prop.base + 1 + stats::rnorm( length(t), mean=0, sd=0.02 )
                                   - (t>t0) * (0.003*(t-t0)) )
    prop.drug = arm::invlogit( prop.base + stats::rnorm( length(t), mean=0, sd=0.02 )
                               - (t>t0) * (0.005*(t-t0)) )
  }
  ## Scenario 1b: multifacet, complex.
  # if ( FALSE ) {
  #   # number of cases of each type (not impacted by policy)
  #   N.drug = round( 300 - 5 * t + 2 * sin( 2 * pi * t / 12) )
  #   N.violent = 30 + round( 100 - 0.1 * t + 10 * sin( 2 * pi * t / 12) )
  #
  #   # impact on proportion of cases with outcome
  #   prop.drug = 0.6 - 0.01 * t # baseline index (will recalculate below)
  #   prop.violent = arm::invlogit( prop.drug/2 + stats::rnorm( length(t), mean=0, sd=0.15 )
  #                                 + (t>t0) * pmin( 0.3*(t-t0), 1.5 ) )
  #   prop.drug = arm::invlogit( -1 + prop.drug - (t>t0)* (0.15*(t-t0)) + stats::rnorm( length(t), mean=0, sd=0.15 ) )
  # }
  #
  # ## Scenario 2: change in number of drug cases, but no impact on case handling within category
  # ## Nonsensical, I think.
  # if ( FALSE ) {
  #   N.drug = round( 100 - 0.5 * t - (t >= t0) * ( 10 + (t-t0) * 2 ) )
  #   N.violent = round( 100 - 0.1 * t + 10 * sin( 2 * pi * t / 12) )
  #
  #   prop.drug = 0.6 - 0.01 * t
  #   prop.violent = arm::invlogit( prop.drug + 0.2 + stats::rnorm( length(t), mean=0, sd=0.15 ) )
  #   prop.drug = arm::invlogit( -2 + prop.drug + stats::rnorm( length(t), mean=0, sd=0.15 ) )
  # }
  # bundle our subgroups into one long dataframe (one row per month x type)
  make.frame = function( N, prop, type="unknown" ) {
    Y = round( N * prop )
    data.frame( month = t, type=type, N=N, Y=Y, prop = Y / N, stringsAsFactors = FALSE )
  }
  df = dplyr::bind_rows( make.frame( N.drug, prop.drug, "drug" ),
                         make.frame( N.violent, prop.violent, "violent" ) )
  if ( method =="jersey" ) {
    df = dplyr::bind_rows( df,
                           make.frame( N.property, prop.property, "property" ) )
  }
  # CONSISTENCY FIX: namespace-qualify mutate() like every other dplyr call
  # in this package (bare `mutate` would fail unless dplyr is attached).
  df = dplyr::mutate( df,
                      M = 1 + (month %% 12),
                      M.ind = as.factor(M),
                      A = sin( 2 * pi * month / 12 ),
                      B = cos( 2 * pi * month / 12 ),
                      Tx = as.numeric(month >= t0) )
  df = dplyr::arrange( df, month )
  df
}
#### Exploring and testing our fake data structure ####
# Developer-only scratch code: never executed (guarded by if (FALSE)).
# NOTE(review): aggregate_data() is called here without its Nname argument,
# so this snippet would not run as-is.
if ( FALSE ) {
  # fake, illustration data -- specifying the range of months
  t_min = -12*6.5
  t0 = 0
  t_max = 18
  dat = generate_fake_grouped_data( t_min, t0, t_max, method = "jersey" )
  head( dat )
  ss = aggregate_data( dat, "prop", "type", rich=TRUE )
  head( ss )
  plot( ss$pi_drug )
  sdat = aggregate_data( dat, "prop", "type", is_count=FALSE, rich = FALSE )
  sdat2 = aggregate_data( dat, "Y", "type", is_count=TRUE, rich= FALSE )
  sdat = merge( sdat, sdat2, by=c("month","N") )
  head( sdat )
  sdat$type = "all"
  d2 = dplyr::bind_rows( dat, sdat )
  d2 = tidyr::gather( d2, Y, N, prop, key="variable", value="outcome" )
  ggplot2::ggplot( d2, ggplot2::aes( month, outcome, col=type ) ) +
    ggplot2::facet_wrap( ~ variable , scales = "free_y" ) +
    ggplot2::geom_line() +
    ggplot2::geom_vline( xintercept=t0, col="red" )
  dat %>% dplyr::group_by( type ) %>% dplyr::summarise( N.bar = mean(N),
                                                        Y.bar = mean(Y),
                                                        prop.bar = mean(prop) )
}
#### Examining aggregation functions ####
# Developer-only scratch code: never executed (guarded by if (FALSE)).
# NOTE(review): adjust_data() is called with `pis` in the Nname position
# and gather() is used unqualified -- this snippet would need fixes to run.
if ( FALSE ) {
  head( dat )
  # Calculate how to weight the groups
  pis = calculate_group_weights( "type", dat, t0, max(dat$month) )
  pis
  # looking at rates
  head( dat )
  sdat = aggregate_data( dat, "prop", "type", is_count=FALSE )
  adjdat = adjust_data( dat, "prop", "type", pis )
  head( adjdat )
  adjdat = merge( adjdat, sdat, by=c("N","month"), all=TRUE )
  head( adjdat )
  d1 = gather( adjdat, starts_with( "pi" ), key="group", value="pi" )
  head( d1 )
  ggplot2::ggplot( d1, ggplot2::aes( month, pi, col=group ) ) +
    ggplot2::geom_line() +
    ggplot2::labs( title="Sizes of the groups")
  d2 = tidyr::gather( adjdat, starts_with( "prop" ), key="outcome", value="Y" )
  head( d2 )
  ggplot2::ggplot( d2, ggplot2::aes( d2$month, d2$Y, col=outcome ) ) +
    ggplot2::geom_line()
  # checking calculations
  head( adjdat )
  # Looking at counts
  sdat = aggregate_data( dat, "Y", "type", is_count=TRUE )
  head( sdat )
  adjdat = adjust_data( dat, "Y", "type", pis, is_count = TRUE )
  head( adjdat )
  d2 = tidyr::gather( adjdat, Y.adj, Y, starts_with( "type." ), key="outcome", value="Y" )
  head( d2 )
  ggplot2::ggplot( d2, ggplot2::aes( d2$month, d2$Y, col=outcome ) ) +
    ggplot2::geom_line()
}
#### Illustration of the easy modeling approach ####
# Developer-only scratch code: never executed (guarded by if (FALSE)).
# NOTE(review): relies on add_lagged_covariates() / process_outcome_model()
# defined elsewhere in the package, and calls adjust_data() without Nname.
if ( FALSE ) {
  # fake, illustration data -- specifying the range of months
  t_min = -12*6.5
  t0 = 0
  t_max = 18
  dat = generate_fake_grouped_data( t_min, t0, t_max )
  head( dat )
  pis = calculate_group_weights( "type", dat, t0, max(dat$month) )
  pis
  ##
  ## The proportion as outcome
  ##
  adjdat = adjust_data( dat, "prop", "type", pis, include_aggregate=TRUE )
  head( adjdat )
  adjdat = add_lagged_covariates(adjdat, "prop.adj", c("A","B") )
  head( adjdat )
  # Modeling adjusted and not
  envelope.adj = process_outcome_model( "prop.adj", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  envelope = process_outcome_model( "prop", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  envelope.drug = process_outcome_model( "prop.drug", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  envelope.violent = process_outcome_model( "prop.violent", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  env = dplyr::bind_rows( raw=envelope, adjusted=envelope.adj, drug=envelope.drug, violent=envelope.violent, .id="model")
  head( env )
  plt <- ggplot2::ggplot( env, ggplot2::aes( month, col=model ) ) +
    ggplot2::geom_line( ggplot2::aes(y= env$Ystar), lty=2 ) +
    ggplot2::geom_line( ggplot2::aes(y=env$Y)) + ggplot2::geom_point( ggplot2::aes( y=env$Y ), size=0.5 ) +
    #geom_line( aes(y=Ysmooth1), lty=2 ) +
    ggplot2::geom_vline( xintercept=t0 )
  #plt
  plt + facet_wrap( ~model )
  ##
  ## And with Y (counts)
  ##
  adjdat = adjust_data( dat, "Y", "type", pis, include_aggregate=TRUE, is_count = TRUE )
  head( adjdat )
  qplot( Y, Y.adj, data=adjdat )
  adjdat = add_lagged_covariates(adjdat, "Y.adj", c("A","B") )
  head( adjdat )
  # Modeling adjusted and not
  envelope.adj = process_outcome_model( "Y.adj", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  envelope = process_outcome_model( "Y", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  envelope.drug = process_outcome_model( "Y.drug", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  envelope.violent = process_outcome_model( "Y.violent", adjdat, t0=t0, R = 100, summarize = TRUE, smooth=FALSE )
  env = dplyr::bind_rows( raw=envelope, adjusted=envelope.adj, drug=envelope.drug, violent=envelope.violent, .id="model")
  head( env )
  plt <- ggplot2::ggplot( env, ggplot2::aes( month, col=model ) ) +
    ggplot2::geom_line( aes(y= env$Ystar), lty=2 ) +
    ggplot2::geom_line( aes(y= env$Y)) + ggplot2::geom_point( aes( y=env$Y ), size=0.5 ) +
    # ggplot2::geom_line( aes(y=Ysmooth1), lty=2 ) +
    ggplot2::geom_vline( xintercept=t0 )
  #plt
  plt + ggplot2::facet_wrap( ~model )
}
|
8f76b992fe73712f4c2f82421374048a54930b9e
|
3efe9a7e267d864bc03f560a5475d0a93020b6b5
|
/Tasks Collaboration/Nadiia/EDA RQ1.R
|
1c18975c9f2021f1b66244f0503b034024ae32a6
|
[
"MIT"
] |
permissive
|
dgquintero/Data-Science-with-R
|
2230a678ccdd11815a1fb8b6af33869b8f05ccbc
|
658a1889fe6865d590b02c13bc5c63ce13c4c21a
|
refs/heads/master
| 2020-05-16T14:31:19.122168
| 2019-01-15T16:49:30
| 2019-01-15T16:49:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,148
|
r
|
EDA RQ1.R
|
library(dplyr)
library(tidyverse)
library(ggplot2)
library(modelr)
library(gridExtra)
# Splitting the data --------------------------------------------------------
# Extract one shop's (distance, price) slice from the wide source table.
#
# Fix: the original used `data[, col1:col2]`, i.e. a *range* of columns. For
# the adjacent per-shop pairs that is harmless, but the aggregate slices call
# this with non-adjacent indices (11,14) and (12,14), which silently pulled
# in the intervening columns and left the "price" name on the wrong column.
# Selecting exactly c(col1, col2) restores the intended pairing.
#
# @param col1 Index of the distance column.
# @param col2 Index of the price column.
# @param data Wide data frame to slice; defaults to the global
#   `shop_ordered_slice3_ext` so existing two-argument calls keep working.
# @return A two-column data frame named "distance" and "price".
get_slice_for_shop <- function(col1, col2, data = shop_ordered_slice3_ext) {
  shop_slice <- data[, c(col1, col2)]
  colnames(shop_slice) <- c("distance", "price")
  shop_slice
}
# Per-shop slices: each shop's (distance, price) pair occupies two adjacent
# columns of the wide source table.
shop_1_data <- get_slice_for_shop(1,2)
shop_2_data <- get_slice_for_shop(3,4)
shop_3_data <- get_slice_for_shop(5,6)
shop_4_data <- get_slice_for_shop(7,8)
shop_5_data <- get_slice_for_shop(9,10)
# Aggregate slices pairing the average-price column (14) with different
# distance summaries.
shop_avg_data <- get_slice_for_shop(13,14)
# NOTE(review): (11,14) and (12,14) are non-adjacent column pairs; confirm
# that get_slice_for_shop selects exactly these two columns and not the
# whole 11:14 / 12:14 range.
shop_agg_min_data <- get_slice_for_shop(11,14)
shop_agg_max_data <- get_slice_for_shop(12,14)
# Stack the five shop slices into one long table, tagging each row with its
# shop id, so that all shops can be shown on a single graph.
joined_shops_data <- mutate(shop_1_data, Shop="1") %>%
union_all(mutate(shop_2_data, Shop="2")) %>%
union_all(mutate(shop_3_data, Shop="3")) %>%
union_all(mutate(shop_4_data, Shop="4")) %>%
union_all(mutate(shop_5_data, Shop="5"))
# Create base for plots -----------------------------------------------------
# Shared ggplot skeleton: distance on x, price on y, with the given title.
get_base_for_plot <- function(dataset, caption) {
  ggplot(data = dataset, mapping = aes(x = distance, y = price)) +
    ggtitle(caption)
}
# Visualisation part --------------------------------------------------------
# Colour palette indexed by plot number: entries 1-5 are the five shops,
# 6 the average view, 7 the min-distance and 8 the max-distance aggregates.
# (The name keeps the original spelling "shema" because it is referenced
# throughout the script.)
colours_shema <- c("Red", "Green", "Yellow", "Pink", "Blue", "Purple", "steelblue1", "tomato1")
# Covariation ---------------------------------------------------------------
# Scatterplots (the point geom) are the most useful display of the
# relationship between two continuous variables, such as distance and price,
# and can also compare a continuous variable against a categorical one.
# Build a semi-transparent point layer coloured by position `colorNum` in
# the global `colours_shema` palette.
add_geom_point <- function(colorNum) {
  geom_point(colour = colours_shema[[colorNum]], alpha = 0.3)
}
# Covariation scatterplot for one dataset: the shared base plot plus a point
# layer in the requested palette colour.
draw_cov_point_plot <- function(dataset, colorNum, caption) {
  get_base_for_plot(dataset, caption) + add_geom_point(colorNum)
}
# One covariation scatterplot per shop (palette index matches shop number).
p1_1 <- draw_cov_point_plot(shop_1_data, 1, "Shop 1")
p2_1 <- draw_cov_point_plot(shop_2_data, 2, "Shop 2")
p3_1 <- draw_cov_point_plot(shop_3_data, 3, "Shop 3")
p4_1 <- draw_cov_point_plot(shop_4_data, 4, "Shop 4")
p5_1 <- draw_cov_point_plot(shop_5_data, 5, "Shop 5")
# Aggregate views: average price against average/min/max distance.
pavg_1 <- draw_cov_point_plot(shop_avg_data, 6, "Average price with average distance")
pmin_1 <- draw_cov_point_plot(shop_agg_min_data, 7, "Average price with min distance")
pmax_1 <- draw_cov_point_plot(shop_agg_max_data, 8, "Average price with max distance")
# All shops overlaid on one plot, coloured by shop id.
pall_1 <- get_base_for_plot(joined_shops_data, "All shops") + geom_point(mapping = aes(colour = Shop), alpha=0.3)
# Arrange the individual plots into combined panels.
comb_cov_shops <- grid.arrange(p1_1, p2_1, p3_1, p4_1, p5_1,
nrow=2, ncol=3,
top="Covariation between distances and average prices")
comb_cov_aggrs <- grid.arrange(pmin_1, pmax_1,
nrow=2,
top= "Covariation between min/max distances and average prices")
# NOTE(review): "aggregetions" in the title below is a typo ("aggregations");
# left unchanged here because this edit is documentation-only.
comb_cov_avg <- grid.arrange(pall_1, pavg_1,
nrow=2,
top= "Covariation between distances and average prices (aggregetions)")
# Print the combined panels.
comb_cov_shops
comb_cov_aggrs
comb_cov_avg
# Missing values ------------------------------------------------------------
# Scatterplot of price vs distance with zero prices treated as missing.
#
# A price of 0 is a sentinel for "no observation"; it is recoded to NA so
# that ggplot2 drops those rows from the plot instead of drawing spurious
# points at price 0.
#
# Fix: the original also built a `missed = is.na(price)` indicator column
# that no aesthetic ever used; that dead code is removed.
#
# @param dataset Data frame with `distance` and `price` columns.
# @param colorNum Index into the global `colours_shema` palette.
# @param caption Plot title.
# @return A ggplot object.
draw_missing_values_plot <- function(dataset, colorNum, caption) {
  dataset_with_na <- dataset %>%
    mutate(price = ifelse(price == 0, NA, price))
  get_base_for_plot(dataset_with_na, caption) +
    add_geom_point(colorNum)
}
# Repeat the per-shop scatterplots with zero prices treated as missing.
p1_2 <- draw_missing_values_plot(shop_1_data, 1, "Shop 1")
p2_2 <- draw_missing_values_plot(shop_2_data, 2, "Shop 2")
p3_2 <- draw_missing_values_plot(shop_3_data, 3, "Shop 3")
p4_2 <- draw_missing_values_plot(shop_4_data, 4, "Shop 4")
p5_2 <- draw_missing_values_plot(shop_5_data, 5, "Shop 5")
pavg_2 <- draw_missing_values_plot(shop_avg_data, 6, "Average price with average distance")
comb_missing_vals <- grid.arrange(p1_2, p2_2, p3_2, p4_2, p5_2, pavg_2,
nrow=2, ncol=3,
top="Covariation between distances and average prices without missing values")
# Print the combined panel.
comb_missing_vals
# Visualizing distribution
# Frequency polygons (overlapping lines) make it easier than bars to compare
# the distribution of a continuous variable across levels of a categorical
# variable.
# NOTE(review): the `..density..` notation is superseded by
# `after_stat(density)` in recent ggplot2 releases — confirm against the
# ggplot2 version in use.
pavg_3 <- ggplot(data = shop_avg_data, mapping = aes(x = price, y = ..density..)) +
geom_freqpoly(colour=colours_shema[6], binwidth = 500) + ggtitle("Average price distribution")
pall_2 <- ggplot(data = joined_shops_data, mapping = aes(x = price, y = ..density..)) +
geom_freqpoly(mapping = aes(colour = Shop), binwidth = 500) + ggtitle("Common average price distribution")
comb_vis_distibution <- grid.arrange(pavg_3, pall_2,
nrow=2, ncol=1,
top="Distribution of an average price")
# Print the combined panel.
comb_vis_distibution
# Pattern visualisation
# Fit a log-log linear model of price on distance, then inspect residuals:
# once the strong distance effect is removed, the remaining variation in
# price can be explored on its own.
# Zero prices must be dropped before taking logs.
joined_shops_without_null <- filter(joined_shops_data, price != 0)
# NOTE(review): log(distance) assumes all distances are strictly positive —
# confirm the data cannot contain zero distances.
mod <- lm(log(price) ~ log(distance), data = joined_shops_without_null)
# Back-transform the residuals to the original (multiplicative) scale.
joined_shops_data2 <- joined_shops_without_null %>%
add_residuals(mod) %>%
mutate(resid = exp(resid))
# Residuals against price: the distance-adjusted price pattern.
pall_3 <- ggplot(data = joined_shops_data2) +
geom_point(mapping = aes(x = price, y = resid), colour=colours_shema[6], alpha=0.3) + ggtitle("Average price pattern")
pall_3
# With the distance effect removed, compare residual price levels across
# shops; remaining differences reflect other factors (product quality,
# alternatives, location, ...).
pall_4 <- ggplot(data = joined_shops_data2) +
geom_boxplot(mapping = aes(x = Shop, y = resid), color=colours_shema[6]) + ggtitle("Average price pattern")
pall_4
|
f72a9a955ca4b0bc844fbf2f09efeed5922898b3
|
799bcc71f47ce548abc0b48b3b2c320017e5d8a1
|
/Swirl/Variance_Inflation_Factors.R
|
af91a40a13441b80e3b6531377ad236e5b20b7a3
|
[] |
no_license
|
SivaguruB/Coursera-Regression-Models
|
add629b508d4bd276fb1858f6bf51d02a0f75365
|
7f9eaec79402517f03b161da1996d6695c24a0d2
|
refs/heads/master
| 2021-01-10T15:39:19.393801
| 2015-11-01T06:28:36
| 2015-11-01T06:28:36
| 43,863,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,803
|
r
|
Variance_Inflation_Factors.R
|
#Variance_Inflation_Factors
#SivaguruB
- Class: text
Output: "Variance Inflation Factors. (Slides for this and other Data Science courses may be found at github https://github.com/DataScienceSpecialization/courses. If you care to use them, they must be downloaded as a zip file and viewed locally. This lesson corresponds to Regression_Models/02_04_residuals_variation_diagnostics.)"
- Class: text
Output: "In modeling, our interest lies in parsimonious, interpretable representations of the data that enhance our understanding of the phenomena under study. Omitting variables results in bias in the coefficients of interest - unless their regressors are uncorrelated with the omitted ones. On the other hand, including any new variables increases (actual, not estimated) standard errors of other regressors. So we don't want to idly throw variables into the model. This lesson is about the second of these two issues, which is known as variance inflation."
- Class: text
Output: "We shall use simulations to illustrate variance inflation. The source code for these simulations is in a file named vifSims.R which I have copied into your working directory and tried to display in your source code editor. If I've failed to display it, you should open it manually."
- Class: mult_question
Output: "Find the function, makelms, at the top of vifSims.R. The final expression in makelms creates 3 linear models. The first, lm(y ~ x1), predicts y in terms of x1, the second predicts y in terms of x1 and x2, the third in terms of all three regressors. The second coefficient of each model, for instance coef(lm(y ~ x1))[2], is extracted and returned in a 3-long vector. What does this second coefficient represent?"
AnswerChoices: The coefficient of x1.; The coefficient of the intercept.; The coefficient of x2.
CorrectAnswer: The coefficient of x1.
AnswerTests: omnitest(correctVal= 'The coefficient of x1.')
Hint: "The first coefficient is that of the intercept. The rest are in the order given by the formula."
- Class: mult_question
Output: "In makelms, the simulated dependent variable, y, depends on which of the regressors?"
AnswerChoices: x1;x1 and x2;x1, x2, and x3;
CorrectAnswer: x1
AnswerTests: omnitest(correctVal= 'x1')
Hint: The dependent variable, y, is formed by the expression, y <- x1 + rnorm(length(x1), sd = .3). Which of the regressors, x1, x2, x3, appears in this expression?
- Class: mult_question
Output: "In vifSims.R, find the functions, rgp1() and rgp2(). Both functions generate 3 regressors, x1, x2, and x3. Compare the lines following the comment Point A in rgp1() with those following Point C in rgp2(). Which of the following statements about x1, x2, and x3 is true?"
AnswerChoices: x1, x2, and x3 are uncorrelated in rgp1(), but not in rgp2().;x1, x2, and x3 are correlated in rgp1(), but not in rgp2().;x1, x2, and x3 are uncorrelated in both rgp1() and rgp2().;x1, x2, and x3 are correlated in both rgp1() and rgp2().
CorrectAnswer: x1, x2, and x3 are uncorrelated in rgp1(), but not in rgp2().
AnswerTests: omnitest(correctVal= 'x1, x2, and x3 are uncorrelated in rgp1(), but not in rgp2().')
Hint: "In rgp2(), in the lines following Point C, x1 appears in the expressions which form x2 and x3. In rgp1(), in the lines following Point A, the regressors are formed by independent calls to rnorm(), which simulates independent, identically distributed samples from a normal distribution."
- Class: mult_question
  Output: "In the line following Point B in rgp1(), the function makelms(x1, x2, x3) is applied 1000 times. Each time it is applied, it simulates a new dependent variable, y, and returns estimates of the coefficient of x1 for each of the 3 models, y ~ x1, y ~ x1 + x2, and y ~ x1 + x2 + x3. It thus computes 1000 estimates of the 3 coefficients, collecting the results in a 3x1000 array, betas. In the next line, the expression, apply(betas, 1, var), does which of the following?"
AnswerChoices: Computes the variance of each row.;Computes the variance of each column.
CorrectAnswer: Computes the variance of each row.
AnswerTests: omnitest(correctVal= 'Computes the variance of each row.')
Hint: "We hope to illustrate the effect of extra variables on the variance of x1's coefficient. For this purpose we have 3 models, y ~ x1, y ~ x1 + x2, and y ~ x1 + x2 + x3. The three rows of beta correspond to the three models. The columns correspond to the 1000 simulated situations in which we estimate the coefficients of x1 for each of the three models. We are interested in the variance of the x1 coefficient for each of those three models."
- Class: cmd_question
Output: "The function rgp1() computes the variance in estimates of the coefficient of x1 in each of the three models, y ~ x1, y ~ x1 + x2, and y ~ x1 + x2 + x3. (The results are rounded to 5 decimal places for convenient viewing.) This simulation approximates the variance (i.e., squared standard error) of x1's coefficient in each of these three models. Recall that variance inflation is due to correlated regressors and that in rgp1() the regressors are uncorrelated. Run the simulation rgp1() now. Be patient. It takes a while."
CorrectAnswer: rgp1()
AnswerTests: omnitest(correctExpr='rgp1()')
Hint: Just enter rgp1() at the R prompt.
- Class: mult_question
Output: "The variances in each of the three models are approximately equal, as expected, since the other regressors, x2 and x3, are uncorrelated with the regressor of interest, x1. However, in rgp2(), x2 and x3 both depend on x1, so we should expect an effect. From the expressions assigning x2 and x3 which follow Point C, which is more strongly correlated with x1?"
AnswerChoices: x3;x2
CorrectAnswer: x3
AnswerTests: omnitest(correctVal= 'x3')
Hint: "In vifSims.R, look at the lines following Point C again, and note that 1/sqrt(2) in the expression for x2 is much smaller than 0.95 in the expression for x3."
- Class: cmd_question
Output: "Run rgp2() to simulate standard errors in the coefficient of x1 for cases in which x1 is correlated with the other regressors"
CorrectAnswer: rgp2()
AnswerTests: omnitest(correctExpr='rgp2()')
Hint: Just enter rgp2() at the R prompt.
- Class: text
Output: "In this case, variance inflation due to correlated regressors is clear, and is most pronounced in the third model, y ~ x1 + x2 + x3, since x3 is the regressor most strongly correlated with x1."
- Class: text
Output: "In these two simulations we had 1000 samples of estimated coefficients, hence could calculate sample variance in order to illustrate the effect. In a real case, we have only one set of coefficients and we depend on theoretical estimates. However, theoretical estimates contain an unknown constant of proportionality. We therefore depend on ratios of theoretical estimates called Variance Inflation Factors, or VIFs."
- Class: text
Output: "A variance inflation factor (VIF) is a ratio of estimated variances, the variance due to including the ith regressor, divided by that due to including a corresponding ideal regressor which is uncorrelated with the others. VIF's can be calculated directly, but the car package provides a convenient method for the purpose as we will illustrate using the Swiss data from the datasets package."
- Class: cmd_question
Output: "According to its documentation, the Swiss data set consists of a standardized fertility measure and socioeconomic indicators for each of 47 French-speaking provinces of Switzerland in about 1888 when Swiss fertility rates began to fall. Type head(swiss) or View(swiss) to examine the data."
CorrectAnswer: head(swiss)
AnswerTests: ANY_of_exprs('head(swiss)', 'View(swiss)')
Hint: Enter either head(swiss) or View(swiss) at the R prompt.
- Class: cmd_question
Output: "Fertility was thought to depend on five socioeconomic factors: the percent of males working in Agriculture, the percent of draftees receiving the highest grade on the army's Examination, the percent of draftees with Education beyond primary school, the percent of the population which was Roman Catholic, and the rate of Infant Mortality in the province. Use linear regression to model Fertility in terms of these five regressors and an intercept. Store the model in a variable named mdl."
CorrectAnswer: mdl <- lm(Fertility ~ ., swiss)
AnswerTests: creates_lm_model('mdl <- lm(Fertility ~ ., swiss)')
Hint: "Entering mdl <- lm(Fertility ~ ., swiss) is the easiest way to model Fertility as a function of all five regressors. The dot after the ~ means to include all (including an intercept.)"
- Class: cmd_question
Output: "Calculate the VIF's for each of the regressors using vif(mdl)."
CorrectAnswer: vif(mdl)
AnswerTests: omnitest('vif(mdl)')
Hint: "Just enter vif(mdl) at the R prompt."
- Class: text
Output: "These VIF's show, for each regression coefficient, the variance inflation due to including all the others. For instance, the variance in the estimated coefficient of Education is 2.774943 times what it might have been if Education were not correlated with the other regressors. Since Education and score on an Examination are likely to be correlated, we might guess that most of the variance inflation for Education is due to including Examination."
- Class: cmd_question
Output: "Make a second linear model of Fertility in which Examination is omitted, but the other four regressors are included. Store the result in a variable named mdl2."
CorrectAnswer: mdl2 <- lm(Fertility ~ . -Examination, swiss)
AnswerTests: creates_lm_model('mdl2 <- lm(Fertility ~ . -Examination, swiss)')
Hint: "Entering mdl2 <- lm(Fertility ~ . -Examination, swiss) is the easiest way to model Fertility as a function of all the regressors except Examination. The dot after ~ means all, and the minus sign in front of Examination means except."
- Class: cmd_question
Output: "Calculate the VIF's for this model using vif(mdl2)."
CorrectAnswer: vif(mdl2)
AnswerTests: omnitest(correctExpr='vif(mdl2)')
Hint: Just enter vif(mdl2) at the R prompt.
- Class: text
Output: "As expected, omitting Examination has markedly decreased the VIF for Education, from 2.774943 to 1.816361. Note that omitting Examination has had almost no effect the VIF for Infant Mortality. Chances are Examination and Infant Mortality are not strongly correlated. Now, before finishing this lesson, let's review several significant points."
- Class: mult_question
Output: "A VIF describes the increase in the variance of a coefficient due to the correlation of its regressor with the other regressors. What is the relationship of a VIF to the standard error of its coefficient?"
AnswerChoices: "VIF is the square of standard error inflation.;They are the same.;There is no relationship."
CorrectAnswer: VIF is the square of standard error inflation.
AnswerTests: omnitest(correctVal= 'VIF is the square of standard error inflation.')
Hint: "Variance is the square of standard deviation, and standard error is the standard deviation of an estimated coefficient."
- Class: mult_question
Output: "If a regressor is strongly correlated with others, hence will increase their VIF's, why shouldn't we just exclude it?"
AnswerChoices: "Excluding it might bias coefficient estimates of regressors with which it is correlated.;We should always exclude it.;We should never exclude anything."
CorrectAnswer: Excluding it might bias coefficient estimates of regressors with which it is correlated.
AnswerTests: omnitest(correctVal= 'Excluding it might bias coefficient estimates of regressors with which it is correlated.')
Hint: "Excluding a regressor can bias estimates of coefficients for correlated regressors."
- Class: mult_question
Output: "The problems of variance inflation and bias due to excluded regressors both involve correlated regressors. However there are methods, such as factor analysis or principal componenent analysis, which can convert regressors to an equivalent uncorrelated set. Why then, when modeling, should we not just use uncorrelated regressors and avoid all the trouble?"
AnswerChoices: "Using converted regressors may make interpretation difficult.; Factor analysis takes too much computation.; We should always use uncorrelated regressors."
CorrectAnswer: Using converted regressors may make interpretation difficult.
AnswerTests: omnitest(correctVal= 'Using converted regressors may make interpretation difficult.')
Hint: "In modeling, our interest lies in parsimonious, interpretable representations of the data that enhance our understanding of the phenomena under study."
- Class: text
Output: That completes the exercise in variance inflation. The issue of omitting regressors is discussed in another lesson.
|
8c032a3cfc475eedf712814b86219a28640520c5
|
e42e48b9bbfd879f794ec28c803a93b2081fa804
|
/tests/testthat/test_input_is_legal.R
|
e743a479827ab4f970744551e94800d681b9cd6f
|
[] |
no_license
|
Puriney/knnsmoother
|
62c32c01c28448b07153cc269e1b4167a9478d8a
|
0408052c758d6660f5cd4d489208ebf79179bf95
|
refs/heads/master
| 2021-09-04T10:13:40.047598
| 2018-01-09T20:39:02
| 2018-01-09T20:39:02
| 116,868,014
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43
|
r
|
test_input_is_legal.R
|
library("testthat")
library("knnsmoother")
|
1231e5c09527dcb2c52ba6d0169c648b0138eab3
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/irt/man/get_cat_administered_items.Rd
|
9c4c258e82233e264d85583929f3da39a26c22bd
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,078
|
rd
|
get_cat_administered_items.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cat_sim_helper_functions.R
\name{get_cat_administered_items}
\alias{get_cat_administered_items}
\title{Get administered items from a CAT output}
\usage{
get_cat_administered_items(cat_sim_output)
}
\arguments{
\item{cat_sim_output}{This is a list object containing elements that are
"cat_output" class.}
}
\value{
For \code{cat_output} with only one adaptive test, an
\code{Itempool} class object will be returned. For \code{cat_output} with
more than one adaptive tests, a list of \code{Itempool} class objects will
be returned.
}
\description{
This function returns an item pool object of the
administered items using the items in the estimate history. If there is
only one adaptive test, a single \code{Itempool} object is returned;
otherwise a list of \code{Itempool} objects is returned.
}
\examples{
cd <- create_cat_design(ip = generate_ip(n = 30), next_item_rule = 'mfi',
termination_rule = 'max_item',
termination_par = list(max_item = 10))
cat_data <- cat_sim(true_ability = rnorm(10), cd = cd)
get_cat_administered_items(cat_data)
}
\author{
Emre Gonulates
}
|
96c3b5531eb087c69f11c32904a7a05ce5394ea7
|
b3bf7b8c56b2f3e8d8594cccce6f65981c9514e5
|
/man/plot_cross_anova.Rd
|
2b9daf9d3fac7b7f98842072ed29225a01031ac8
|
[] |
no_license
|
faustovrz/bugcount
|
055ee388bcf9049e5d01cf3ad19898220f7787a2
|
f3fbb7e9ed5cecae78fdfaa1035e2a87e072be2d
|
refs/heads/master
| 2021-03-27T15:43:12.992541
| 2018-05-04T22:17:49
| 2018-05-04T22:17:49
| 104,142,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 584
|
rd
|
plot_cross_anova.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_cross_anova .R
\name{plot_cross_anova}
\alias{plot_cross_anova}
\title{Plot ANOVA per cross}
\usage{
plot_cross_anova(posthoc, exp_allowed)
}
\arguments{
\item{exp_allowed}{list of vectors containing indices of experiments
to merge in order to make ANOVA analysis}
\item{posthoc}{\code{multcomp} compact letter display \code{cld} object from
posthoc comparison in \code{plot_fit.nb.glm()}}
}
\value{
Nothing
}
\description{
Plot ANOVA per cross
}
\examples{
# plot_cross_anova(posthoc, exp_allowed)
}
|
fa6808f55e39c05f056735ddb0739b12551ecbdc
|
f134d9004a920a17eb1f092276b68f8721afae10
|
/machine_learning/classifier/plot_evaluation.R
|
17146953fff3b86137ff73bec9fbab7213290a63
|
[] |
no_license
|
jw44lavo/redux
|
8c848198b2d8d298e45ecefb65e6c67c42b74ff6
|
c14d4d3a8e0e6a9cd9bceb7611c09a37801476a3
|
refs/heads/main
| 2023-02-23T19:11:12.595226
| 2021-01-27T09:03:11
| 2021-01-27T09:03:11
| 315,052,059
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,683
|
r
|
plot_evaluation.R
|
library(ggplot2)
library(readr)
library(reshape2)
# Global plot theme: black-and-white with the legend at the top-left.
theme_set(
theme_bw() +
theme(legend.position = "top", legend.justification = "left")
#theme(legend.position = "none")
)
# Output figure settings.
height <- 8
width <- 10
font <- 20
dpi <- 300
# First command-line argument: path to the evaluation CSV.
args = commandArgs(trailingOnly=TRUE)
data <- read_csv(args[1])
data <- transform(data, coverage = as.numeric(coverage))
# Sentinel: missing coverage becomes 1000, which the x-axis below labels as
# "full". NOTE(review): this replaces NA in *every* column, not just
# coverage — confirm no other column can legitimately contain NA.
data[is.na(data)] <- 1000
# Append class counts to the min_appearance labels ...
data$min_appearance[(data$min_appearance)=="0"] <- "0 (82 classes)"
data$min_appearance[(data$min_appearance)=="50"] <- "50 (31 classes)"
data$min_appearance[(data$min_appearance)=="100"] <- "100 (17 classes)"
data$min_appearance[(data$min_appearance)=="200"] <- "200 (8 classes)"
# ... and fix the facet display order via factor levels.
data <- transform(data, min_appearance=factor(min_appearance,levels=c("0 (82 classes)", "50 (31 classes)", "100 (17 classes)", "200 (8 classes)"))) # sort
# Accuracy vs log-scaled coverage, one line per classifier, one facet per
# min_appearance setting; the 1000 sentinel is rendered as the "full" tick.
plot <- ggplot(data = data, mapping = aes(x = coverage, y = accuracy)) +
geom_point(aes(color = factor(model)), size = 3) +
geom_line(aes(color = model), alpha = 0.5, size = 1) +
scale_x_continuous(trans = "log10", breaks=c(1e-02, 1e-01, 1e+00, 1e+01, 1e+02, 1e+03), labels = c("0.01", "0.1", "1.0", "10.0", "100.0", "full")) +
ylim(0.0, 1.0) +
labs(y ="Accuracy", x = "Coverage", color = "Classifier") +
theme(text = element_text(size=font), axis.text.x = element_text(angle=0)) +
guides(color = guide_legend(override.aes = list(size=3))) +
#facet_grid(cols = vars(min_appearance))
scale_color_manual(labels = c("Gradient Boosting", "Most Frequent", "Neural Network"), values = c("#F8766D", "#00BFC4", "#7CAE00")) +
facet_wrap(~min_appearance, nrow = 2, labeller = label_both)
ggsave("mlearning_evaluation.png", plot, dpi = dpi, width = width, height = height)
|
d6e5a10ac00d528f5448870141645ff8f2f86f03
|
3ffbe0efc8c9de1eed0981188a1bbb6a52506369
|
/R/igraphbook.R
|
7a2661c2b435e7a894411544a9c52bb1788af0be
|
[] |
no_license
|
2wedfrt/igraphbook
|
06b0b9855085afdf8979b621ad2654cdd8e2198a
|
30e51a774efedf32162f157708819f951f0ff0d2
|
refs/heads/master
| 2022-04-07T10:48:26.802237
| 2014-12-29T20:00:53
| 2014-12-29T20:00:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 89
|
r
|
igraphbook.R
|
#' Companion package to the igraph book
#'
#' @docType package
#' @name igraphbook
NULL
|
3bcd882603bf469ff00342bc67a725bf52d9c276
|
4b7e815c41c1cc7b9e0dca42250e41dfd95e9b1d
|
/ggThemeAssist.R
|
1fc984b068896128624365addb2f47d4f191ed9c
|
[] |
no_license
|
gonggaobushang/R-Visualization
|
93a2fa5a705aaf25498cd6b6e86c8d64a5204ad9
|
44cce4b1fef7d50c15e7d9a6458cd3c70580204f
|
refs/heads/master
| 2020-06-03T20:37:03.943939
| 2020-01-15T06:35:33
| 2020-01-15T06:35:33
| 191,722,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 281
|
r
|
ggThemeAssist.R
|
# ggThemeAssist: pick ggplot2 theme settings interactively with the mouse;
# the gadget returns the corresponding R code.
library(ggplot2)
library(ggThemeAssist)
gg <- ggplot(mtcars, aes(x = hp, y = mpg, colour = as.factor(cyl))) + geom_point()
ggThemeAssistGadget(gg)
# Click "Done" in the top-right corner and the theme code is emitted
# automatically. Non-ASCII (e.g. Chinese) text is best adjusted by hand
# after the code has been generated.
|
9ed69074764aa95816b577d1a93617d5030a25ea
|
56f4370ece1d225c094692d9b96ee52f07493853
|
/simulation_script_3.R
|
9c0acd01d1f331c8c1f0e87031387b06c53beb50
|
[] |
no_license
|
jrebelo27/Simulation-code-of-persistence
|
dd3f90efaf33b798e1fd2d89ecead9f63f257092
|
5f6871207d8611343c0650e0d84c1fe7acf2f59d
|
refs/heads/main
| 2023-01-15T10:00:09.124776
| 2020-11-12T23:35:48
| 2020-11-12T23:35:48
| 312,245,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,463
|
r
|
simulation_script_3.R
|
###################################################################################
#Persistent population decay according to a power law
#Persister cells do not leave the dormant state as soon as the medium becomes detoxified
###################################################################################
# Detoxification radius of a resistant bacterium after `generation` cycles.
# The detoxified zone grows diffusively, i.e. proportionally to the square
# root of the number of elapsed generations.
#
# @param generation Number of completed generations (>= 0).
# @param diffusion_rate Radius growth coefficient. Defaults to the global
#   `initial_diffusion_rate` set by the simulation loop (so existing
#   one-argument calls keep working), but can be passed explicitly, which
#   also makes the function self-contained and testable.
# @return The detoxification radius.
resistant_detoxification_radius <- function(generation,
                                            diffusion_rate = initial_diffusion_rate) {
  diffusion_rate * sqrt(generation)
}
# Exponential decay of the non-persistent population:
# f1(t) = A_1 * exp(-k * t), with A_1 and k taken from the enclosing
# environment (set by the simulation loop).
function_f_1 <- function(generation) {
  A_1 * exp(-k * generation)
}
# Power-law decay of the persistent population:
# f2(t) = A_2 * t^beta, with A_2 and beta taken from the enclosing
# environment (set by the simulation loop).
function_f_2 <- function(generation) {
  A_2 * generation^beta
}
#Function that determines the percentage of susceptible bacteria in the dormant state at a certain time
# Returns 1 minus the cumulative decay: the exponential law `function_f_1`
# is integrated from 0 up to tau_zero, and the power law `function_f_2` from
# tau_zero onwards. Relies on the globals `time_generation`, `tau_zero`,
# `function_f_1` and `function_f_2` being set by the surrounding script.
function_g <- function(generation){
# Convert the generation count to minutes.
time <- (generation*time_generation)
if(time < tau_zero){
# Entirely within the exponential-decay regime.
return(1-integrate(function_f_1, lower= 0, upper = time)$value)
}else{
# Exponential regime up to tau_zero, power-law regime afterwards.
return(1- integrate(function_f_1, lower= 0, upper = tau_zero)$value - integrate(function_f_2, lower= tau_zero, upper = time)$value)
}
}
#Bacteria duplicate every thirty minutes
time_generation <- 30
#Iterate over all conditions and all parameters
# Sweep every combination of plasmid, initial density, tau_zero (time at
# which decay switches from exponential to power law), exponential rate k
# and power-law exponent beta. (The loop closes beyond this region.)
for (each_plasmid in c("R1")){
dir.create(paste0("~/data",each_plasmid))
for (each_density in c("L", "I", "H")){
dir.create(paste0("~/data", each_plasmid, "/", each_density))
for (tau_zero in c(20,30,50,60,70,80,90,100,110,120,130,150,200,250,300,350,400)){
for(k in c(0.015,0.020,0.025,0.030,0.040,0.045,0.050,0.055,0.060,0.065,0.070,0.075,0.080,0.090,0.095,0.1,0.2)){
for (beta in c(-1.1,-1.2,-1.5,-1.7,-1.8,-1.9,-2.0,-2.1,-2.2,-2.3,-2.4,-2.5,-2.7,-2.9,-3.1,-3.3,-3.5)){
#Calculate the values of A1 and A2 to ensure that the two functions have the same value when time is equal to tau zero
# (i.e. continuity of the decay curve at t = tau_zero).
p <- exp(k*tau_zero) - 1 - (tau_zero*k/(1+beta))
A_1 <- (k*exp(k*tau_zero))/p
A_2 <- (k/(tau_zero**beta))/p
#Calculate integrals to save time
# Pre-compute function_g for generations 1..31, clamped at 0.
# NOTE(review): 31 is presumably an upper bound on the number of generations
# any run reaches — confirm against the data. (Doc-only edit: the vector is
# grown with c() in a loop; preallocation would be preferable.)
integrals <- c()
for (i in 1:31){
if(function_g(i) > 0){
integrals <- c(integrals, function_g(i))
}else{
integrals <- c(integrals, 0)
}
}
plasmid <- each_plasmid
density <- each_density
# NOTE(review): `k1` and `k3` are not defined anywhere in this script (the
# loops define `k` and `beta`), so this dir.create() call will fail with
# "object 'k1' not found" — probably left over from an earlier
# parameterisation; confirm whether `k` and `beta` were intended here.
dir.create(paste0("~/data", each_plasmid, "/", each_density, "/tau_zero", as.character(tau_zero), "-k1", as.character(k1), "-k3", as.character(k3)))
setwd("~/original data")
# Averages file: initial/final counts of resistant and susceptible cells.
data_excel <- read.csv(file = paste0("data_", plasmid, "_", density, "_averages.csv"), header = TRUE, sep = ",", stringsAsFactors = FALSE, dec=",")
data_excel[,1] <- as.numeric(data_excel[,1])
data_excel[,2] <- as.numeric(data_excel[,2])
data_excel[,3] <- as.numeric(data_excel[,3])
data_excel[,4] <- as.numeric(data_excel[,4])
colnames(data_excel) <- c("initial_resistant", "final_resistant", "initial_susceptible", "final_susceptible")
#Iterate to test diferent diffusion rates
for (each_initial_diffusion_rate in c(0.2, 2)){
radius_value <- 0
incrementing_value <- each_initial_diffusion_rate
for (each_row in 1:3){
number_repetitions <- 1
number_conditions <- 10
initial_diffusion_rate <- radius_value
incrementing_radius <- incrementing_value
surviving_susceptibles_generation_final_path <- c()
surviving_persistents_generation_final_path <- c()
susceptibles_asleep_generation_final_path <- c()
actual_row <- -1
for (each_condition in 1:number_conditions){
initial_diffusion_rate <- initial_diffusion_rate + incrementing_radius
for (each_repetition in 1:3){
actual_row <- actual_row + 1
setwd("~/original data")
#Import the dataframes of the distances between each susceptible cell and the closer resistant one
new_dataframe <- read.csv(file = paste0("distances_", plasmid, "_", density, "_L", each_row, "_R", each_repetition, ".csv"), header = TRUE, sep = ";", stringsAsFactors = FALSE, dec=",")
assign(paste0(plasmid, "_L", each_row, "_R", each_repetition), new_dataframe)
# Repeat the survival bookkeeping `number_repetitions` times for this distance
# table; each pass re-consumes a fresh copy of column 1 of `new_dataframe`.
for (each_equal_repetition in 1:number_repetitions){
  # NOTE(review): column 1 is assumed to hold each dormant susceptible cell's
  # distance to the nearest resistant cell -- confirm against the CSV writer.
  all_distances <- new_dataframe[,1]
  all_distances <- as.data.frame(all_distances)
  number_initial_susceptible <- round(data_excel$initial_susceptible[[each_row]])
  # Number of doublings for the resistant population to grow from its initial
  # to its final size.
  number_generations <- round(log2(data_excel$final_resistant[[each_row]]/data_excel$initial_resistant[[each_row]]))
  # Per-generation tallies, appended to as the generation loop runs.
  surviving_susceptibles_generation <- c()
  surviving_persistents_generation <- c()
  susceptibles_asleep_generation <- c()
  # Iterate as many generations as the ones completed by resistant cells
  for (each_generation in 0:(number_generations)){
    resistant_detoxification_radius_bacterium <- resistant_detoxification_radius(each_generation)
    awake_susceptibles <- c()
    # Calculate the number of susceptible cells that leave the dormant state in this cycle
    if (nrow(all_distances) >= 1){
      amount_susceptibles_alive <- round(integrals[each_generation+1]*number_initial_susceptible,0)
      if(amount_susceptibles_alive < nrow(all_distances)){
        amount_susceptibles_to_awake <- nrow(all_distances) - amount_susceptibles_alive
        # The leading rows of the table are the ones that wake this cycle.
        awake_susceptibles <- (1:amount_susceptibles_to_awake)
      }
    }
    awake_susceptibles_distances <- c()
    if (length(awake_susceptibles) >= 1 && nrow(all_distances)>0){
      awake_susceptibles_distances <- all_distances[awake_susceptibles, 1]
      # Drop the awakened rows so they are not woken again in a later cycle.
      all_distances <- as.data.frame(all_distances[-awake_susceptibles,])
    }
    surviving_susceptibles_this_generation <- 0
    surviving_persistents_this_generation <- 0
    # Intervals before tau zero (all susceptible bacteria are considered non-persistent)
    if((each_generation*time_generation) <= (tau_zero%/%time_generation * time_generation)){
      # Verify how many susceptible bacteria survive
      if (length(awake_susceptibles_distances) != 0) {
        # NOTE(review): strict "<" here vs "<=" in the other two branches --
        # confirm the boundary case is intentional.
        surviving_susceptibles_this_generation <- sum(awake_susceptibles_distances < resistant_detoxification_radius_bacterium)
      }
    # Intervals after tau zero (all susceptible bacteria are considered persistent)
    }else if((each_generation*time_generation) > (tau_zero%/%time_generation * time_generation + time_generation)){
      # Verify how many susceptible bacteria survive
      if (length(awake_susceptibles_distances) != 0) {
        surviving_persistents_this_generation <- sum(awake_susceptibles_distances <= resistant_detoxification_radius_bacterium)
      }
    # Interval straddling tau zero (susceptible bacteria can be considered persistent or non-persistent)
    }else{
      # Verify how many susceptible bacteria survive
      if (length(awake_susceptibles_distances) != 0) {
        surviving_susceptibles_this_generation_all <- sum(awake_susceptibles_distances <= resistant_detoxification_radius_bacterium)
      }
      # Split survivors between the two classes in proportion to the weight of
      # f_1 (mass before tau_zero) relative to f_1 + f_2 (mass up to now).
      total_weight <- integrate(function_f_1, lower= 0, upper = tau_zero)$value + integrate(function_f_2, lower= tau_zero, upper = (each_generation*time_generation))$value
      susceptibles_percentage <- integrate(function_f_1, lower= 0, upper = tau_zero)$value/total_weight
      surviving_susceptibles_this_generation <- round(surviving_susceptibles_this_generation_all*susceptibles_percentage,0)
      surviving_persistents_this_generation <- surviving_susceptibles_this_generation_all - surviving_susceptibles_this_generation
    }
    surviving_susceptibles_generation <- c(surviving_susceptibles_generation, surviving_susceptibles_this_generation)
    surviving_persistents_generation <- c(surviving_persistents_generation, surviving_persistents_this_generation)
    susceptibles_asleep_generation <- c(susceptibles_asleep_generation, nrow(all_distances))
    # In the final generation, any cells still dormant are counted as
    # persistent survivors.
    if (each_generation == number_generations && (nrow(all_distances) != 0)){
      surviving_persistents_generation[length(surviving_persistents_generation)] <- (surviving_persistents_generation[length(surviving_persistents_generation)]+nrow(all_distances))
    }
  }
  # First pass starts the per-repetition accumulators; later passes stack rows.
  if (each_equal_repetition == 1){
    surviving_susceptibles_generation_repetition_path <- surviving_susceptibles_generation
    surviving_persistents_generation_repetition_path <- surviving_persistents_generation
    susceptibles_asleep_generation_repetition_path <- susceptibles_asleep_generation
  }else{
    surviving_susceptibles_generation_repetition_path <- rbind(surviving_susceptibles_generation_repetition_path, surviving_susceptibles_generation)
    surviving_persistents_generation_repetition_path <- rbind(surviving_persistents_generation_repetition_path, surviving_persistents_generation)
    susceptibles_asleep_generation_repetition_path <- rbind(susceptibles_asleep_generation_repetition_path, susceptibles_asleep_generation)
  }
}
# Initialize the provisional accumulators at the start of a sweep.
# NOTE(review): `actual_row` is defined outside this excerpt -- it appears to
# count processed parameter rows; confirm.
if(actual_row == 0){
  surviving_susceptibles_generation_provisional_path <- c()
  surviving_persistents_generation_provisional_path <- c()
  susceptibles_asleep_generation_provisional_path <- c()
}
# Every 10th row, flush the provisional accumulators into the final tables;
# otherwise keep appending the current repetition block to the provisionals.
if (actual_row%%10 == 0 && actual_row != 0){
  surviving_susceptibles_generation_final_path <- unname(surviving_susceptibles_generation_final_path)
  surviving_susceptibles_generation_provisional_path <- unname(surviving_susceptibles_generation_provisional_path)
  surviving_persistents_generation_final_path <- unname(surviving_persistents_generation_final_path)
  surviving_persistents_generation_provisional_path <- unname(surviving_persistents_generation_provisional_path)
  susceptibles_asleep_generation_final_path <- unname(susceptibles_asleep_generation_final_path)
  susceptibles_asleep_generation_provisional_path <- unname(susceptibles_asleep_generation_provisional_path)
  if (actual_row > 10){
    # Force matching column names so rbind() accepts both pieces.
    column_names <- c(1:ncol(surviving_susceptibles_generation_final_path))
    colnames(surviving_susceptibles_generation_final_path) <- column_names
    colnames(surviving_susceptibles_generation_provisional_path) <- column_names
    colnames(surviving_persistents_generation_final_path) <- column_names
    colnames(surviving_persistents_generation_provisional_path) <- column_names
    colnames(susceptibles_asleep_generation_final_path) <- column_names
    colnames(susceptibles_asleep_generation_provisional_path) <- column_names
  }
  surviving_susceptibles_generation_final_path <- rbind(surviving_susceptibles_generation_final_path, surviving_susceptibles_generation_provisional_path)
  surviving_susceptibles_generation_final_path <- as.data.frame(surviving_susceptibles_generation_final_path)
  # Restart the provisional accumulator from the current repetition block.
  surviving_susceptibles_generation_provisional_path <- c(surviving_susceptibles_generation_repetition_path)
  surviving_persistents_generation_final_path <- rbind(surviving_persistents_generation_final_path, surviving_persistents_generation_provisional_path)
  surviving_persistents_generation_final_path <- as.data.frame(surviving_persistents_generation_final_path)
  surviving_persistents_generation_provisional_path <- c(surviving_persistents_generation_repetition_path)
  susceptibles_asleep_generation_final_path <- rbind(susceptibles_asleep_generation_final_path, susceptibles_asleep_generation_provisional_path)
  susceptibles_asleep_generation_final_path <- as.data.frame(susceptibles_asleep_generation_final_path)
  susceptibles_asleep_generation_provisional_path <- c(susceptibles_asleep_generation_repetition_path)
}else{
  surviving_susceptibles_generation_provisional_path <- rbind(surviving_susceptibles_generation_provisional_path, surviving_susceptibles_generation_repetition_path)
  surviving_susceptibles_generation_provisional_path <- as.data.frame(surviving_susceptibles_generation_provisional_path)
  surviving_persistents_generation_provisional_path <- rbind(surviving_persistents_generation_provisional_path, surviving_persistents_generation_repetition_path)
  surviving_persistents_generation_provisional_path <- as.data.frame(surviving_persistents_generation_provisional_path)
  susceptibles_asleep_generation_provisional_path <- rbind(susceptibles_asleep_generation_provisional_path, susceptibles_asleep_generation_repetition_path)
  susceptibles_asleep_generation_provisional_path <- as.data.frame(susceptibles_asleep_generation_provisional_path)
}
}
}
# Final assembly: stack the last (unflushed) provisional block, label each row
# with its condition/repetition, and write one CSV per statistic.
surviving_susceptibles_generation_final_path <- as.data.frame(surviving_susceptibles_generation_final_path)
surviving_persistents_generation_final_path <- as.data.frame(surviving_persistents_generation_final_path)
susceptibles_asleep_generation_final_path <- as.data.frame(susceptibles_asleep_generation_final_path)
out_repetitions <- c(1:3)
in_repetitions <- c(1:number_repetitions)
# One radius condition per step of `incrementing_radius`.
conditions <- seq((initial_diffusion_rate - incrementing_radius*(number_conditions-1)), incrementing_radius*(number_conditions), incrementing_radius)
names <- c()
# Build "R<in>_R<out>_radius_<condition>" labels in the same nesting order the
# rows were accumulated above.
for (each_condition in conditions){
  for (each_in_repetitions in in_repetitions){
    for(each_out_repetitions in out_repetitions){
      names <- c(names, paste0("R", each_in_repetitions , "_R", each_out_repetitions, "_radius_", each_condition))
    }
  }
}
# Match column names so rbind() accepts the final + provisional pieces.
column_names <- c(1:ncol(surviving_susceptibles_generation_final_path))
colnames(surviving_susceptibles_generation_final_path) <- column_names
colnames(surviving_susceptibles_generation_provisional_path) <- column_names
colnames(surviving_persistents_generation_final_path) <- column_names
colnames(surviving_persistents_generation_provisional_path) <- column_names
colnames(susceptibles_asleep_generation_final_path) <- column_names
colnames(susceptibles_asleep_generation_provisional_path) <- column_names
surviving_susceptibles_generation_final_path <- rbind(surviving_susceptibles_generation_final_path, surviving_susceptibles_generation_provisional_path)
surviving_persistents_generation_final_path <- rbind(surviving_persistents_generation_final_path, surviving_persistents_generation_provisional_path)
susceptibles_asleep_generation_final_path <- rbind(susceptibles_asleep_generation_final_path, susceptibles_asleep_generation_provisional_path)
# Prepend the condition label as the first column.
surviving_susceptibles_generation_final_path <- cbind(names, surviving_susceptibles_generation_final_path, row.names = NULL)
surviving_persistents_generation_final_path <- cbind(names, surviving_persistents_generation_final_path, row.names = NULL)
susceptibles_asleep_generation_final_path <- cbind(names, susceptibles_asleep_generation_final_path, row.names = NULL)
# Remaining columns are generations G1..Gn.
column_names <- c("Condicao", paste0("G", c(1:(ncol(surviving_susceptibles_generation_final_path)-1))))
colnames(surviving_susceptibles_generation_final_path) <- column_names
colnames(surviving_persistents_generation_final_path) <- column_names
colnames(susceptibles_asleep_generation_final_path) <- column_names
rownames(surviving_susceptibles_generation_final_path) <- NULL
rownames(surviving_persistents_generation_final_path) <- NULL
rownames(susceptibles_asleep_generation_final_path) <- NULL
# Coerce everything to character before writing (apply() returns a matrix).
surviving_susceptibles_generation_final_path <- apply(surviving_susceptibles_generation_final_path, 2, as.character)
surviving_persistents_generation_final_path <- apply(surviving_persistents_generation_final_path, 2, as.character)
susceptibles_asleep_generation_final_path <- apply(susceptibles_asleep_generation_final_path, 2, as.character)
# NOTE(review): the directory uses `each_plasmid`/`each_density` while the file
# names use `plasmid` -- confirm both variables are in scope and consistent.
setwd(paste0("~/data", each_plasmid, "/", each_density, "/tau_zero", as.character(tau_zero), "-k", as.character(k), "-beta", as.character(beta)))
write.table(surviving_susceptibles_generation_final_path,
            file = paste0("amount_surviving_susceptibles_generation_", plasmid, "_", each_density, "_tau_zero", as.character(tau_zero), "_k", as.character(k), "_beta", as.character(beta), "_radius_", incrementing_radius, "_", incrementing_radius*number_conditions, "_L", each_row, ".csv"),
            sep = ",",
            row.names = FALSE,
            col.names = TRUE)
write.table(surviving_persistents_generation_final_path,
            file = paste0("amount_surviving_persistents_generation_", plasmid, "_", each_density, "_tau_zero", as.character(tau_zero), "_k", as.character(k), "_beta", as.character(beta), "_radius_", incrementing_radius, "_", incrementing_radius*number_conditions, "_L", each_row, ".csv"),
            sep = ",",
            row.names = FALSE,
            col.names = TRUE)
write.table(susceptibles_asleep_generation_final_path ,
            file = paste0("susceptibles_asleep_generation_", plasmid, "_", each_density, "_tau_zero", as.character(tau_zero), "_k", as.character(k), "_beta", as.character(beta), "_radius_", incrementing_radius, "_", incrementing_radius*number_conditions, "_L", each_row, ".csv"),
            sep = ",",
            row.names = FALSE,
            col.names = TRUE)
}
}
}
}
}
}
}
|
3ab9fb43173c279a2b0fe20c6f572ebf80456d31
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Ecfun/examples/compareLengths.Rd.R
|
bf1c2c68a477e71fb72ea158135126447d74adb5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
compareLengths.Rd.R
|
# Auto-generated example script for Ecfun::compareLengths, extracted from the
# package's Rd examples. The "Don't show:" / "End(Don't show)" markers come
# from the Rd tooling and wrap the stopifnot() scaffolding that turns each
# example into an executable check.
library(Ecfun)
### Name: compareLengths
### Title: Compare the lengths of two objects
### Aliases: compareLengths
### Keywords: manip

### ** Examples

##
## 1. equal
##
## Don't show:
stopifnot(
## End(Don't show)
all.equal(compareLengths(1:3, 4:6), c("equal", ''))
## Don't show:
)
## End(Don't show)

##
## 2. compatible
##
a <- 1:2
b <- letters[1:6]
comp.ab <- compareLengths(a, b, message0='Chk:')
comp.ba <- compareLengths(b, a, message0='Chk:')

# check
chk.ab <- c('compatible',
    'Chk: length(b) = 6 is 3 times length(a) = 2')
## Don't show:
stopifnot(
## End(Don't show)
all.equal(comp.ab, chk.ab)
## Don't show:
)
## End(Don't show)
## Don't show:
stopifnot(
## End(Don't show)
all.equal(comp.ba, chk.ab)
## Don't show:
)
## End(Don't show)

##
## 3. incompatible
##
Z <- LETTERS[1:3]
comp.aZ <- compareLengths(a, Z)

# check
chk.aZ <- c('incompatible',
    ' length(Z) = 3 is not a multiple of length(a) = 2')
## Don't show:
stopifnot(
## End(Don't show)
all.equal(comp.aZ, chk.aZ)
## Don't show:
)
## End(Don't show)

##
## 4. problems with name.x and name.y
##
comp.ab2 <- compareLengths(a, b, '', '')

# check
chk.ab2 <- c('compatible',
  'in compareLengths: length(y) = 6 is 3 times length(x) = 2')
## Don't show:
stopifnot(
## End(Don't show)
all.equal(comp.ab2, chk.ab2)
## Don't show:
)
## End(Don't show)

##
## 5. zeroLength
##
zeroLen <- compareLengths(logical(0), 1)

# check
zeroL <- c('compatible', ' length(logical(0)) = 0')
## Don't show:
stopifnot(
## End(Don't show)
all.equal(zeroLen, zeroL)
## Don't show:
)
## End(Don't show)
|
04adbc5032a27bb99d3871b6f6cce3acdf5a5ebf
|
aa78d24940b7d36d7e3a6e1a6e2241b8c20cc5ae
|
/R/作图基础/pie.R
|
225f805d819e08f60b1780b6c1823d3edad5a2f6
|
[] |
no_license
|
bancage/program
|
4b94af6663cfc537897730cbaab34f4db86a1c58
|
73b6d41d0a7570057dbe877fa82ed5767971ca78
|
refs/heads/master
| 2021-01-01T19:42:50.132850
| 2013-10-19T13:41:32
| 2013-10-19T13:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
pie.R
|
# Pie chart of weekly flower counts (Mon-Fri), one slice per weekday.
flowers <- c(12, 8, 6, 20, 16)
fruits <- c(7, 11, 17, 9, 5)  # unused here; kept for parity with the original

# Percentage label for each slice, e.g. "19.4%".
slice_pct <- round(flowers / sum(flowers) * 100, 1)
slice_labels <- paste0(slice_pct, "%")

# One distinct colour per slice.
slice_cols <- rainbow(length(flowers))
pie(flowers, main = "Flowers", col = slice_cols, labels = slice_labels, cex = 0.8)
legend(1.5, 0.5, c("Mon", "Tue", "Wed", "Thu", "Fri"), cex = 0.8, fill = slice_cols)
|
8b111947419509e8c207fdc0fd269bc4e5cb4fc0
|
63d8191d625544acbca97a939d093e61db88a525
|
/R/package.R
|
ed748e64bdaad14b120eecd2d9a67935214755b3
|
[] |
no_license
|
jamovi/jmvcore
|
80186e1c322024b455bfcacc2b9a4be40cf1534f
|
057c97aae936a53981c422891f75b3936a5f463b
|
refs/heads/master
| 2023-08-16T17:34:01.612300
| 2023-08-07T10:38:32
| 2023-08-14T08:14:15
| 70,128,789
| 5
| 8
| null | 2023-08-04T13:31:29
| 2016-10-06T06:33:43
|
R
|
UTF-8
|
R
| false
| false
| 20
|
r
|
package.R
|
#' @import R6
# Package-level roxygen stub: the NULL below anchors the @import directive so
# roxygen2 emits `import(R6)` into the NAMESPACE file. No runtime effect.
NULL
|
74341d456408bfbea7702897bc8c6d041300ca71
|
4efbe1d55ea0f650168d1323d1813ea0eaa2ca8b
|
/misc/boston/bostmoregamfit_int.r
|
b5ab43b3af7d6b862a3b251cdc4cf478d75816e4
|
[] |
no_license
|
panders225/semiparametric-regression
|
d8e7470576f7d3fd988ba7e677eaceed3216caea
|
1fd5a22f4283daf856aad61af2f4abccfa4fc324
|
refs/heads/master
| 2021-05-11T08:32:43.127850
| 2018-04-24T00:55:53
| 2018-04-24T00:55:53
| 118,055,099
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,831
|
r
|
bostmoregamfit_int.r
|
###########################################################
# Clear the workspace. I do this every time
# NOTE(review): rm(list = ls()) in a script wipes the caller's session state;
# consider running the script in a fresh R session instead.
###########################################################
rm(list = ls())

###########################################################
# Set the seed. I also do this every time
###########################################################
set.seed(4428967)

###########################################################
# Set the working directory. I probably do not need to
# do this (it will not work in Linux), but I do to keep
# my blood pressure down
# NOTE(review): absolute Windows path; nothing below reads from this
# directory (the data come from the HRW package), so this could be removed.
###########################################################
setwd("C:\\Users\\Carroll\\Documents\\My_Documents\\2018_Semi_in_R\\Data_Sets\\Boston_Housing")

###########################################################
# Get the libraries
###########################################################
library(mgcv)
library(HRW)

###########################################################
# Load in data:
###########################################################
data(BostonMortgages)

###########################################################
# Get rid of the massive outlier in dir
# (dir = debt-to-income ratio; rows with dir >= 1.5 are dropped)
###########################################################
dir = BostonMortgages$dir
BostonMortgages = BostonMortgages[dir<1.5,]

###########################################################
# Obtain GAM fit:
# I fit the credit score as a smooth term but with only k=4
###########################################################
fit2GAMBostMort <- gam(deny ~ black + s(dir) + s(lvr)
                       + pbcr + self + single + s(ccs,k=4),
                       method="REML",
                       family = binomial(link="logit"),
                       data = BostonMortgages)

###########################################################
# Now allow factor by curve interactions
# (separate smooths of dir and lvr for each level of pbcr)
###########################################################
fitFacByCurvBostMort <- gam(deny ~ black
                            + s(dir,by = factor(pbcr))
                            + s(lvr,by = factor(pbcr)) +
                            + pbcr + self + single + s(ccs,k = 4),
                            family = binomial,data = BostonMortgages)
summary(fitFacByCurvBostMort)

###########################################################
# Next I compare the two models. The as.factor model
# is more complex than the spline model, remember
###########################################################
anova(fit2GAMBostMort,fitFacByCurvBostMort,test="Chisq")
qq = anova(fit2GAMBostMort,fitFacByCurvBostMort,test="Chisq")
cat('Model comparison p-value = ',round(qq[2,5],2),"\n")
|
ec077871f761762bf078ff5d7d3def00a64cbf8b
|
190d51eb26b920930ed7033e7dae7f3f1f862733
|
/R/updateUKweatherData.R
|
278893d9cbcef1b1084186f528d0d1636352b9e9
|
[] |
no_license
|
harrysouthworth/mojito
|
4415c5c881d4c8c9523dcd92043390cfcd52d811
|
8ff57e1a5316849c9565565a0786aa0a438c37cc
|
refs/heads/master
| 2021-06-27T00:29:25.095510
| 2020-10-13T11:20:44
| 2020-10-13T11:20:44
| 155,350,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,613
|
r
|
updateUKweatherData.R
|
#' Read data obtained from UK Historic Weather Station Data and AZN price data and create data objects
#' @aliases updateAZN
#' @details Use \code{tail(eksdalemuir)} and \code{tail(newtonrigg)} to find
#' out if it worked. You might want to delete the .txt files once you're done.
#' These functions are intended for use to update the course data prior to
#' it being run and are not intended for use by the students.
#' NOTE THAT the function assumes the first line of the txt weather files is the
#' one starting with "yyyy mm ...". Also NOTE THAT to get the AZN data, go to
#' Yahoo finance UK, find AZN, look for the "Historical data" link in the
#' bar along the top. The start date is 10-24-1993 (i.e. US format).
#' @param dataPath String giving path to the downloaded .txt files. Defaults
#' to \code{dataPath = "data"}.
#' @param eskdalemuir Strings giving the names of the files to
#' read from the data directory. NOTE THAT the file format is assumed to be
#' space separated values for the weather data, csv for the AZN data.
#' @keywords internal
updateUKweatherData <- function(dataPath="data/", eskdalemuir = "eskdalemuir.txt",
                                newtonrigg = "newtonrigg.txt"){
  # [-1, ] drops the units row that sits under the header; because of that row
  # every column is parsed as character, hence the as.numeric() conversions.
  eskdalemuir <- suppressMessages(readr::read_table(file.path(dataPath, eskdalemuir)))[-1, ]
  eskdalemuir <- suppressWarnings(
    mutate(eskdalemuir, tmax = as.numeric(tmax), tmin = as.numeric(tmin),
           af = as.numeric(af),
           # fixed = TRUE: "*" is a regex quantifier, so strip the station
           # flags as literal text rather than relying on the regex engine
           # tolerating a dangling "*".
           sun = gsub("#", "", sun, fixed = TRUE),
           sun = gsub("*", "", sun, fixed = TRUE),
           sun = gsub(" Provisional", "", sun, fixed = TRUE),
           sun = as.numeric(sun),
           # Bug fix: compare months numerically. The columns arrive as
           # character, and lexicographically "2" < "10" is FALSE, so months
           # 2-9 were never zero-padded.
           mm = ifelse(as.numeric(mm) < 10, paste0("0", mm), as.character(mm)))
  ) %>% as.data.frame()

  newtonrigg <- suppressMessages(readr::read_table(file.path(dataPath, newtonrigg)))[-1, ]
  newtonrigg <- suppressWarnings(
    mutate(newtonrigg,
           # Same literal "*" stripping and numeric month comparison as above.
           tmax = as.numeric(gsub("*", "", tmax, fixed = TRUE)),
           tmin = as.numeric(gsub("*", "", tmin, fixed = TRUE)),
           rain = as.numeric(gsub("*", "", rain, fixed = TRUE)),
           af = as.numeric(gsub("*", "", af, fixed = TRUE)),
           sun = as.numeric(gsub("*", "", sun, fixed = TRUE)),
           mm = ifelse(as.numeric(mm) < 10, paste0("0", mm), as.character(mm)))
  ) %>%
    as.data.frame()

  # Persist the cleaned tables next to the raw files.
  save(eskdalemuir, file = file.path(dataPath, "eskdalemuir.RData"))
  save(newtonrigg, file = file.path(dataPath, "newtonrigg.RData"))
  invisible()
}
#' Read the downloaded AZN price CSV and save it as data/azn.RData.
#'
#' @param dataPath Directory containing the CSV (default "data").
#' @param azn File name of the CSV (default "azn.csv").
updateAZN <- function(dataPath="data", azn = "azn.csv"){
  # Bug fix: read the file named by `azn` -- the original hard-coded
  # "azn.csv" and silently ignored the argument.
  azn <- readr::read_csv(file.path(dataPath, azn)) %>%
    setNames(tolower(names(.))) %>%
    # Drop the adjusted-close column; only raw OHLCV columns are kept.
    select(-`adj close`) %>%
    as.data.frame()
  # The object is saved under the name `azn` (hence the reassignment above).
  save(azn, file = file.path(dataPath, "azn.RData"))
  invisible()
}
|
5cda80b7645749503d1116a971876fd8b2637343
|
4d2c2e6c274dc94b9b418fbf397cbb5bd06a23ed
|
/pbdr/tutorial3/tutorial3/11-npbs_for.r
|
fbe4ad23be8b26017b4774a619bc2fa95b47295c
|
[] |
no_license
|
snoweye/snoweye.github.io
|
5991f8d533e0a0b1428f1c179768925a21bb47b9
|
e5d35e49aca7520f97d0719c829df4fc11ff63e1
|
refs/heads/master
| 2023-07-22T13:35:24.683834
| 2023-07-06T22:09:43
| 2023-07-06T22:09:43
| 64,176,642
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 901
|
r
|
11-npbs_for.r
|
### Non-parametric bootstrap for the confidence interval for the
### GLMM model of Deer Ecervi L1 data set.
# NOTE(review): rm(list = ls()) wipes the caller's session; acceptable only
# because this is a standalone tutorial script.
rm(list = ls())
# `da`, `gen.new.data()`, glmmPQL/VarCorr and friends come from these two
# sourced files -- they are not visible in this excerpt.
source("u0-deer.r")                        # Load data.
source("u1-npbs.r")                        # Load library and utility.

### Bootstrap.
n.boot <- 100
set.seed(1234)
# Bootstrap replicates are collected column-wise; grown with cbind() inside
# the loop (fine at n.boot = 100, but O(n^2) -- preallocate if scaled up).
ret.sigma <- NULL
ret.a.i <- NULL
for(i.boot in 1:n.boot){
  # One bootstrap resample, refit with penalized quasi-likelihood.
  da.new <- gen.new.data(da)
  m.glmm <- glmmPQL(Y ~ Length * Sex, random = ~ 1 | Farm,
                    family = binomial, data = da.new, verbose = FALSE)

  ### Collapse results.
  ret.sigma <- cbind(ret.sigma, as.numeric(VarCorr(m.glmm)[, 2]))
  ret.a.i <- cbind(ret.a.i, m.glmm$coefficient$random$Farm)
}

### Find C.I.
# 95% percentile confidence intervals across bootstrap replicates.
probs <- c(0.025, 0.975)
ci.sigma <- apply(ret.sigma, 1, quantile, probs = probs)
colnames(ci.sigma) <- c("std.a", "std.Res")
ci.a.i <- apply(ret.a.i, 1, quantile, probs = probs)
print(ci.sigma)
print(ci.a.i)

### Exercise: Estimate mean and median of variation.
|
4d910277bde3407023f7060b357f1b87acbba45c
|
e9a43e6e25f143ecd59dc17e59158656fd34fb5c
|
/batch_correction/batch_correction_source.R
|
a88851d50792323f559ddece3d110bc5ccb649c6
|
[
"CC0-1.0"
] |
permissive
|
LeahBriscoe/microbatch_vc
|
aed3a9bcfc15bc73f4be6e9ac99518f7ab9d3af5
|
1c609724fd9c8efb77a2caf4fbdcc7e57f221aae
|
refs/heads/master
| 2023-06-18T09:37:31.882697
| 2021-07-22T17:38:39
| 2021-07-22T17:38:39
| 233,694,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,742
|
r
|
batch_correction_source.R
|
run_ComBat <- function(mat, batch_labels, model_matrix = NULL) {
  # Empirical-Bayes batch correction via sva::ComBat.
  # `model_matrix` (optional) protects biological covariates from removal.
  require(sva)
  # The matrix is passed through untransformed (a log(mat + 1) transform was
  # considered in earlier revisions but is not applied).
  ComBat(dat = mat, batch = batch_labels, mod = model_matrix)
}
run_ComBat_mle <- function(mat, batch_labels) {
  # Batch-correct `mat` with the MLE variant of ComBat.
  # NOTE(review): relies on the global `script_folder` being set by the
  # calling environment -- confirm before reuse.
  source(paste0(script_folder, "CBX/combatx_helper.R"))
  source(paste0(script_folder, "CBX/combat_mle.R"))
  input <- ComBat_mle(dat = mat, batch = batch_labels, estimation_method = "MLE")
  # Bug fix: the original ended on an assignment, which returns its value
  # invisibly; return the corrected matrix explicitly.
  input
}
run_percentile_norm <- function(mat, data, case_class, control_class) {
  # Percentile normalization of `mat` against case/control labels taken from
  # `data$df_meta`; implementation lives in the sourced helper.
  source(paste0(script_folder, "percentile_norm.R"))
  percentile_norm(mat, df_meta = data$df_meta, replace_zeroes = TRUE,
                  case_class = case_class, control_class = control_class)
}
#' Slope-correct an abundance matrix against a reference study.
#'
#' @param mat matrix you want to batch correct
#' @param data object containing $df_meta
#' @param ref_study Reference study to align remaining samples to
#' @return The corrected OTU matrix (the `df_otu` component of the pooled fit).
run_slope_correction <- function(mat, data, ref_study) {
  source(paste0(script_folder, "slope_correction.R"))
  fit <- slope_correction_pooled(mat, data$df_meta, ref_study = ref_study)
  fit$df_otu
}
run_limma <- function(mat, batch_labels, batch_labels2 = NULL) {
  # Thin wrapper over limma::removeBatchEffect, supporting up to two
  # batch factors.
  require(limma)
  removeBatchEffect(x = mat, batch = batch_labels, batch2 = batch_labels2)
}
run_bmc <- function(mat, batch_labels) {
  # Batch mean-centering: within each batch, subtract each feature's (row's)
  # mean computed over that batch's samples.
  #
  # @param mat numeric matrix, features x samples; columns must be named.
  # @param batch_labels vector of batch ids, one per column of `mat`.
  # @return matrix of the same shape with per-batch row means removed.
  #
  # (The original `require(dplyr)` was removed: nothing below uses dplyr.)
  corrected_mat <- mat
  for (batch in unique(batch_labels)) {
    samples <- colnames(mat)[batch_labels == batch]
    # drop = FALSE keeps a one-sample batch as a 1-column matrix; the original
    # collapsed it to a vector, making rowMeans() error.
    batch_mat <- mat[, samples, drop = FALSE]
    corrected_mat[, samples] <- sweep(batch_mat, MARGIN = 1, rowMeans(batch_mat))
  }
  corrected_mat
}
# run_dim_red_combat <- function(){
# require(compositions)
# ilr()
# }
pca_method <- function(input, clr_transform = FALSE, center_scale_transform = TRUE, num_pcs) {
  # PCA via truncated SVD (bigstatsr). Optional CLR and centre/scale
  # transforms are applied feature-wise (rows) before the decomposition.
  require(compositions)
  require("bigstatsr")
  raw <- input
  transformed <- input
  if (clr_transform) {
    transformed <- t(clr(t(transformed)))
  }
  if (center_scale_transform) {
    transformed <- t(scale(t(transformed)))
  }
  # big_SVD works on a file-backed matrix with samples as rows.
  fbm <- as_FBM(t(transformed), type = c("double"))
  started <- Sys.time()
  # Request 10 components beyond the number asked for.
  svd_result <- big_SVD(fbm, k = (num_pcs + 10))
  print(Sys.time() - started)
  # Sample scores = U * diag(d), labelled by the original column names.
  scores <- svd_result$u %*% diag(svd_result$d)
  row.names(scores) <- colnames(raw)
  list(svd_result = svd_result, pca_score = scores, transformed_data = transformed)
}
regress_out <- function(pc_scores, data, pc_index) {
  # Remove the linear effect of one principal component from every column of
  # `data`; returns the residuals transposed (features x samples).
  covariate <- pc_scores[, pc_index]
  fit <- lm(as.matrix(data) ~ covariate)
  t(residuals(fit))
}
# run_smart_sva <- function(mat, batch_labels){
# mat = input_abundance_table
#
# require(SmartSVA)
# ?EstDimRMT
#
# #mat = input_abundance_table
#
# mat_scale = t(scale(t(mat)))
#
# mat = mat_scale
#
# require(SmartSVA)
#
# t1 = Sys.time()
# Y.r <- t(resid(lm(t(mat) ~ batch_labels)))
# n.sv <- EstDimRMT(Y.r, FALSE)$dim + 1
# print(n.sv)
# t2= Sys.time()
#
# mod <- model.matrix( ~ batch_labels)
# sv.obj <- smartsva.cpp(input_abundance_table, mod, mod0=NULL, n.sv=n.sv)
# t2= Sys.time()
# print(t3 -t2)
# out_mat <- t(sv.obj$sv)
# #row.names(out_mat) = row.names(mat)
# colnames(out_mat) = colnames(mat)
#
# return(out_mat)
#
# out_mat[1:4,1:4]
# dim(out_mat)
# dim(out_mat_no_scaling)
# out_mat_no_scaling[1:4,1:4]
#
# out_mat_no_scaling = out_mat
# }
#' @param mat matrix you want to batch correct
#' @param data object containing $df_meta
#'
#'
#
# mat = input_abundance_table_clr_scale
# metadata_mod= total_metadata_mod_interest
# bio_signal_formula = bio_signal_formula_interest
# num_pcs=100
# mod_ <- model.matrix( object = bio_signal_formula, data = metadata_mod)
# mat1 = mat + 1
# sv.obj <- smartsva.cpp(dat = mat, mod = mod_, alpha = .25,
# mod0=NULL, n.sv=100, B = 1000, VERBOSE = T)
#
#
#
# BMI=read.table(file = "~/Downloads/bmi_corrected.txt")
# library(data.table)
# kmers=as.matrix(data.frame(fread(file = "~/Downloads/kmer_table_clr_scaled.txt", header = T), row.names = 1))
# mod <- model.matrix( ~ bmi_corrected, BMI)
#
# sv.obj <- smartsva.cpp(dat = kmers, mod = mod, alpha = .25,
# mod0=NULL, n.sv=100, B = 1000, VERBOSE = T)
# dim(kmers)
#
# kmers[1:4,1:4]
# mat[1:4,1:4]
# mat = input_abundance_table
# metadata_mod = total_metadata_mod_interest
# bio_signal_formula = bio_signal_formula_interest
# Surrogate variable analysis (SmartSVA) batch correction.
#
# @param mat numeric matrix (features x samples) to correct.
# @param metadata_mod data frame of covariates used to protect biological
#   signal while estimating surrogate variables.
# @param bio_signal_formula right-hand-side formula over `metadata_mod`
#   columns describing the biological signal to preserve.
# @param num_pcs if given, used directly as the number of surrogate variables;
#   otherwise estimated via random matrix theory (EstDimRMT).
# @return list(corrected_data, sv.obj, n.sv).
run_sva <- function(mat,metadata_mod=NULL,bio_signal_formula=NULL,num_pcs = NULL){
  message("about to load smartsva")
  require(SmartSVA)
  message("finish load smartsva")
  mat_scaled = mat
  message(dim(mat_scaled))
  message(dim(metadata_mod))
  message(num_pcs)
  if(!is.null(num_pcs)){
    # Caller-supplied number of surrogate variables.
    n.sv = num_pcs
  }else{
    if(!is.null(metadata_mod)){
      # Build "t( mat_scaled) ~ <bio signal>" so lm() regresses the biological
      # signal out before estimating the residual dimensionality.
      bio_signal_formula_resid = as.formula(paste0("t( mat_scaled)", paste0(as.character(bio_signal_formula),collapse = '')))
      dim(mat_scaled)
      dim(metadata_mod)
      #Determine number of SVs
      message("about to resid")
      Y.r <- t(resid(lm(bio_signal_formula_resid,data = metadata_mod)))
      message("estimating RT")
      t1 = Sys.time()
      n.sv <- EstDimRMT(Y.r, FALSE)$dim + 1 # Very important: Add one extra dimension to compensate potential loss of 1 degree of freedom in confounded scenarios !!!
      t2= Sys.time()
      message(t2 -t1)
    }else{
      # No covariates: estimate dimensionality on the raw matrix.
      Y.r = mat_scaled
      t1 = Sys.time()
      n.sv <- EstDimRMT(Y.r, FALSE)$dim + 1 # Very important: Add one extra dimension to compensate potential loss of 1 degree of freedom in confounded scenarios !!!
      t2= Sys.time()
      message(t2 -t1)
    }
    print("n.sv")
    print(n.sv)
  }
  # Run SVA
  # NOTE(review): detaching `compositions` here is a global side effect --
  # presumably it masks a function SmartSVA needs; confirm before removing.
  detach("package:compositions", unload=TRUE)
  mod <- model.matrix( object = bio_signal_formula, data = data.frame(metadata_mod))
  sv.obj <- smartsva.cpp(dat = as.matrix(mat_scaled), mod = mod, mod0=NULL, n.sv=n.sv, B = 1000, VERBOSE = T)
  t3= Sys.time()
  # Corrected data = residuals after regressing every feature on the
  # estimated surrogate variables.
  mat_scaled_corrected<- t(resid(lm(t(mat_scaled) ~ ., data=data.frame(sv.obj$sv))))
  return( list(corrected_data = mat_scaled_corrected, sv.obj=sv.obj,n.sv=n.sv))
}
|
0908f2664a7222663c2b4faf16f5298a4085ff7a
|
12a8fd342e353257e8f73e9b44ca3cf25ccbbeb0
|
/data-BigData/Q56-to-CSV.R
|
ece2d0ac137b03acfb3f8c5533885cd32869bf9f
|
[] |
no_license
|
Kundjanasith/data-mini1
|
e0d5746c6005ceb1fd6033a311f99b4b1285d308
|
38636aecdba22b60f7bd394d88ffb0503ba82d6e
|
refs/heads/master
| 2020-12-25T15:39:39.608498
| 2016-07-09T17:41:20
| 2016-07-09T17:41:20
| 62,961,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
Q56-to-CSV.R
|
# Unused demo helper, kept so existing callers (if any) still resolve it.
plotEiei <- function(x){
  print("eiei")
  print(x['ID'])
}

file <- read.csv('Q56.csv')

# For each student ID, keep the unique per-term records and order them
# chronologically (year, then semester). Building a list of blocks and
# stacking once replaces the original grow-by-rbind loop, which copied `res`
# on every iteration (O(n^2) in the number of students).
per_student <- lapply(unlist(unique(file['ID'])), function(x) {
  data <- subset(file, ID == x)
  groupdata <- unique(data[, c("ID", "EnrolledYear", "Semester", "GPA",
                               "X2556_GPAX", "entry2553_GPAX")])
  groupdata[order(unlist(groupdata['EnrolledYear']),
                  unlist(groupdata['Semester'])), ]
})
res <- do.call(rbind, per_student)
print(res)
write.csv(res, file = "Q56_BYR.csv")
|
9e0fd612e8eddc627b4a9da22520ec837a6c7261
|
5ce3329a63d78e2a86053fd47d9c0fd637cd294d
|
/plot2.R
|
d3d958a6ba824868293126717a61880367e711b3
|
[] |
no_license
|
sjjbryant/ExData_Plotting1
|
b7cff92d8ea1d6aa0f9f69f15e85913676059168
|
9e4f10dfc31abd725b4acb3363e5663281b2315e
|
refs/heads/master
| 2021-01-18T05:34:17.129662
| 2015-08-09T21:01:27
| 2015-08-09T21:01:27
| 40,444,607
| 0
| 0
| null | 2015-08-09T17:31:10
| 2015-08-09T17:31:09
| null |
UTF-8
|
R
| false
| false
| 805
|
r
|
plot2.R
|
# Plot 2 (Exploratory Data Analysis course): Global Active Power over time
# for 2007-02-01 and 2007-02-02, saved as plot2.png.

# Read the full household power data set; '?' marks missing values.
energyTable <- read.table("C:\\Users\\Steve\\Documents\\Coursera\\Data Science\\Exploratory Data Analysis\\Week 1\\household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Convert the Date column from character to Date.
energyTable$Date <- as.Date(energyTable$Date, format = "%d/%m/%Y")
# Keep only the two target days.
subenergyTable <- subset(energyTable, energyTable$Date == "2007-02-01" | energyTable$Date == "2007-02-02")
# Combine date and time into a single timestamp for the x axis.
subenergyTable$dateTime = as.POSIXlt(paste(subenergyTable$Date, subenergyTable$Time, sep = " "))
# Write the line plot to a 480x480 PNG.
# Fix: the y-axis label previously misspelled "kilowatts" as "killowatts".
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(subenergyTable$dateTime, subenergyTable$Global_active_power, type = 'l', xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
36a03fc3cde55f92ce51c5b0e54d9e7086b5ec73
|
c6b261da0a15eae8fa89234ce4cad2cbcce93a28
|
/.Rprofile
|
8d5c56d3a4933f95be5708da8e7b93fcaaecaa66
|
[] |
no_license
|
optionalg/dotfiles-60
|
e9a1a95e7f8c5afa39ad52874cd44d8fc0e01480
|
bdcfe89f25600c5ae449fff0d39dad496fd63f86
|
refs/heads/master
| 2020-04-01T17:43:05.520301
| 2016-05-25T03:26:56
| 2016-05-25T03:26:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 809
|
rprofile
|
.Rprofile
|
# Session startup hook: set preferred session defaults and, on an
# interactive macOS session, make quartz the default graphics device
# with a smaller default canvas.
.First <- function() {
  prefs <- list(
    repos = c(CRAN = "https://cran.rstudio.com"),
    browserNLdisabled = TRUE,
    deparse.max.lines = 2,
    help_type = "html",
    max.print = 200,
    repr.plot.width = 5,
    repr.plot.height = 5,
    repr.plot.res = 100,
    repr.plot.pointsize = 8,
    repr.plot.quality = 100,
    repr.plot.antialias = "default",
    jupyter.plot_mimetypes = "image/svg+xml"
  )
  do.call(options, prefs)

  # macOS only: switch to quartz once grDevices is loaded.
  if (Sys.info()["sysname"] == "Darwin" && interactive()) {
    options(device = "quartz")
    setHook(packageEvent("grDevices", "onLoad"),
            function(...) grDevices::quartz.options(width = 3.5,
                                                    height = 3.5,
                                                    pointsize = 8))
  }
}
|
31edf468ac655fdcd839d0a6a07372604134975a
|
02372910c089fad695614717723197e7cc2ac29b
|
/man/assocTestAggregate.Rd
|
0fd84d3d2f7607f0d24d49eb82fa69ebb868215b
|
[] |
no_license
|
UW-GAC/genesis2
|
d1235ce86ea6dc4aff1492d7dc3ce9bb924f530b
|
913ad1b84b10c4fa5c9cdd818952dcb17830c2d7
|
refs/heads/master
| 2021-01-20T11:34:56.788698
| 2018-03-26T17:05:10
| 2018-03-26T17:05:10
| 101,669,905
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,709
|
rd
|
assocTestAggregate.Rd
|
\name{assocTestAggregate}
\alias{assocTestAggregate}
\alias{assocTestAggregate-methods}
\alias{assocTestAggregate,SeqVarIterator-method}
\title{Aggregate Association Testing}
\description{\code{assocTestAggregate} performs aggregate association tests using the null model fit with \code{\link{fitNullModel}}.}
\usage{
\S4method{assocTestAggregate}{SeqVarIterator}(gdsobj, null.model, AF.max=1,
weight.beta=c(1,1), weight.user=NULL,
test=c("Burden", "SKAT", "SMMAT"),
burden.test=c("Score", "Wald"), rho=0,
pval.method=c("davies", "kuonen", "liu"),
verbose=TRUE)
}
\arguments{
\item{gdsobj}{An object of class \code{\link{SeqVarIterator}} from the package \pkg{\link{SeqVarTools}} containing the genotype data for the variants and samples to be used for the analysis.}
\item{null.model}{A null model object returned by \code{fitNullModel}.}
\item{AF.max}{A numeric value specifying the upper bound on the alternate allele frequency for variants to be included in the analysis.}
\item{weight.beta}{A numeric vector of length two specifying the two parameters of the Beta distribution used to determine variant weights; weights are given by \code{dbeta(MAF, a, b)}, where MAF is the minor allele frequency, and a and b are the two parameters specified here. \code{weight.beta = c(1,25)} gives the Wu weights; \code{weight.beta = c(0.5, 0.5)} is proportional to the Madsen-Browning weights; and \code{weight.beta = c(1,1)} gives a weight of 1 to all variants. This input is ignored when \code{weight.user} is not \code{NULL}.}
\item{weight.user}{A character string specifying the name of a variable in the variantData slot of \code{gdsobj} to be used as variant weights. When left \code{NULL} (the default), the weights specified by \code{weight.beta} will be used.}
\item{test}{A character string specifying the type of test to be performed. The possibilities are \code{"Burden"} (default), \code{"SKAT"}, or \code{"SMMAT"}. When this is set to "SKAT" and the parameter \code{rho} has multiple values, a SKAT-O test is performed.}
\item{burden.test}{A character string specifying the type of Burden test to perform when \code{test = "Burden"}. The possibilities are \code{"Score"} and \code{"Wald"}. \code{"Score"} can be used for any \code{null.model}. \code{"Wald"} can not be used when the \code{null.model} is from a mixed model with a binary outcome variable.}
%\item{burden.test}{A character string specifying the type of Burden test to perform when \code{test} = "Burden". The possibilities are "Score", "Wald", and "Firth". "Score" can be used for any \code{null.model}. "Wald" can not be used when the \code{null.model} is from a mixed model with a binary outcome variable. "Firth" can only be used when the \code{null.model} is from a logistic regression with a binary outcome variable.}
\item{rho}{A numeric value (or vector of numeric values) in \code{[0,1]} specifying the rho parameter for SKAT. When \code{rho = 0}, a standard SKAT test is performed. When \code{rho = 1}, a score burden test is performed. When \code{rho} is a vector of values, SKAT-O is performed using each of those values as the search space for the optimal \code{rho}.}
\item{pval.method}{A character string specifying which method to use to calculate SKAT p-values. \code{"davies"} (the default) uses numerical integration; \code{"kuonen"} uses a saddlepoint method; and \code{"liu"} uses a moment matching approximation. If the davies method generates an error, kuonen is tried, and then liu as a last resort.}
\item{verbose}{Logical indicator of whether updates from the function should be printed to the console; the default is \code{TRUE}.}
}
\details{
The type of aggregate unit tested depends on the class of iterator used for \code{gdsobj}. Options include sliding windows, specific ranges of variants or selection of individual variants (ranges with width 1). See \code{\link{SeqVarIterator}} for more details.
The effect size estimate is for each copy of the alternate allele.
For multiallelic variants, each alternate allele is tested separately.
The SMMAT test is a hybrid of SKAT and the burden test.
}
\value{A list with the following items:
\item{results}{A data.frame containing the results from the main analysis. Each row is a separate aggregate test:}
If \code{gdsobj} is a \code{\link{SeqVarWindowIterator}}:
\item{chr}{The chromosome value}
\item{start}{The start position of the window}
\item{end}{The end position of the window}
Always:
\item{n.site}{The number of variant sites included in the test.}
\item{n.alt}{The number of alternate alleles included in the test.}
\item{n.sample.alt}{The number of samples with an observed alternate allele at any variant in the aggregate set.}
If \code{test} is \code{"Burden"}:
%% \item{burden.skew}{The skewness of the burden value for all samples.}
If \code{burden.test} is "Score":
\item{Score}{The value of the score function}
\item{Score.SE}{The estimated standard error of the Score}
\item{Score.Stat}{The score Z test statistic}
\item{Score.pval}{The score p-value}
If \code{burden.test} is \code{"Wald"}:
\item{Est}{The effect size estimate for a one unit increase in the burden value}
\item{Est.SE}{The estimated standard error of the effect size estimate}
\item{Wald.Stat}{The Wald Z test statistic}
\item{Wald.pval}{The Wald p-value}
%% If \code{burden.test} is "Firth":
%% \item{Est}{The effect size estimate for a one unit increase in the burden value}
%% \item{SE}{The estimated standard error of the effect size estimate}
%% \item{Firth.stat}{The Firth test statistic}
%% \item{Firth.pval}{The Firth p-value}
If \code{test} is \code{"SKAT"}:
\item{Q_rho}{The SKAT test statistic for the value of rho specified. There will be as many of these variables as there are rho values chosen.}
\item{pval_rho}{The SKAT p-value for the value of rho specified. There will be as many of these variables as there are rho values chosen.}
\item{err_rho}{Takes value 1 if there was an error in calculating the p-value for the value of rho specified when using the "kuonen" or "davies" methods; 0 otherwise. When there is an error, the p-value returned is from the "liu" method. There will be as many of these variables as there are rho values chosen.}
When \code{length(rho) > 1} and SKAT-O is performed:
\item{min.pval}{The minimum p-value among the p-values calculated for each choice of rho.}
\item{opt.rho}{The optimal rho value; i.e. the rho value that gave the minimum p-value.}
\item{pval_SKATO}{The SKAT-O p-value after adjustment for searching across multiple rho values.}
If \code{test} is \code{"SMMAT"}:
\item{pval_burden}{The burden test p-value}
\item{pval_hybrid}{The SMMAT p-value}
\item{err}{Takes value 1 if there was an error calculating the hybrid p-value; 0 otherwise. If \code{err=1}, \code{pval_hybrid} is set to \code{pval_burden}.}
\item{variantInfo}{A list with as many elements as aggregate tests performed. Each element of the list is a data.frame providing information on the variants used in the aggregate test with results presented in the corresponding row of \code{results}. Each of these data.frames has the following information:}
\item{variant.id}{The variant ID}
\item{chr}{The chromosome value}
\item{pos}{The base pair position}
\item{n.obs}{The number of samples with non-missing genotypes}
\item{freq}{The estimated alternate allele frequency}
\item{weight}{The weight assigned to the variant in the analysis.}
}
\author{Matthew P. Conomos, Stephanie M. Gogarten, Tamar Sofer, Ken Rice, Chaoyu Yu}
\examples{
library(SeqVarTools)
library(Biobase)
library(GenomicRanges)
# open a sequencing GDS file
gdsfile <- seqExampleFileName("gds")
gds <- seqOpen(gdsfile)
# simulate some phenotype data
data(pedigree)
pedigree <- pedigree[match(seqGetData(gds, "sample.id"), pedigree$sample.id),]
pedigree$outcome <- rnorm(nrow(pedigree))
# construct a SeqVarData object
seqData <- SeqVarData(gds, sampleData=AnnotatedDataFrame(pedigree))
# fit the null model
nullmod <- fitNullModel(seqData, outcome="outcome", covars="sex")
# burden test - Range Iterator
gr <- GRanges(seqnames=rep(1,3), ranges=IRanges(start=c(1e6, 2e6, 3e6), width=1e6))
iterator <- SeqVarRangeIterator(seqData, variantRanges=gr)
assoc <- assocTestAggregate(iterator, nullmod, test="Burden")
assoc$results
lapply(assoc$variantInfo, head)
# SKAT test - Window Iterator
seqSetFilterChrom(seqData, include="22")
iterator <- SeqVarWindowIterator(seqData)
assoc <- assocTestAggregate(iterator, nullmod, test="SKAT")
head(assoc$results)
head(assoc$variantInfo)
# SKAT-O test - List Iterator
seqResetFilter(iterator)
gr <- GRangesList(
GRanges(seqnames=rep(22,2), ranges=IRanges(start=c(16e6, 17e6), width=1e6)),
GRanges(seqnames=rep(22,2), ranges=IRanges(start=c(18e6, 20e6), width=1e6)))
iterator <- SeqVarListIterator(seqData, variantRanges=gr)
assoc <- assocTestAggregate(iterator, nullmod, test="SKAT", rho=seq(0, 1, 0.25))
assoc$results
assoc$variantInfo
# user-specified weights
seqResetFilter(iterator)
variant.id <- seqGetData(gds, "variant.id")
weights <- data.frame(variant.id, weight=runif(length(variant.id)))
variantData(seqData) <- AnnotatedDataFrame(weights)
iterator <- SeqVarListIterator(seqData, variantRanges=gr)
assoc <- assocTestAggregate(iterator, nullmod, test="Burden", weight.user="weight")
assoc$results
assoc$variantInfo
seqClose(seqData)
}
\keyword{association}
|
64d9ab8eb03940e5f0fc39643efda6659ea3e551
|
9b72de4f01c77b92ef23cf0433d7f806802bb419
|
/SPOJ/ONP_Transform the Expression/Transform the Expression.R
|
6c23d125e3aa24192cc8cba8dcc71748c47cebd7
|
[] |
no_license
|
GitPistachio/Competitive-programming
|
ddffdbc447669a2f8ade6118dfe4981bae948669
|
f8a73f5152b2016b1603a64b7037602d2ab2c06e
|
refs/heads/master
| 2023-05-01T20:55:18.808645
| 2023-04-21T20:45:08
| 2023-04-21T20:45:08
| 167,733,575
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,317
|
r
|
Transform the Expression.R
|
# Project name : SPOJ: Transform the expression
# Author : Wojciech Raszka
# Date created : 2019-02-10
# Description :
# Status : Accepted (23205239)
# Comment :
# Convert a fully parenthesised infix expression to reverse Polish
# notation (RPN) with a shunting-yard style operator stack, writing the
# result to stdout. Operands are single lowercase letters; supported
# operators are + - * / ^ with the usual precedence. Note: pending
# operators are only flushed by closing parentheses, so the input is
# expected to be fully parenthesised (as in SPOJ problem ONP).
PNToRPN <- function(exp) {
  out <- character(0)    # emitted RPN tokens, in order
  stack <- character(0)  # pending operators and open parentheses

  top <- function(s) s[length(s)]

  for (ch in strsplit(exp, "")[[1]]) {
    if (ch %in% letters) {
      # Operands go straight to the output.
      out <- c(out, ch)
    } else if (ch %in% c("+", "-")) {
      # Every operator binds at least as tightly as + / -, so pop
      # everything down to the nearest '(' before pushing.
      while (length(stack) > 0 && top(stack) != "(") {
        out <- c(out, top(stack))
        stack <- stack[-length(stack)]
      }
      stack <- c(stack, ch)
    } else if (ch %in% c("*", "/")) {
      # Pop operators of equal or higher precedence (* / ^).
      while (length(stack) > 0 && !(top(stack) %in% c("(", "+", "-"))) {
        out <- c(out, top(stack))
        stack <- stack[-length(stack)]
      }
      stack <- c(stack, ch)
    } else if (ch == "^") {
      # '^' pops only other '^' (matching the original's behaviour).
      while (length(stack) > 0 && top(stack) == "^") {
        out <- c(out, top(stack))
        stack <- stack[-length(stack)]
      }
      stack <- c(stack, ch)
    } else if (ch == "(") {
      stack <- c(stack, ch)
    } else if (ch == ")") {
      # Flush operators back to the matching '(' and discard it.
      while (length(stack) > 0 && top(stack) != "(") {
        out <- c(out, top(stack))
        stack <- stack[-length(stack)]
      }
      stack <- stack[-length(stack)]
    }
  }
  write(paste(out, collapse = ""), stdout())
}
# Driver for SPOJ-style judged input: the first stdin line gives the
# number of expressions, each following line holds one infix expression
# to be converted to RPN and printed by PNToRPN().
f <- file('stdin', open='r')
no_of_exp <- as.integer(readLines(f, n=1))
for (i in 1:no_of_exp){
  PNToRPN(readLines(f, n=1))
}
|
c08c7f7842fb63af276a1aceb3672ccb6b13832a
|
6cd4cab6f8d181bf19cba90f7ef9c005f9cc016c
|
/man/install.pkg.Rd
|
2a8802aa2af3f278e2bcdd7e0935a5af0bd228ab
|
[] |
no_license
|
mbojan/mvbutils
|
30dbce0664b15d184aa8174bd252fa8bd28941dd
|
1c4e7fef56ed27b18a1d01d35cfcf5dbcdfc3098
|
refs/heads/master
| 2021-01-01T04:55:39.056195
| 2012-02-22T00:00:00
| 2012-02-22T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,573
|
rd
|
install.pkg.Rd
|
\name{install.pkg}
\alias{install.pkg}
\alias{build.pkg}
\alias{build.pkg.binary}
\alias{check.pkg}
\alias{cull.old.builds}
\alias{set.rcmd.vars}
\title{Package building, distributing, and checking}
\description{These are convenient wrappers for R{}'s package creation and installation tools. They are designed to be used on packages created from tasks via \code{mvbutils}. However, the \code{mvbutils} approach deliberately makes re-installation a rare event, and one call to \code{install.pkg} might suffice for the entire life of a simple package. After that very first installation, you'd probably only need to call \code{install.pkg} if (when...) new versions of R{} entail re-installation of packages, and \code{build.pkg/build.pkg.binary/check.pkg} when you want to give your package to others, either directly or via CRAN etc. Set the argument \code{intern=F} if you want to see progress on-screen (but the result won't be captured); if you can handle the disconcertingly blank wait, set \code{intern=T} to get a character vector result. \code{cull.old.builds} deletes ".tar.gz" and ".zip" files for versions before the current one (as determined by the version numbers embedded in the filenames). \code{set.rcmd.vars} does nothing (yet).
}
\usage{
# Usually: build.pkg( mypack) etc
install.pkg( pkg, character.only=FALSE, dir.above.source='+', lib=.libPaths()[1], flags=character(0), intern=TRUE)
build.pkg( pkg, character.only=FALSE, dir.above.source='+', flags=character(0), intern=TRUE)
build.pkg.binary( pkg, character.only=FALSE, dir.above.source='+', flags=character(0), intern=TRUE)
check.pkg( pkg, character.only=FALSE, dir.above.source='+', build.flags=character(0), check.flags=character( 0),
CRAN=FALSE, intern=TRUE)
cull.old.builds( pkg, character.only=FALSE, dir.above.source='+')
set.rcmd.vars( ...) # not yet implemented. If you need to set env vars eg PATH for R CMD to work, you have to do so yourself; see *Details*
}
\arguments{
See the examples
\item{ pkg}{usually an unquoted package name, but interpretation can be changed by non-default \code{character.only}. You can also get away with eg \code{..mypack}, ie a direct reference to the maintained package.}
\item{ character.only}{default FALSE. If TRUE, treat \code{pkg} as a normal object, which should therefore be a string containing the package's name. If \code{character.only} is itself a string, it will override \code{pkg} and be treated as the name of the package.}
\item{ dir.above.source}{where to look for source package; see \code{\link{pre.install}}}
\item{ intern}{?return the result as a character vector? (See \code{\link{system}}) Set to FALSE if you want to see the output as-it-happens, but in that case it won't be returned.}
\item{ lib}{where to install to; default is the same place R{} would install to, i.e. \code{.libPaths()[1]}.}
\item{ flags}{character vector, by default empty. Any entries should be function-specific flags, such as "--md5" for \code{build.pkg}.}
\item{ build.flags, check.flags}{as per \code{flags} but for the two separate parts of \code{check.pkg} (see \bold{Details}). \code{check.flags} is overridden if \code{CRAN==TRUE}'.}
\item{ CRAN}{if TRUE, set the \code{--as-cran} flag to "RCMD check" and unset all other check flags (except library locations, which are set automatically by all these functions).}
\item{ ...}{name-value pairs of system environment variables (not used for now)}
}
\details{
Before doing any of this, you need to have used \code{\link{pre.install}} to create a source package. (Or \code{\link{patch.install}}, if you've done all this before and just want to re-install/build/check for some reason.)
The only environment variable currently made known to R{} CMD is R_LIBS-- let me know if others would be useful.
\code{install.pkg} calls "R CMD INSTALL" to install from a source package.
\code{build.pkg} calls "R CMD build" to wrap up the source package into a "tarball", as required by CRAN and also for distribution to non-Windows-and-Mac platforms.
\code{build.pkg.binary} (Windows & Mac only) calls "R CMD INSTALL --build" to generate a binary package. A temporary installation directory is used, so your existing installation is \emph{not} overwritten or deleted if there's a problem; R{} CMD INSTALL --build has a nasty habit of doing just that unless you're careful, which \code{build.pkg.binary} is.
\code{check.pkg} calls "R CMD check" after first calling \code{build.pkg} (more efficiently, I should perhaps try to work out whether there's an up-to-date tarball already). It \emph{may} also be possible to do some checks directly from R{} via functions in the \pkg{utils} package, but NB the possibility of interference with your current R{} session. For example, at one stage \code{codoc} (which is the only check that I personally find very useful) tried to unload & load the package, which was very bad; but I think that may no longer be the case.
You \emph{may} have to set some environment variables (eg PATH, and perhaps R_LIBS) for the underlying R{} CMD calls to work. Currently you have to do this manually--- your \code{.First} or \code{.Rprofile} would be a good place. If you really object to changing these for the whole R{} session, let me know; I've left a placeholder for a function \code{set.rcmd.vars} that could store a list of environment variables to be set temporarily for the duration of the R{} CMD calls only, but I haven't implemented it (and won't unless there's demand).
Perhaps it would be desirable to let some flags be set automatically, eg via something in the \code{pre.install.hook} for a package. I'll add this if requested.
}
\value{If \code{intern=TRUE}: the stuff printed out, with class \code{cat} so it prints nicely. If \code{intern=FALSE}: various things about the paths (purely for my programming convenience).
}
\examples{
\dontrun{
# First time package installation
# Must be cd()ed to task above 'mvbutils'
maintain.packages( mvbutils)
pre.install( mvbutils)
install.pkg( mvbutils)
# Subsequent maintenance is all done by:
patch.install( mvbutils)
# For distro to
build.pkg( mvbutils)
# or on Windows (?and Macs?)
build.pkg.binary( mvbutils)
# If you enjoy R CMD CHECK:
check.pkg( mvbutils) # will print "live" as it does them
# If you want the results directly in R, and don't mind not seeing them "live":
check.pkg( mvbutils, intern=T)
# Also legal:
build.pkg( ..mvbutils)
# To do it under programmatic control
for( ipack in all.my.package.names)
build.pkg( char=ipack)
}
}
\keyword{misc}
|
a522f5a7bc9e1fd19b7cd2cae0446ec9ff62a594
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/kernDeepStackNet/R/KDSNdirectOpt.R
|
c13562ec2d1b20995ebec0544637aa37a48797bd
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,761
|
r
|
KDSNdirectOpt.R
|
# General sequential one dimensional optimization of f(x), x \in R^d, f(x) \in R
#
# Coordinate-descent style optimizer: stats::optimize() is applied to one
# coordinate at a time (all others held fixed), sweeping over the
# coordinates until maxRuns sweeps are reached or both the objective and
# the parameter vector change by less than tol_input (relative change
# where the previous value is non-zero, absolute change otherwise).
# The search is restarted `repetitions` times from random uniform starting
# points inside the box `interval_matrix`, and the best restart is returned.
#
# Arguments:
#   f_input         objective function; its FIRST formal argument must be
#                   named `x` (checked below). Maps a length-d numeric
#                   vector to a scalar.
#   interval_matrix 2 x d matrix of box bounds (row 1 = lower, row 2 = upper).
#   maxRuns         maximum number of full coordinate sweeps per restart.
#   repetitions     number of random restarts.
#   tol_input       convergence tolerance (also forwarded to optimize()).
#   x_0             optional starting vector, used for the first restart only;
#                   later restarts always draw random starting points.
#   addInfo         print progress messages?
#   nCores          > 1 evaluates the restarts on a parallel cluster.
#   envir           environment expected to hold a cluster `cl` when this
#                   function is called as a subroutine (directUse = FALSE).
#   directUse       TRUE: create and stop a private cluster here;
#                   FALSE: reuse the caller's cluster found via `envir`.
#   OptTypePar      tag of the calling routine ("tuneKDSN", "tuneLevelKDSN"
#                   or "mbo1d"); selects where the shared cluster is looked
#                   up. NOTE(review): the lookup paths envir$cl vs
#                   envir$envir$cl mirror those callers' internals --
#                   confirm against the callers before reuse.
#
# Value: list(minimum = best parameter vector, objective = best f value).
optimize1dMulti <- function (f_input, interval_matrix, maxRuns=3, repetitions=5,
                             tol_input=.Machine$double.eps^0.25, x_0=NULL, addInfo=TRUE,
                             nCores=1, envir=parent.frame(), directUse=TRUE, OptTypePar="") {
  # Check if first argument of function is x
  stopifnot(formalArgs (f_input) [1]=="x")
  # Rerun optimization with different starting values
  Results <- vector("list", repetitions)
  dimension <- dim(interval_matrix) [2]
  if(nCores==1) {
    for(j in 1:repetitions) {
      if(j > 1 | is.null(x_0)) {
        # Set initial starting value: Random vector x nid ~ U (a_i, b_i)
        x_0 <- sapply(1:dimension, function (x) runif(n=1, min=interval_matrix [1, x], max=interval_matrix [2, x]))
      }
      x_0_alt <- x_0
      # liste_vektoren[[i]] is an unevaluated expression rebuilding the
      # full parameter vector with coordinate i replaced by the scalar `x`
      # being optimized; it is eval()'d inside optimize() below.
      liste_vektoren <- vector("list", dimension)
      if(dimension==1) {
        liste_vektoren [[1]] <- expression(x)
      }
      if(dimension==2) {
        liste_vektoren [[1]] <- expression(c(x, x_0 [2]))
        liste_vektoren [[2]] <- expression(c(x_0 [1], x))
      }
      if(dimension>=3) {
        liste_vektoren [[1]] <- expression(c(x, x_0 [2:dimension]))
        liste_vektoren [[dimension]] <- expression(c(x_0 [1:(dimension-1)], x))
        for(i in 1:(dimension-2)) {
          liste_vektoren [[i+1]] <- substitute(c(x_0 [1:i], x, x_0 [(2+i):dimension]),list(i=i))
        }
      }
      # Univariate optimization over one variable given all other variables
      i <- 1
      whileCondition <- TRUE
      stepRuns <- 0
      f_input_x_0 <- f_input(x_0)
      f_input_x_0_alt <- f_input_x_0
      x_0_alt <- x_0
      while (whileCondition) {
        # Conditional optimization
        for(i in 1:dimension) {
          erg <- optimize(f=function (x) f_input( eval( liste_vektoren [[i]] ) ), interval=interval_matrix [, i], tol=tol_input)
          x_0 [i] <- erg$minimum
          if(addInfo) {
            cat("optimize1dMulti", "parameter", i, "\n")
          }
        }
        # Condition for stopping
        stepRuns <- stepRuns + 1
        # erg$objective is f at the optimum of the last coordinate, i.e.
        # f evaluated at the fully updated x_0.
        f_input_x_0 <- erg$objective
        if(stepRuns == maxRuns) {
          whileCondition <- FALSE
        }
        else {
          # Four cases: relative change when the reference quantity is
          # non-zero, absolute change when it is exactly zero (avoids 0/0).
          if(abs(f_input_x_0_alt) != 0 & sum(abs(x_0_alt)) != 0) {
            whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) / abs(f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) / sum(abs(x_0_alt)) >= tol_input)
          }
          else {
            if(abs(f_input_x_0_alt) == 0 & sum(abs(x_0_alt)) != 0) {
              whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) / sum(abs(x_0_alt)) >= tol_input)
            }
            if(abs(f_input_x_0_alt) != 0 & sum(abs(x_0_alt)) == 0) {
              whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) / abs(f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) >= tol_input)
            }
            if(abs(f_input_x_0_alt) == 0 & sum(abs(x_0_alt)) == 0) {
              whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) >= tol_input)
            }
          }
        }
        f_input_x_0_alt <- f_input_x_0
        x_0_alt <- x_0
        if(addInfo) {
          cat("optimize1dMulti", "run", stepRuns, "\n")
        }
      }
      Results [[j]] <- list(minimum=x_0, objective=f_input_x_0)
      if(addInfo) {
        cat("optimize1dMulti", "repetition", j, "\n")
      }
    }
  }
  else{
    # Help function for parallelisation.
    # NOTE(review): tempFunc duplicates the serial loop body above so that
    # one restart runs per worker; keep the two copies in sync.
    tempFunc <- function(j) {
      if(j > 1 | is.null(x_0)) {
        # Set initial starting value: Random vector x nid ~ U (a_i, b_i)
        x_0 <- sapply(1:dimension, function (x) runif(n=1, min=interval_matrix [1, x], max=interval_matrix [2, x]))
      }
      x_0_alt <- x_0
      liste_vektoren <- vector("list", dimension)
      if(dimension==1) {
        liste_vektoren [[1]] <- expression(x)
      }
      if(dimension==2) {
        liste_vektoren [[1]] <- expression(c(x, x_0 [2]))
        liste_vektoren [[2]] <- expression(c(x_0 [1], x))
      }
      if(dimension>=3) {
        liste_vektoren [[1]] <- expression(c(x, x_0 [2:dimension]))
        liste_vektoren [[dimension]] <- expression(c(x_0 [1:(dimension-1)], x))
        for(i in 1:(dimension-2)) {
          liste_vektoren [[i+1]] <- substitute(c(x_0 [1:i], x, x_0 [(2+i):dimension]),list(i=i))
        }
      }
      # Univariate optimization of one variable given all other variables
      i <- 1
      whileCondition <- TRUE
      stepRuns <- 0
      f_input_x_0 <- f_input(x_0)
      f_input_x_0_alt <- f_input_x_0
      x_0_alt <- x_0
      while (whileCondition) {
        # Conditional optimization
        for(i in 1:dimension) {
          erg <- optimize(f=function (x) f_input( eval( liste_vektoren [[i]] ) ), interval=interval_matrix [, i], tol=tol_input)
          x_0 [i] <- erg$minimum
          if(addInfo) {
            cat("optimize1dMulti", "parameter", i, "\n")
          }
        }
        # Condition for stopping
        stepRuns <- stepRuns + 1
        f_input_x_0 <- erg$objective
        if(stepRuns == maxRuns) {
          whileCondition <- FALSE
        }
        else {
          if(abs(f_input_x_0_alt) != 0 & sum(abs(x_0_alt)) != 0) {
            whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) / abs(f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) / sum(abs(x_0_alt)) >= tol_input)
          }
          else {
            if(abs(f_input_x_0_alt) == 0 & sum(abs(x_0_alt)) != 0) {
              whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) / sum(abs(x_0_alt)) >= tol_input)
            }
            if(abs(f_input_x_0_alt) != 0 & sum(abs(x_0_alt)) == 0) {
              whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) / abs(f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) >= tol_input)
            }
            if(abs(f_input_x_0_alt) == 0 & sum(abs(x_0_alt)) == 0) {
              whileCondition <- (abs(f_input_x_0 - f_input_x_0_alt) >= tol_input) & (sum(abs(x_0 - x_0_alt)) >= tol_input)
            }
          }
        }
        f_input_x_0_alt <- f_input_x_0
        x_0_alt <- x_0
        if(addInfo) {
          cat("optimize1dMulti", "run", stepRuns, "\n")
        }
      }
      Results1 <- list(minimum=x_0, objective=f_input_x_0)
      if(addInfo) {
        cat("optimize1dMulti", "repetition", j, "\n")
      }
      return(Results1)
    }
    if(!directUse){
      # Subroutine to direct minimization: reuse the caller's cluster.
      if(OptTypePar=="tuneKDSN" || OptTypePar=="tuneLevelKDSN") {
        localEnvir <- environment()
        clusterExport(cl=envir$cl, varlist=ls(), envir=localEnvir)
        clusterExport(cl = envir$cl, varlist = objects(envir), envir=envir)
        Results <- parLapply(cl = envir$cl, X=1:repetitions, fun=tempFunc)
      }
      # Subroutine to Mbo minimization
      if(OptTypePar=="mbo1d") {
        localEnvir <- environment()
        clusterExport(cl=envir$envir$cl, varlist=ls(), envir=localEnvir)
        Results <- parLapply(cl = envir$envir$cl, X=1:repetitions, fun=tempFunc)
      }
    }
    else{
      # Standalone parallel use: private cluster, cleaned up afterwards.
      cl <- makeCluster(nCores)
      localEnvir <- environment()
      clusterExport(cl=cl, varlist=ls(all.names=TRUE), envir=localEnvir)
      Results <- parLapply(cl = cl, X=1:repetitions, fun=tempFunc)
      stopCluster(cl=cl)
    }
  }
  # Choose best iteration among the random starting values
  Index <- which.min(sapply(1:repetitions, function (x) Results [[x]]$objective))
  Output <- list(minimum=Results [[Index]]$minimum, objective=Results [[Index]]$objective)
  return(Output)
}
# Main tuning function of KDSN
#
# Greedily grows a kernel deep stacking network one level at a time: for
# the current depth the per-level hyperparameters (Dim, sigma, lambda) are
# tuned by coordinate-wise 1-D optimization of the GCV loss, and levels
# keep being added while the loss improves (up to maxLevels). Afterwards
# the random Fourier weights are fine-tuned by trying `fineTuneIt` random
# seeds, and the best configuration found is refit and returned.
#
# Arguments:
#   y, X         response vector and design matrix.
#   maxRuns, repetitions, tol_input  forwarded to optimize1dMulti().
#   maxLevels    upper bound on the number of stacked levels.
#   gammaPar     penalty parameter forwarded to lossKDSN().
#   fineTuneIt   number of random seed draws for weight fine-tuning.
#   addInfo      print progress messages?
#   dimMax       upper bound on the random-features dimension per level.
#   nCores       > 1 parallelizes tuning and fine-tuning on a cluster.
#
# Value: fitted KDSN model (from fitKDSN) with the GCV score attached as
# attribute "GCV".
tuneKDSN <- function (y, X, maxRuns=3, repetitions=5, maxLevels=10, gammaPar=1,
                      fineTuneIt=100, tol_input=.Machine$double.eps^0.25, addInfo=TRUE, dimMax=round(sqrt(dim(X)[1])/2),
                      nCores=1) {
  if(nCores>1) {
    # Initialize cluster (shared by level tuning and fine-tuning)
    cl <- makeCluster(nCores)
  }
  # Initialize parameters
  n <- dim(X) [1]
  levels <- 1
  condWhile <- TRUE
  Loss_prev <- Inf
  # Initialize starting vector of hyperparameters (Dim, sigma, lambda)
  MaxMatEntries <- .Machine$integer.max
  dimStart <- round ((dimMax+1) / 2)
  if((dimMax*2*n) > MaxMatEntries) {
    # Cap the feature dimension so the random-feature matrix stays below
    # R's maximum number of matrix entries.
    dimMax <- floor(MaxMatEntries/n/2)
    dimStart <- round (sqrt(dimMax*2)/2)
  }
  # Min / median / max of squared Euclidean distances define the search
  # range and starting value for the kernel bandwidth sigma.
  quantEuklid <- quantile(c(dist(robustStandard(X))^2), probs=c(0, 0.5, 1))
  sigmaStart <- quantEuklid [2]
  lambdaStart <- 0
  x_start <- c(dimStart, sigmaStart, lambdaStart)
  x_new <- x_start
  x_prev <- x_start
  # Initialize bounds of hyperparameters
  interval_matrix_start <- matrix(c(1, dimMax, quantEuklid [1], quantEuklid [3], 0, 10), nrow=2, ncol=3)
  interval_matrix <- interval_matrix_start
  while(condWhile) {
    if(levels>1) {
      # Specification in loss function must match!
      # Append a fresh (Dim, sigma, lambda) start for the new level
      x_new <- c(x_new, x_start)
      # Bound matrix: rows=1 (min), 2 (max); columns=number of parameters
      interval_matrix <- cbind(interval_matrix, interval_matrix_start)
    }
    # Tune all hyperparameters of the current architecture
    optVal <- optimize1dMulti (f_input=function (x) lossKDSN (parOpt=x,
    y=y, X=X, gammaPar=gammaPar, seedW=seq(0, (levels-1), 1)),
    interval_matrix=interval_matrix, maxRuns=maxRuns, repetitions=repetitions, x_0=x_new,
    tol_input=tol_input, addInfo=addInfo, directUse=FALSE, nCores=nCores, OptTypePar="tuneKDSN")
    # Keep adding levels only while the loss strictly improves
    condWhile <- optVal$objective < Loss_prev
    # Update
    if(condWhile) {
      x_new <- optVal$minimum
      Loss_prev <- optVal$objective
      x_prev <- x_new
      if(levels >= maxLevels) {
        # Bug fix: this progress message was previously placed AFTER the
        # break statement and therefore never executed.
        if(addInfo) {cat("tuneKDSN", "Level =", levels, "\n")}
        break
      }
      levels <- levels + 1
      if(addInfo) {cat("tuneKDSN", "Level =", levels-1, "\n")}
    }
    else {
      # No improvement: revert to the previous architecture and stop
      x_new <- x_prev
      levels <- levels - 1
    }
  }
  # Fine tune random fourier transformation weights.
  # Reproducibility is ensured with explicit seed generation.
  fineTune <- vector("numeric", fineTuneIt)
  seedGuess <- matrix(sample.int(.Machine$integer.max, size=fineTuneIt * levels) *
                      sample(c(-1, 1), size=fineTuneIt * levels, replace=TRUE), nrow=fineTuneIt, ncol=levels)
  if(nCores==1) {
    for(i in 1:fineTuneIt) {
      fineTune[i] <- lossKDSN(parOpt=x_new, y=y, X=X, gammaPar=gammaPar, seedW=seedGuess [i, ])[1]
      if(addInfo) {cat("tuneKDSN", "FineTune =", i, "\n")}
    }
  }
  else {
    localEnvir <- environment()
    clusterExport(cl = cl, varlist=c("lossKDSN", "x_new", "y", "X", "gammaPar", "seedGuess", "fineTuneIt"),
                  envir = localEnvir)
    fineTune <- parSapply(cl=cl, X=1:fineTuneIt,
                          FUN=function(i) lossKDSN(parOpt=x_new, y=y, X=X, gammaPar=gammaPar, seedW=seedGuess [i, ])[1])
    stopCluster(cl=cl)
    if(addInfo) {cat("tuneKDSN", "FineTune done", "\n")}
  }
  minIndex <- which.min(fineTune)
  # Refit the best model: fine-tuned random seeds vs. default seeds 0..levels-1
  lenx_new <- length(x_new)
  stopifnot((lenx_new%%3) == 0)  # three hyperparameters per level
  levels1 <- lenx_new/3
  stopifnot(length(seedGuess[minIndex, ]) == levels1)
  # Unpack the interleaved (Dim, sigma, lambda) triples per level
  Dim1 <- round(x_new[seq(1, lenx_new, 3)])
  sigma1 <- x_new[seq(2, lenx_new, 3)]
  lambda1 <- x_new[seq(3, lenx_new, 3)]
  if(fineTune[minIndex] < optVal$objective){
    finalModel <- fitKDSN(y = y, X = X, levels = levels1, Dim = Dim1,
                          sigma = sigma1, lambda = lambda1, alpha = rep(0, levels1),
                          info = FALSE,
                          seedW = seedGuess [minIndex, ], standX = TRUE)
    # Include GCV score as attribute
    attr(finalModel, which="GCV") <- fineTune[minIndex]
  }
  else{
    finalModel <- fitKDSN(y = y, X = X, levels = levels1, Dim = Dim1,
                          sigma = sigma1, lambda = lambda1, alpha = rep(0, levels1),
                          info = FALSE,
                          seedW = seq(0, (levels1-1), 1), standX = TRUE)
    # Include GCV score as attribute
    attr(finalModel, which="GCV") <- optVal$objective
  }
  return(finalModel)
}
# Main tuning function of KDSN
# Tune a Kernel Deep Stacking Network (KDSN) with a FIXED number of levels.
#
# Two-stage procedure:
#   1. Model-based 1D optimization (optimize1dMulti) over the per-level
#      hyperparameters (Dim, sigma, lambda) — three values per level.
#   2. Random fine-tuning of the random-Fourier-transform seed weights:
#      fineTuneIt random seed matrices are scored and the best is kept.
# The better of the two stages (by GCV-type loss) is refit and returned.
#
# Args:
#   y, X        response and design matrix
#   maxRuns, repetitions, tol_input  controls for optimize1dMulti
#   levels      number of KDSN levels (fixed for this call)
#   gammaPar    penalty weight passed to lossKDSN
#   fineTuneIt  number of random seed draws in stage 2
#   addInfo     print progress messages
#   dimMax      upper bound on the random-feature dimension per level
#   nCores      >1 enables a PSOCK cluster for stage 2 and is forwarded
#               to optimize1dMulti
#
# Returns: a fitted KDSN model (fitKDSN) with attribute "GCV" set to the
# achieved loss. Helpers lossKDSN, fitKDSN, optimize1dMulti and
# robustStandard are package-internal, defined elsewhere in this file.
tuneLevelKDSN <- function (y, X, maxRuns=3, repetitions=5, levels=10, gammaPar=1,
fineTuneIt=100, tol_input=.Machine$double.eps^0.25, addInfo=TRUE, dimMax=round(sqrt(dim(X)[1])/2),
nCores=1) {
if(nCores>1) {
# Initialize cluster (used later for the fine-tuning stage; it is
# stopped in that branch via stopCluster)
cl <- makeCluster(nCores)
}
# Initialize parameters
n <- dim(X)[1]
# Initialize starting vector of hyperparameters
MaxMatEntries <- .Machine$integer.max
dimStart <- round ((dimMax+1) / 2)
# Cap dimMax so the (2*n x dimMax) random-feature matrix stays within
# R's maximum matrix entry count
if((dimMax*2*n) > MaxMatEntries) {
dimMax <- floor(MaxMatEntries/n/2)
dimStart <- round (sqrt(dimMax*2)/2)
}
# Median squared Euclidean distance of standardized X is the starting
# kernel width; min/max give the search bounds
quantEuklid <- quantile(c(dist(robustStandard(X))^2), probs=c(0, 0.5, 1))
sigmaStart <- quantEuklid [2]
lambdaStart <- 0
# One (Dim, sigma, lambda) triple per level, concatenated
x_start <- c(dimStart, sigmaStart, lambdaStart)
x_new <- rep(x_start, levels)
# Initialize bounds of hyperparameters: columns repeat the
# (Dim, sigma, lambda) lower/upper bounds for every level
interval_matrix_start <- matrix(c(1, dimMax, quantEuklid [1], quantEuklid [3], 0, 10), nrow=2, ncol=3)
interval_matrix <- interval_matrix_start [, rep(1:dim(interval_matrix_start)[2], levels)]
# Tune (stage 1): coordinate-wise 1D optimization of the loss; seeds are
# fixed to 0..levels-1 so stage 1 is deterministic
optVal <- optimize1dMulti (f_input=function (x) lossKDSN (parOpt=x,
y=y, X=X, gammaPar=gammaPar, seedW=seq(0, (levels-1), 1)),
interval_matrix=interval_matrix, maxRuns=maxRuns, repetitions=repetitions, x_0=x_new,
tol_input=tol_input, addInfo=addInfo, directUse=FALSE,
OptTypePar="tuneLevelKDSN", nCores=nCores)
x_new <- optVal$minimum
# NOTE(review): message says "tuneKDSN" although this is tuneLevelKDSN —
# possibly a copy-paste leftover; confirm intended log label
if(addInfo){cat("tuneKDSN", "Optimize 1D done", "\n")}
# Fine tune random fourier transformation weights (stage 2)
# Reproduceability is ensured with seed generation
# NOTE(review): fineTuneIt == 0 would make which.min(fineTune) empty and
# break the seedGuess indexing below — TODO confirm callers guarantee >= 1
fineTune <- vector("numeric", fineTuneIt)
seedGuess <- matrix(sample.int(.Machine$integer.max, size=fineTuneIt * levels) * sample(c(-1, 1), size=fineTuneIt * levels, replace=TRUE), nrow=fineTuneIt, ncol=levels)
if(nCores==1) {
for(i in 1:fineTuneIt) {
fineTune[i] <- lossKDSN(parOpt=x_new, y=y, X=X, gammaPar=gammaPar, seedW=seedGuess [i, ])[1]
if(addInfo) {cat("tuneLevelKDSN", "FineTune =", i, "\n")}
}
}
else {
# Export by NAME from this frame: the varlist strings must match the
# local variable names above, so do not rename them
localEnvir <- environment()
clusterExport(cl = cl, varlist=c("lossKDSN", "x_new", "y", "X", "gammaPar", "seedGuess", "fineTuneIt"),
envir = localEnvir)
fineTune <- parSapply(cl=cl, X=1:fineTuneIt,
FUN=function(i) lossKDSN(parOpt=x_new, y=y, X=X, gammaPar=gammaPar, seedW=seedGuess [i, ])[1])
stopCluster(cl=cl)
if(addInfo){cat("tuneLevelKDSN", "FineTune done", "\n")}
}
minIndex <- which.min(fineTune)
# Output
# Refit best model: unpack the interleaved (Dim, sigma, lambda) triples
lenx_new <- length(x_new)
stopifnot((lenx_new%%3) == 0)
levels1 <- lenx_new/3
stopifnot(length(seedGuess[minIndex, ]) == levels1)
Dim1 <- round(x_new[seq(1, lenx_new, 3)])
sigma1 <- x_new[seq(2, lenx_new, 3)]
lambda1 <- x_new[seq(3, lenx_new, 3)]
# Keep whichever stage achieved the lower loss: fine-tuned seeds vs the
# deterministic seeds 0..levels1-1 from stage 1
if(fineTune[minIndex] < optVal$objective){
finalModel <- fitKDSN(y = y, X = X, levels = levels1, Dim = Dim1,
sigma = sigma1, lambda = lambda1, alpha = rep(0, levels1),
info = FALSE,
seedW = seedGuess [minIndex, ],
standX = TRUE)
# Include GCV score as attribute
attr(finalModel, which="GCV") <- fineTune[minIndex]
}
else{
finalModel <- fitKDSN(y = y, X = X, levels = levels1, Dim = Dim1,
sigma = sigma1, lambda = lambda1, alpha = rep(0, levels1),
info = FALSE,
seedW = seq(0, (levels1-1), 1), standX = TRUE)
# Include GCV score as attribute
attr(finalModel, which="GCV") <- optVal$objective
}
return(finalModel)
}
###############################################
# Grid search over subset of levels
# In each level MBO algorithm will be performed
# Grid search over a subset of KDSN level counts.
#
# For every candidate level count in levelSet a full MBO tuning run
# (tuneLevelKDSN) is performed; the tuned model with the smallest GCV
# attribute is returned.
#
# Args: as tuneLevelKDSN, plus
#   levelSet    - integer vector of level counts to try (need NOT be 1:k)
#   nCoresInner - cores used inside each tuneLevelKDSN call
#   nCoresOuter - cores used to parallelise over levelSet
# Exactly one of nCoresInner / nCoresOuter may exceed 1.
#
# Returns: the fitted KDSN model (with "GCV" attribute) minimising GCV.
tuneLevelGridKDSN <- function(y, X, maxRuns=3, repetitions=5, levelSet, gammaPar=1,
fineTuneIt=100, tol_input=.Machine$double.eps^0.25, addInfo=TRUE,
dimMax=round(sqrt(dim(X)[1])/2), nCoresInner=1, nCoresOuter=1) {
  # Check parallel arguments.
  # Bug fix: the original combined the checks with '&', which only raised
  # an error when BOTH core counts were non-integer; either one being
  # non-integer is invalid, so use '||'.
  if(round(nCoresInner)!=nCoresInner || round(nCoresOuter)!=nCoresOuter) {
    stop("Please specify integer numbers in nCoresInner, nCoresOuter!")}
  if(nCoresInner>1 && nCoresOuter>1) {stop("Only one parallelisation technique is allowed.
Please specify either nCoresInner or nCoresOuter
with integer numbers greater 1!")}
  # Compute KDSN with MBO tuning
  localEnvir <- environment()
  if(nCoresOuter==1) {
    resGridLevel <- vector("list", length(levelSet))
    # Bug fix: iterate over POSITIONS, not values. The original looped
    # 'for(l in levelSet)' and then indexed levelSet[l]/resGridLevel[[l]],
    # which tunes the wrong level counts (and grows the result list)
    # whenever levelSet != 1:length(levelSet).
    for(l in seq_along(levelSet)) {
      resGridLevel[[l]] <- tuneLevelKDSN(y=y, X=X, levels=levelSet[l], fineTuneIt=fineTuneIt, gammaPar=gammaPar,
                                         dimMax=dimMax, addInfo=addInfo, nCores=nCoresInner, maxRuns=maxRuns,
                                         repetitions=repetitions, tol_input=tol_input)
      cat("Level", levelSet[l], "\n")
    }
  }
  else{
    cl <- makeCluster(nCoresOuter)
    clusterExport(cl = cl, varlist=c(ls(), "tuneLevelKDSN"), envir = localEnvir)
    # Bug fix: parLapplyLB passes the VALUES of levelSet to 'fun'; the
    # original double-indexed with levelSet[x]. Use the value directly.
    resGridLevel <- parLapplyLB(cl=cl, X=levelSet,
                                fun=function(x) tuneLevelKDSN(y=y, X=X, levels=x, fineTuneIt=fineTuneIt, gammaPar=gammaPar,
                                                              dimMax=dimMax, addInfo=addInfo, nCores=nCoresInner, maxRuns=maxRuns,
                                                              repetitions=repetitions, tol_input=tol_input))
    stopCluster(cl=cl)
  }
  # Output tuned KDSN with smallest GCV (vapply pins the return type)
  gridLevelScores <- vapply(resGridLevel, function(mod) attr(mod, "GCV"), numeric(1))
  return(resGridLevel[[which.min(gridLevelScores)]])
}
|
a18bd0617ed982ba5b3bcbd5e55b0161147d6471
|
d5bc5d4334969fec87fb2dd85ee03a70f4864caf
|
/man/ROIVec-class.Rd
|
3838e88a1982dc8c26910b5c89ff0d14dd4403c5
|
[
"MIT"
] |
permissive
|
bbuchsbaum/neuroim2
|
46e14e10efb2b992e78741b0aa6737bde6971e33
|
2184bfafd75ab51e0c9d67db83076b08bbf3f40b
|
refs/heads/master
| 2023-05-03T11:19:36.844512
| 2023-04-26T00:06:50
| 2023-04-26T00:06:50
| 109,069,233
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 967
|
rd
|
ROIVec-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_class.R
\docType{class}
\name{ROIVec-class}
\alias{ROIVec-class}
\title{ROIVec}
\description{
A class representing a vector-valued volumetric region of interest (ROI) in a brain image.
}
\section{Slots}{
\describe{
\item{\code{coords}}{A \code{matrix} containing the 3D coordinates of the voxels within the ROI. Each row represents a voxel coordinate as (x, y, z).}
\item{\code{.Data}}{A \code{matrix} containing the data values associated with each voxel in the ROI. Each row corresponds to a unique vector value, and the number of rows should match the number of rows in the \code{coords} matrix.}
}}
\section{Validity}{
An object of class \code{ROIVec} is considered valid if:
- The \code{coords} slot is a matrix with 3 columns.
- The \code{.Data} slot is a matrix.
- The number of rows in the \code{.Data} matrix is equal to the number of rows in the \code{coords} matrix.
}
|
7464a0a8b75952c7f8bed81c8634859b73456d87
|
77572ab0628f675213204e505599a068375da215
|
/R/tumor_microenvironment_ui.R
|
0f4508f8e7ea700a80b2358fd4608b41a40393dc
|
[
"MIT"
] |
permissive
|
CRI-iAtlas/iatlas-app
|
a61d408e504b00126796b9b18132462c5da28355
|
500c31d11dd60110ca70bdc019b599286f695ed5
|
refs/heads/staging
| 2023-08-23T11:09:16.183823
| 2023-03-20T21:57:41
| 2023-03-20T21:57:41
| 236,083,844
| 10
| 3
|
NOASSERTION
| 2023-03-21T21:27:26
| 2020-01-24T21:07:54
|
R
|
UTF-8
|
R
| false
| false
| 578
|
r
|
tumor_microenvironment_ui.R
|
# UI for the "Tumor Microenvironment" iAtlas Explorer page.
#
# @param id Shiny module id; namespaced via shiny::NS for the two
#   sub-module UIs.
# @return A shiny::tagList with the page title, an intro text box, and
#   two section boxes (cell proportions, cell type fractions).
tumor_microenvironment_ui <- function(id) {
  ns <- shiny::NS(id)

  page_title <- iatlas.modules::titleBox(
    "iAtlas Explorer — Tumor Microenvironment"
  )

  intro_text <- iatlas.modules::textBox(
    width = 12,
    shiny::includeMarkdown(get_markdown_path("tumor_microenvironment"))
  )

  proportions_section <- iatlas.modules::sectionBox(
    title = "Overall Cell Proportions",
    module_ui(ns("tumor_microenvironment_cell_proportions"))
  )

  fractions_section <- iatlas.modules::sectionBox(
    title = "Cell Type Fractions",
    module_ui(ns("tumor_microenvironment_type_fractions"))
  )

  shiny::tagList(page_title, intro_text, proportions_section, fractions_section)
}
|
80e89e187407d6849e05056edfae907cfa720307
|
7e5c434468d8f97cc4cee95c467967280064c48e
|
/analyses/Linear/05-convergence-linear.R
|
ce9283cbb0d4d52e8d17c6aedc5c2ed5e9472010
|
[
"MIT"
] |
permissive
|
nhcooper123/NATRICINE-ecomorph
|
d9fce11d79721ee2aa14f9f997d174598f08076a
|
54e67f4a112cc6a7f84945354d3ef870a53ada3e
|
refs/heads/master
| 2023-04-14T23:06:19.814253
| 2022-11-23T16:52:49
| 2022-11-23T16:52:49
| 299,321,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,811
|
r
|
05-convergence-linear.R
|
# Phylogenetic convergence testing for linear data
# Stayton's distance-based metric
# Modified 2022
#-------------------------------------
# Load libraries
library(ape)
library(geomorph)
library(geiger)
library(phytools)
library(convevol)
library(tidyverse)
#------------------------------------------------------------
# Import data species means outputs and tree file
#------------------------------------------------------------
# species means PCA outputs
snake <- read_csv("data/Linear/snakepca-LSR.csv")
# Read in the tree
tree <- read.nexus("data/Linear/new_datedtree-LM.nexus")
# Extract from the tree only those species which match with the data
sps <- name.check(tree, snake, data.names = snake$Species)
tree <- drop.tip(tree, sps$tree_not_data)
# Extract from the data species not in the tree
matches <- match(snake$Species, sps$data_not_tree, nomatch = 0)
snake <- snake[which(matches == 0), ]
# Look at the data
glimpse(snake)
str(tree)
#----------------------------------------------
# Stayton's distance-based convergence metrics
#----------------------------------------------
# Create data object for convevol functions:
# just the ten PCs, in matrix format, with species as rownames
pc.data <- as.matrix(snake[, 21:30])
rownames(pc.data) <- snake$Species
#--------------------------------------------------------------------------------
# Calculate CONVRAT for each habit type with simulations and save the results.
# Refactor: the original repeated the same ~25-line block five times (once
# per habit); this helper runs the analysis once for a given habit category
# and writes the C1-C4 scores and p-values to 'out_file'.
#   habit       - value of the Habit column defining the focal group
#   group_label - label written into the "group" column of the output
#   out_file    - path of the csv written with write_csv
#   nsim        - simulations for convratsig (slow: can take days at 100)
# 'snake', 'tree' and 'pc.data' are taken from the script environment.
#--------------------------------------------------------------------------------
run_convergence <- function(habit, group_label, out_file, nsim = 100) {
  # Tips belonging to the focal habit category
  focal_sp <- pull(filter(snake, Habit == habit), Species)
  # Observed convergence ratios
  ans <- convrat(phyl = tree,
                 phendata = pc.data,
                 convtips = focal_sp)
  print(ans)
  # Output table: four 'C' statistics C1-C4
  out <- data.frame(array(dim = c(4, 3)))
  names(out) <- c("group", "obsC", "PVals")
  rownames(out) <- c("C1", "C2", "C3", "C4")
  ### This takes a long time (days) #########
  # Significance via simulation
  x <- convratsig(phyl = tree,
                  phendata = pc.data,
                  convtips = focal_sp,
                  nsim = nsim)
  out["group"] <- group_label
  out["obsC"] <- x$ObservedCs
  out["PVals"] <- x$Pvals
  # Write to file
  write_csv(out, file = out_file)
  invisible(out)
}
# Burrowing vs the rest
run_convergence("Burrowing", "burrowers",
                "outputs/Linear/Tables/convevol-results-burrower-LSR.csv")
# Aquatic burrowing vs the rest
run_convergence("Aquatic burrower", "Aquaburrowers",
                "outputs/Linear/Tables/convevol-results-aquaburrower-LSR.csv")
# Terrestrial vs the rest
run_convergence("Terrestrial", "Terrestrial",
                "outputs/Linear/Tables/convevol-results-terrestrial-LSR.csv")
# Aquatic vs the rest
run_convergence("Aquatic", "Aquatic",
                "outputs/Linear/Tables/convevol-results-aquatic-LSR.csv")
# Semi Aquatic vs the rest
run_convergence("Semiaquatic", "semiaquatic",
                "outputs/Linear/Tables/convevol-results-semiaquatic-LSR.csv")
|
19b4bbbd32cf5b7b612e2c400e134ef84f8cc08b
|
a6566ebc69ed5e7a17e2091bdb10e7b6523eefc9
|
/R/test1.r
|
68927dd2c00902acf997074c88ef637bd089e42d
|
[
"MIT"
] |
permissive
|
mabotech/mabo.task
|
916e71650b45a24bb3852206a3755a7fd0342e47
|
96752a5ae94349a46e3b6f9369cc0933d5e37be0
|
refs/heads/master
| 2020-06-05T13:05:02.768838
| 2015-11-29T08:18:10
| 2015-11-29T08:18:10
| 23,750,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 428
|
r
|
test1.r
|
# Build a small wide-format demo table: one row per Rank, three measures.
wide_data <- read.table(text="Rank F1 F2 F3
1 500 250 50
2 400 100 30
3 300 155 100
4 200 90 10
5 300 100 20", header=TRUE)
print(wide_data)

# Reshape to long format (one row per Rank x measure) for plotting.
library(reshape2)
long_data <- melt(wide_data, id.vars = "Rank")
print(long_data)

# Grouped (dodged) bar chart of each measure by Rank, saved to test1.png.
library(ggplot2)
bar_chart <- ggplot(long_data, aes(x = Rank, y = value, fill = variable)) +
  geom_bar(stat = "identity", alpha = 0.9, position = "dodge")
print(bar_chart)
ggsave("test1.png", plot = bar_chart, width = 5, height = 4)
|
dc317fed63d8c549391cff901d69eaa9e1ff0605
|
1b90387128040d64490c3a27d5423e4c0bf4f0e9
|
/R/R Test.r
|
a71f8a1aaf4e263ea5a3e50cfc070f2db3f9e66a
|
[] |
no_license
|
michalmar/azure-databricks-projects
|
2f30f1228478d3bd5d977aceea684b3a1e6f6fe1
|
cc129912e22171f680a8cd18e8e816257035a7f3
|
refs/heads/master
| 2023-01-23T02:30:23.774756
| 2020-11-18T09:26:20
| 2020-11-18T09:26:20
| 299,838,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,046
|
r
|
R Test.r
|
# Databricks notebook source
# Scratch notebook exploring R startup profiles and workspace persistence
# on Databricks. Cells are delimited by "# COMMAND ----------" markers;
# "# MAGIC" lines are Databricks cell-language directives — do not edit.
# List the candidate R profile files that actually exist on this cluster.
candidates <- c( Sys.getenv("R_PROFILE"),
file.path(Sys.getenv("R_HOME"), "etc", "Rprofile.site"),
Sys.getenv("R_PROFILE_USER"),
file.path(getwd(), ".Rprofile") )
Filter(file.exists, candidates)
# COMMAND ----------
# MAGIC %sh
# MAGIC cat /usr/lib/R/etc/Rprofile.site
# COMMAND ----------
# MAGIC %sh
# MAGIC ls -l /dbfs/
# COMMAND ----------
# Persist the current R workspace to DBFS so it can be reloaded later
# (e.g. from RStudio — see the mirrored cells below).
save.image(file = "/dbfs/my_work_space.RData")
# load("/dbfs/my_work_space.RData")
# COMMAND ----------
# Restore the workspace saved above.
load("/dbfs/my_work_space.RData")
# COMMAND ----------
# NOTE(review): my_var is not defined in this notebook; it is presumably
# restored by the load() above (it is created in the RStudio cells below).
my_var
# COMMAND ----------
# COMMAND ----------
# MAGIC %md # In RStudio
# COMMAND ----------
# The same workflow as run from RStudio: inspect profiles, create a
# variable, save the workspace to DBFS, then reload it.
candidates <- c( Sys.getenv("R_PROFILE"),
file.path(Sys.getenv("R_HOME"), "etc", "Rprofile.site"),
Sys.getenv("R_PROFILE_USER"),
file.path(getwd(), ".Rprofile") )
Filter(file.exists, candidates)
my_var = "123"
save.image(file = "/dbfs/my_work_space.RData")
load("/dbfs/my_work_space.RData")
|
147093f231e70af97510df84da2b9d48e078ca61
|
bc113c18c979f88158d1e2557efb81bf01f44e35
|
/R/ml_mapping_tables.R
|
c9d06c04fdc2670b593e2b0311efacb6838b212c
|
[
"Apache-2.0"
] |
permissive
|
awblocker/sparklyr
|
6524ce9ac1d9c24392cd9e179ca8836851d3c93f
|
18d9df6a6755f8bd10f81721e71c4f818a115084
|
refs/heads/master
| 2020-04-08T09:27:47.362403
| 2018-11-26T20:02:49
| 2018-11-26T20:02:49
| 159,225,148
| 0
| 0
|
Apache-2.0
| 2018-11-26T20:03:28
| 2018-11-26T19:59:20
|
R
|
UTF-8
|
R
| false
| false
| 11,628
|
r
|
ml_mapping_tables.R
|
# Build the three lookup tables used to translate between sparklyr's
# snake_case names and Spark ML's camelCase names:
#   param_mapping_r_to_s - environment: R param name  -> Spark param name
#   param_mapping_s_to_r - environment: Spark name    -> R param name
#   ml_class_mapping     - environment: Spark ML class -> sparklyr suffix
# Returns a named list of the three environments.
#
# Fixes vs the previous version:
#   * "VectorIndexerModel" mapped to itself instead of
#     "vector_indexer_model"; because environment population keeps the
#     FIRST occurrence of a duplicated name, lookups returned the wrong
#     value. The entry is corrected and the redundant later duplicate
#     ("VectorIndexer"/"VectorIndexerModel") removed.
#   * Removed harmless duplicate entries ("max_iter",
#     "topic_distribution_col").
#   * The return value uses base list() instead of the soft-deprecated
#     rlang::ll(); for plain named arguments the result is identical.
ml_create_mapping_tables <- function() { # nocov start
  param_mapping_list <-
    list("input_col" = "inputCol",
         "output_col" = "outputCol",
         # ml_logistic_regression
         "elastic_net_param" = "elasticNetParam",
         "features_col" = "featuresCol",
         "fit_intercept" = "fitIntercept",
         "label_col" = "labelCol",
         "max_iter" = "maxIter",
         "prediction_col" = "predictionCol",
         "probability_col" = "probabilityCol",
         "raw_prediction_col" = "rawPredictionCol",
         "reg_param" = "regParam",
         "weight_col" = "weightCol",
         "aggregation_depth" = "aggregationDepth",
         "num_features" = "numFeatures",
         "lower_bounds_on_coefficients" = "lowerBoundsOnCoefficients",
         "upper_bounds_on_coefficients" = "upperBoundsOnCoefficients",
         "lower_bounds_on_intercepts" = "lowerBoundsOnIntercepts",
         "upper_bounds_on_intercepts" = "upperBoundsOnIntercepts",
         # ft_standard_scaler
         "with_mean" = "withMean",
         "with_std" = "withStd",
         # ft_vector_indexer
         "max_categories" = "maxCategories",
         # ft_imputer
         "missing_value" = "missingValue",
         "output_cols" = "outputCols",
         # ft_word2vec
         "vector_size" = "vectorSize",
         "min_count" = "minCount",
         "max_sentence_length" = "maxSentenceLength",
         "num_partitions" = "numPartitions",
         # ft_chisq_selector
         "num_top_features" = "numTopFeatures",
         "selector_type" = "selectorType",
         # ft_bucketed_random_projection_lsh
         "bucket_length" = "bucketLength",
         "num_hash_tables" = "numHashTables",
         # ft_idf
         "min_doc_freq" = "minDocFreq",
         # ft_r_formula
         "force_index_label" = "forceIndexLabel",
         # ft_string_indexer
         "handle_invalid" = "handleInvalid",
         "string_order_type" = "stringOrderType",
         # ft_one_hot_encoder
         "drop_last" = "dropLast",
         # ft_vector_assembler
         "input_cols" = "inputCols",
         # ft_stop_words_remover
         "case_sensitive" = "caseSensitive",
         "stop_words" = "stopWords",
         # ft_elementwise_product
         "scaling_vec" = "scalingVec",
         # ft_regex_tokenizer
         "min_token_length" = "minTokenLength",
         "to_lower_case" = "toLowercase",
         # ft_count_vectorizer
         "min_df" = "minDF",
         "min_tf" = "minTF",
         "vocab_size" = "vocabSize",
         # ft_quantile_discretizer
         "num_buckets" = "numBuckets",
         "num_buckets_array" = "numBucketsArray",
         "relative_error" = "relativeError",
         # ft_bucketizer
         "splits_array" = "splitsArray",
         # ft_feature-hasher
         "categorical_cols" = "categoricalCols",
         # ml_generalized_linear_regression
         "link_prediction_col" = "linkPredictionCol",
         "variance_power" = "variancePower",
         "link_power" = "linkPower",
         "offset_col" = "offsetCol",
         # ml_decision_tree_regressor
         "variance_col" = "varianceCol",
         "checkpoint_interval" = "checkpointInterval",
         "max_bins" = "maxBins",
         "max_depth" = "maxDepth",
         "min_info_gain" = "minInfoGain",
         "min_instances_per_node" = "minInstancesPerNode",
         "cache_node_ids" = "cacheNodeIds",
         "max_memory_in_mb" = "maxMemoryInMB",
         # ml_gbt_classifier
         "loss_type" = "lossType",
         "step_size" = "stepSize",
         "subsampling_rate" = "subsamplingRate",
         # ml_random_forest_classifier
         "num_trees" = "numTrees",
         "feature_subset_strategy" = "featureSubsetStrategy",
         # ml_naive_bayes
         "model_type" = "modelType",
         # ml_multilayer_perceptron_classifier
         "block_size" = "blockSize",
         "initial_weights" = "initialWeights",
         # ml_aft_survival_regression
         "censor_col" = "censorCol",
         "quantile_probabilities" = "quantileProbabilities",
         "quantiles_col" = "quantilesCol",
         # ml_isotonic_regression
         "feature_index" = "featureIndex",
         # ml_fpgrowth
         "items_col" = "itemsCol",
         "min_confidence" = "minConfidence",
         "min_support" = "minSupport",
         # ml_als
         "rating_col" = "ratingCol",
         "user_col" = "userCol",
         "item_col" = "itemCol",
         "implicit_prefs" = "implicitPrefs",
         "num_user_blocks" = "numUserBlocks",
         "num_item_blocks" = "numItemBlocks",
         "cold_start_strategy" = "coldStartStrategy",
         "intermediate_storage_level" = "intermediateStorageLevel",
         "final_storage_level" = "finalStorageLevel",
         # ml_lda
         "doc_concentration" = "docConcentration",
         "topic_concentration" = "topicConcentration",
         "topic_distribution_col" = "topicDistributionCol",
         "keep_last_checkpoint" = "keepLastCheckpoint",
         "learning_decay" = "learningDecay",
         "learning_offset" = "learningOffset",
         "optimize_doc_concentration" = "optimizeDocConcentration",
         # ml_kmeans
         "init_steps" = "initSteps",
         "init_mode" = "initMode",
         # ml_bisecting_kmeans
         "min_divisible_cluster_size" = "minDivisibleClusterSize",
         # evaluators
         "metric_name" = "metricName",
         # tuning
         "collect_sub_models" = "collectSubModels",
         "num_folds" = "numFolds",
         "train_ratio" = "trainRatio")
  # Populate the two param environments (forward and reverse lookup).
  param_mapping_r_to_s <- new.env(parent = emptyenv(),
                                  size = length(param_mapping_list))
  param_mapping_s_to_r <- new.env(parent = emptyenv(),
                                  size = length(param_mapping_list))
  invisible(lapply(names(param_mapping_list),
                   function(x) {
                     param_mapping_r_to_s[[x]] <- param_mapping_list[[x]]
                     param_mapping_s_to_r[[param_mapping_list[[x]]]] <- x
                   }))
  ml_class_mapping_list <- list(
    # feature (transformers)
    "PolynomialExpansion" = "polynomial_expansion",
    "Normalizer" = "normalizer",
    "Interaction" = "interaction",
    "HashingTF" = "hashing_tf",
    "Binarizer" = "binarizer",
    "Bucketizer" = "bucketizer",
    "DCT" = "dct",
    "ElementwiseProduct" = "elementwise_product",
    "IndexToString" = "index_to_string",
    "OneHotEncoder" = "one_hot_encoder",
    "RegexTokenizer" = "regex_tokenizer",
    "SQLTransformer" = "sql_transformer",
    "StopWordsRemover" = "stop_words_remover",
    "Tokenizer" = "tokenizer",
    "VectorAssembler" = "vector_assembler",
    "NGram" = "ngram",
    "VectorSlicer" = "vector_slicer",
    "FeatureHasher" = "feature_hasher",
    # feature (estimators)
    "VectorIndexer" = "vector_indexer",
    "VectorIndexerModel" = "vector_indexer_model",
    "StandardScaler" = "standard_scaler",
    "StandardScalerModel" = "standard_scaler_model",
    "MinMaxScaler" = "min_max_scaler",
    "MinMaxScalerModel" = "min_max_scaler_model",
    "MaxAbsScaler" = "max_abs_scaler",
    "MaxAbsScalerModel" = "max_abs_scaler_model",
    "Imputer" = "imputer",
    "ImputerModel" = "imputer_model",
    "ChiSqSelector" = "chisq_selector",
    "ChiSqSelectorModel" = "chisq_selector_model",
    "Word2Vec" = "word2vec",
    "Word2VecModel" = "word2vec_model",
    "IDF" = "idf",
    "IDFModel" = "idf_model",
    "QuantileDiscretizer" = "quantile_discretizer",
    "RFormula" = "r_formula",
    "RFormulaModel" = "r_formula_model",
    "StringIndexer" = "string_indexer",
    "StringIndexerModel" = "string_indexer_model",
    "CountVectorizer" = "count_vectorizer",
    "CountVectorizerModel" = "count_vectorizer_model",
    "PCA" = "pca",
    "PCAModel" = "pca_model",
    "BucketedRandomProjectionLSH" = "bucketed_random_projection_lsh",
    "BucketedRandomProjectionLSHModel" = "bucketed_random_projection_lsh_model",
    "MinHashLSH" = "minhash_lsh",
    "MinHashLSHModel" = "minhash_lsh_model",
    # regression
    "LogisticRegression" = "logistic_regression",
    "LogisticRegressionModel" = "logistic_regression_model",
    "LinearRegression" = "linear_regression",
    "LinearRegressionModel" = "linear_regression_model",
    "GeneralizedLinearRegression" = "generalized_linear_regression",
    "GeneralizedLinearRegressionModel" = "generalized_linear_regression_model",
    "DecisionTreeRegressor" = "decision_tree_regressor",
    "DecisionTreeRegressionModel" = "decision_tree_regression_model",
    "GBTRegressor" = "gbt_regressor",
    "GBTRegressionModel" = "gbt_regression_model",
    "RandomForestRegressor" = "random_forest_regressor",
    "RandomForestRegressionModel" = "random_forest_regression_model",
    "AFTSurvivalRegression" = "aft_survival_regression",
    "AFTSurvivalRegressionModel" = "aft_survival_regression_model",
    "IsotonicRegression" = "isotonic_regression",
    "IsotonicRegressionModel" = "isotonic_regression_model",
    # classification
    "GBTClassifier" = "gbt_classifier",
    "GBTClassificationModel" = "gbt_classification_model",
    "DecisionTreeClassifier" = "decision_tree_classifier",
    "DecisionTreeClassificationModel" = "decision_tree_classification_model",
    "RandomForestClassifier" = "random_forest_classifier",
    "RandomForestClassificationModel" = "random_forest_classification_model",
    "NaiveBayes" = "naive_bayes",
    "NaiveBayesModel" = "naive_bayes_model",
    "MultilayerPerceptronClassifier" = "multilayer_perceptron_classifier",
    "MultilayerPerceptronClassificationModel" = "multilayer_perceptron_classification_model",
    "OneVsRest" = "one_vs_rest",
    "OneVsRestModel" = "one_vs_rest_model",
    "LinearSVC" = "linear_svc",
    "LinearSVCModel" = "linear_svc_model",
    # recommendation
    "ALS" = "als",
    "ALSModel" = "als_model",
    # clustering
    "LDA" = "lda",
    "LDAModel" = "lda_model",
    "KMeans" = "kmeans",
    "KMeansModel" = "kmeans_model",
    "BisectingKMeans" = "bisecting_kmeans",
    "BisectingKMeansModel" = "bisecting_kmeans_model",
    "GaussianMixture" = "gaussian_mixture",
    "GaussianMixtureModel" = "gaussian_mixture_model",
    # fpm
    "FPGrowth" = "fpgrowth",
    "FPGrowthModel" = "fpgrowth_model",
    # tuning
    "CrossValidator" = "cross_validator",
    "CrossValidatorModel" = "cross_validator_model",
    "TrainValidationSplit" = "train_validation_split",
    "TrainValidationSplitModel" = "train_validation_split_model",
    # evaluation
    "BinaryClassificationEvaluator" = "binary_classification_evaluator",
    "MulticlassClassificationEvaluator" = "multiclass_classification_evaluator",
    "RegressionEvaluator" = "regression_evaluator",
    "ClusteringEvaluator" = "clustering_evaluator",
    # pipeline
    "Pipeline" = "pipeline",
    "PipelineModel" = "pipeline_model",
    "Transformer" = "transformer",
    "Estimator" = "estimator",
    "PipelineStage" = "pipeline_stage"
  )
  # Populate the class-name environment.
  ml_class_mapping <- new.env(parent = emptyenv(),
                              size = length(ml_class_mapping_list))
  invisible(lapply(names(ml_class_mapping_list),
                   function(x) {
                     ml_class_mapping[[x]] <- ml_class_mapping_list[[x]]
                   }))
  # Base list() is equivalent to the former rlang::ll() for plain named
  # arguments and avoids a deprecated dependency.
  list(param_mapping_r_to_s = param_mapping_r_to_s,
       param_mapping_s_to_r = param_mapping_s_to_r,
       ml_class_mapping = ml_class_mapping)
} # nocov end
|
43b84edcf83b60bd8b208295996c3406eaf0e22e
|
a348c7dab03439d4a07c93be85ab292faf67bc8d
|
/inst/tests/test_utils.R
|
ca8380cd6bd4cba72e462bf3227a3e162827cf9e
|
[] |
no_license
|
jasonzou/papaja
|
9aab91b0582a10232a2524c85ed9398d4f73c9f7
|
5d810c548560f31ced7a7712cb1858196c9c526d
|
refs/heads/master
| 2020-12-11T03:27:52.941586
| 2016-02-02T16:45:47
| 2016-02-02T16:45:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,557
|
r
|
test_utils.R
|
# Unit tests for utility functions convert_stat_name() and print_confint().
# Modernized: the soft-deprecated expect_that(..., equals(...)) /
# is_a(...) forms are replaced with testthat 3e expectations
# (expect_equal, expect_type, expect_length, expect_named); the asserted
# values are unchanged.
context("Utility functions")
test_that(
  "convert_stat_name()"
  , {
    # All chi-squared spellings collapse to the LaTeX \chi^2 symbol
    chis <- c(
      convert_stat_name("X-squared")
      , convert_stat_name("Chi-squared")
      , convert_stat_name("chi-squared")
      , convert_stat_name("X^2")
    )
    expect_equal(chis, rep("\\chi^2", length(chis)))
    # t and z statistics pass through unchanged
    expect_equal(convert_stat_name("t"), "t")
    expect_equal(convert_stat_name("z"), "z")
  }
)
test_that(
  "print_confint()"
  , {
    x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
    y <- c( 2.6,  3.1,  2.5,  5.0,  3.6,  4.0,  5.2,  2.8,  3.8)
    cor_test <- cor.test(x, y)
    # Explicit confidence level
    apa_confint <- print_confint(cor_test$conf.int, conf_level = 0.95)
    expect_type(apa_confint, "character")
    expect_equal(apa_confint, "95\\% CI $[-0.15$, $0.90]$")
    # gt1 = FALSE drops the leading zero (APA style for bounded stats)
    apa_confint <- print_confint(cor_test$conf.int, gt1 = FALSE)
    expect_equal(apa_confint, "95\\% CI $[-.15$, $.90]$")
    # Confidence level inferred from the conf.int attribute
    apa_confint <- print_confint(cor_test$conf.int)
    expect_equal(apa_confint, "95\\% CI $[-0.15$, $0.90]$")
    # Bare numeric bounds: no level prefix
    apa_confint <- print_confint(c(1, 2))
    expect_equal(apa_confint, "$[1.00$, $2.00]$")
    # A confint() matrix yields a named list, one entry per coefficient
    conf_int <- confint(lm(x ~ y))
    apa_confint <- print_confint(conf_int)
    expect_type(apa_confint, "list")
    expect_length(apa_confint, nrow(conf_int))
    expect_named(apa_confint, c("Intercept", "y"))
    expect_equal(apa_confint$Intercept, "95\\% CI $[19.52$, $51.78]$")
    expect_equal(apa_confint$y, "95\\% CI $[-0.95$, $7.67]$")
  }
)
|
3981ddc63232671039f8cbe1406e28c6b92cf89c
|
4f74e0dfdfce3c41c85b457cf21f10c25b0bcea8
|
/WScraping/clean_df.R
|
d48ada45f013a456b56b72fa8b81cf30e4c25156
|
[
"CC-BY-NC-SA-4.0",
"MIT"
] |
permissive
|
anahubel/WScrapingR
|
c77c87f2da6f1f86b71e27fc978e1430b5e5d051
|
d6c8aa02e9b7086ebf5f61e3c71fd9251090946e
|
refs/heads/main
| 2023-01-13T15:24:12.737204
| 2020-11-08T23:18:08
| 2020-11-08T23:18:08
| 310,109,840
| 0
| 0
|
MIT
| 2020-11-04T20:24:48
| 2020-11-04T20:24:47
| null |
UTF-8
|
R
| false
| false
| 151
|
r
|
clean_df.R
|
# Este fichero cargará el dataframe generado por el script "scrap" y lo limpiará.
# Además, generará el csv final listo para la entrega y análisis
|
44263e948b5385cfe30db0689721b3b8f6dc8088
|
da4f93c6da85ecdf2a3b9936fa72f26982e04875
|
/R/blockedReactions.R
|
1879c829112e56a98c038cc9710d23dcc1a580ea
|
[] |
no_license
|
gibbslab/g2f
|
11d052b443be50bd7b7ec4a862cff3d99186c3ef
|
c5f42de12a7e67af1ea83a38e2d9b4c96519de5d
|
refs/heads/master
| 2021-01-16T21:38:43.440135
| 2020-12-05T06:41:27
| 2020-12-05T06:41:27
| 54,038,847
| 7
| 5
| null | 2020-12-05T06:41:28
| 2016-03-16T14:28:15
|
R
|
UTF-8
|
R
| false
| false
| 1,561
|
r
|
blockedReactions.R
|
#' @export blockedReactions
#' @importFrom "sybil" "optimizeProb"
#' @author Andres Pinzon-Velasco <ampinzonv@unal.edu.co> - Mantainer: Daniel Camilo Osorio <dcosorioh@unal.edu.co>
#' @title Identify blocked reactions in a metabolic network
# Bioinformatics and Systems Biology Lab | Universidad Nacional de Colombia
# Experimental and Computational Biochemistry | Pontificia Universidad Javeriana
#' @description A blocked reaction in a metabolic network is a reaction that not participate in any optimization solution. This function set as objective function each one of the reactions (one by time) in the model, and identifies the reactions without flux under all scenarios.
#' @param model A valid model for the \code{'sybil'} package. An object of class modelorg.
#' @return A vector with the reaction ids of the blocked reactions
#' @examples
#' \dontrun{
#' # Loading a model for the 'sybil' package
#' data("Ec_core")
#'
#' # Identifying blocked reactions
#' blockedReactions(Ec_core)}
#' @keywords Blocked reactions genome scale metabolic reconstruction
blockedReactions <- function(model) {
  # Reactions observed carrying flux in at least one FBA solution
  locked <- NULL
  pb <- txtProgressBar(min = 1, max = model@react_num, style = 3)
  # Close the progress bar even if optimizeProb() errors mid-loop
  on.exit(close(pb), add = TRUE)
  # seq_len avoids the 1:0 footgun if the model has zero reactions
  for (reaction in seq_len(model@react_num)) {
    setTxtProgressBar(pb, reaction)
    # Make the current reaction the sole objective
    model@obj_coef <- rep(0, model@react_num)
    model@obj_coef[reaction] <- 1
    FBA <- sybil::optimizeProb(model)
    # Record every reaction with non-zero flux in this solution
    locked <- unique(c(locked, model@react_id[as.vector(FBA@fluxdist@fluxes != 0)]))
  }
  # Blocked = reactions never seen carrying flux under any objective
  locked <- model@react_id[!model@react_id %in% locked]
  return(locked)
}
|
e0fb76c56cff07aa065aa726379a0a1297b24af2
|
71db4a78c8a989b58a0d839a77d58d1774dbec5f
|
/Code/R/Monte Carlo Integration.R
|
0753d6baf24863b3296e3a67211ed66981c1724e
|
[] |
no_license
|
saulmoore1/MSc_CMEE
|
906a7bdf09528c39c0daf6e37f2d722b8ad7bd3d
|
5bfd0a5f696c59a092aa9df5536169d905d7ab69
|
refs/heads/master
| 2022-04-30T20:14:59.660442
| 2022-03-30T11:28:15
| 2022-03-30T11:28:15
| 158,312,708
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,798
|
r
|
Monte Carlo Integration.R
|
#!/usr/bin/env R
###############################################
# Day 2 - Monte Carlo integration & Gene drive
###############################################
# Monte Carlo estimate of the integral of sqrt(1 - x^2) over [0, 1]
# (area of a quarter unit circle), which equals pi/4.
x <- runif(20000,0,1) # Sample 20000 numbers from a random uniform distribution of 0 and 1's
mean(sqrt(1-x^2))
pi/4 # Roughly equal
# Monte Carlo estimate of E[X * exp(-X)] for X ~ Exp(1), i.e.
# integral of x * exp(-2x) over [0, Inf) = 1/4.
x <- rexp(20000,1) # Random numbers from exponential distribution
mean(x*exp((-1)*x))
# Simulate allele-frequency change under gene drive, drift and selection
# in a diploid population of N individuals over t generations.
#
# Args:
#   N - initial population size (diploids)
#   M - Beverton-Holt density parameter (currently unused: bh() is
#       defined but never called — work in progress)
#   t - number of generations to simulate
#   p - initial frequency of allele 0
#   R - Beverton-Holt growth rate (also unused for now)
# Returns: list(population = per-generation 2 x N genotype matrices,
#               allele.freq = per-generation frequency of allele 0)
drive.drift.selection <- function(N=500, M=500, t=10, p=0.5, R=2) {
# One 2-row matrix per generation; row = one allele copy per individual
population <- list()
length(population) <- t+1
size <- rep(NA, t+1)
size[1] <- N
allele.freq <- rep(NA, t+1)
allele.freq[1] <- p
# k copies of allele 0 among the 2N allele copies, shuffled into pairs
k <- ceiling(2*N*p)
population[[1]] <- matrix(sample(c(rep(0, k), rep(1, 2*N-k))), nrow=2)
# Beverton-Holt growth — NOTE(review): defined but never used below
bh <- function(N, R, M){
return(R*N/(1+N/M))
}
# First genotype (00), heterozygotes (01), and homozygous positive (11) is lethal
# fitness <- c(1, 1, 0) # homozygous 11 (position 3) is lethal
# gamete.0 <- c(1, 1-d, 0)
# gamete.1 <- c(0, d, 1)
# while (allele.freq > 0 & allele.freq < 1) {
for (i in 2:(t+1)) {
# Drop lethal 11 homozygotes (column sum == 2) from the parent generation
# NOTE(review): this filtered matrix is immediately overwritten two lines
# below, so the selection step currently has no effect — TODO confirm intent
population[[i]] <- population[[i-1]][,colSums(population[[i-1]])!=2]
allele.freq[[i]] <- sum(population[[i-1]]==0)/(2*N)
# Binomial resampling of 2N allele copies at last generation's frequency
population[[i]] <- matrix(sample(0:1, size=2*N, replace = TRUE, prob = c(allele.freq[i-1], 1-allele.freq[i-1])), nrow=2)
# NOTE(review): this clobbers the preallocated 'size' VECTOR with a scalar,
# and length() of a matrix counts entries (2N), not individuals —
# presumably 'size[i] <- ncol(population[[i]])' was intended
size <- length(population[[i]]) # Population size must be made to decrease each iteration (in addition to the removal of lethal 11 homozygotes)
}
return(list(population=population, allele.freq=allele.freq))
}
# Smoke run with a tiny population and three generations
drive.drift.selection(N=10, M=10, t=3, p=0.5, R=2)
##############################################
# Example
##############################################
# SIMULATING DRIVE-DRIFT-SELECTION BALANCE - Assumes random mating
drive.drift.selection <- function(q=0.5, d=0.8, N=500, t=10, R0=2, M=500) {
  # Simulate gene-drive / drift / selection balance under random mating.
  #
  # q  - initial frequency of the lethal (driven) allele
  # d  - homing rate of the drive in heterozygotes
  # N  - initial population size
  # t  - number of generations to simulate
  # R0, M - Beverton-Holt growth parameters (together set carrying capacity)
  #
  # Returns a list with the per-generation genome matrices, population
  # sizes, and frequencies of the driven allele.

  # Genotype frequencies from a 2-row genome matrix: column sums of
  # 0 / 1 / 2 correspond to genotypes 00 / 01 / 11.
  geno_freq <- function(pop) {
    allele_sums <- apply(pop, 2, sum)
    c(sum(allele_sums == 0), sum(allele_sums == 1), sum(allele_sums == 2)) / length(allele_sums)
  }

  # Beverton-Holt density-dependent population dynamics.
  beverton_holt <- function(n, R0, M) {
    R0 * n / (1 + n / M)
  }

  # Genotypes ordered 00, 01, 11; the 11 homozygote is lethal.
  fitness <- c(1, 1, 0)
  # Gamete output per genotype: the drive biases heterozygote gametes
  # toward the driven allele at rate d.
  gamete0 <- c(1, 1 - d, 0)
  gamete1 <- c(0, d, 1)

  # Per-generation storage: one 2 x n individual matrix per generation.
  population <- list()
  length(population) <- (t + 1)
  population.size <- rep(NA, t + 1)
  population.size[1] <- N
  allele.freq.q <- rep(NA, t + 1)
  allele.freq.q[1] <- q

  # Initial population assuming Hardy-Weinberg equilibrium.
  k <- ceiling(2 * N * q)
  population[[1]] <- matrix(sample(c(rep(0, 2 * N - k), rep(1, k))), nr = 2)

  for (gen in 1:t) {
    freqs <- geno_freq(population[[gen]])
    post_selection <- freqs * fitness / sum(freqs * fitness)
    gametic <- c(sum(gamete0 * post_selection), sum(gamete1 * post_selection))
    # Density regulation on survivors (lethal 11 individuals removed first).
    population.size[gen + 1] <- floor(beverton_holt(population.size[gen] * (1 - freqs[3]), R0 = R0, M = M))
    # Guard clause: stop the simulation once the population dies out.
    if (population.size[gen + 1] < 1) {
      print('Population collapsed!')
      return(list(population = population, population.size = population.size, allele.freq.q = allele.freq.q))
    }
    # Reproduction: draw next generation's alleles from the gametic pool.
    population[[gen + 1]] <- matrix(sample(0:1, size = 2 * population.size[gen + 1], replace = TRUE, prob = gametic), nr = 2)
    allele.freq.q[gen + 1] <- sum(population[[gen + 1]] == 1) / (2 * population.size[gen + 1])
  }
  list(population = population, population.size = population.size, allele.freq.q = allele.freq.q)
}
###########################################
# TRY IT!
result <- drive.drift.selection(q=0.1, d=0.8, N=500, t=1000000, R0=2, M=500)
# Plot one point per stored generation. BUG FIX: the original x-axis was
# seq(1, res, by=1) where `res` is undefined, so the plot call errored.
plot(result$population.size ~ seq_along(result$population.size), type = "l")
|
6751f8a9c67a7bfc783b93fcfedbd52982603fdd
|
376d771270ef5c36d60b9441a76d96d38529dace
|
/mudel.R
|
fc1fcaf6f0188bd8ec51612816030993fef09982
|
[] |
no_license
|
AndresVork/koroonajaressursid
|
823cbb67227c6876aeb6ea200c0a893fc66543d1
|
a3e50b111c146def6b467ee61fa421952f9ad6ee
|
refs/heads/master
| 2021-03-21T02:36:52.433897
| 2020-03-16T07:47:06
| 2020-03-16T07:47:06
| 247,256,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,649
|
r
|
mudel.R
|
# Markov model for planning health-care resources (COVID-19)
# TODO: forecast the number of new cases
# changelog
# 14.03.2020 - Andres
library(dplyr)
# Input parameters
# model states
statenames <- c("homecare", "stationarycare", "intensivecare", "recovered", "deaths")
# initial distribution, it should be possible to be updated
# all new cases enter via homecare
pt0 <- c(1, 0, 0, 0, 0.00)
library(heemod)
# transition matrix between states - need information from health specialists
mat_trans <- define_transition(
  0.85, 0.03, 0.02, 0.10, 0.00, #homecare
  0.05, 0.90, 0.03, 0.01, 0.01, #stationary care
  0.02, 0.50, 0.45, 0.00, 0.03, #intensive care
  0, 0, 0, 1, 0, #recovered stay recovered
  0, 0, 0, 0, 1,
  state_names = c("homecare", "stationarycare", "intensivecare", "recovered", "deaths")
)
# plot the transition matrix as a graph
library(diagram)
#plot(mat_trans)
#plot(mat_trans, relsize = 0.5, box.cex = 0.6, cex = 0.7)
plot(mat_trans, box.cex = 0.6, cex = 0.7, box.type= "ellipse", relsize = 0.75,
     pos = c(2,2,1))
# Input data: daily Estonian case counts from the koroonakaart project
library(jsonlite)
data <- fromJSON("https://raw.githubusercontent.com/okestonia/koroonakaart/master/public/EstonianData.json")
confirmed <- data$confirmed %>% as.data.frame()
recovered <- data$recovered %>% as.data.frame()
# BUG FIX: deaths was previously read from data$recovered.
deaths <- data$deaths %>% as.data.frame()
confirmed <- confirmed %>% mutate(date = as.Date(date)) %>%
  # drop the day of the analysis; its data are usually still incomplete
  dplyr::filter(date<Sys.Date())
# sanity check: daily new cases
confirmed %>% group_by(date) %>%
  summarise(newcases = n()) %>%
  ggplot(aes(x=date, y = newcases)) +
  geom_col()
# daily counts with zero-filled gaps for days without reported cases
confirmeddaily <- confirmed %>% group_by(date) %>%
  summarise(newcases = n()) %>%
  right_join(data.frame(date=seq.Date(from=min(confirmed$date), to = Sys.Date()-1,
                                      by = "day"))) %>%
  mutate(newcases = ifelse(is.na(newcases), 0, newcases))
# cumulative cases
confirmedcumul <- confirmeddaily %>%
  mutate(cumulcases = cumsum(newcases))
# sanity check: cumulative curve
confirmedcumul %>%
  ggplot(aes(x=date, y = cumulcases)) +
  geom_col()
# Define strategy for Markov chain simulation
state_names <- c("homecare", "stationarycare", "intensivecare", "recovered", "deaths")
# Each state carries a cost to the health-care system and a health utility
State_homecare<- define_state(cost =0,utility =0.85)
State_stationarycare<- define_state(cost = 100, utility =0.6)
State_intensivecare<- define_state(cost =1000, utility =0.2)
State_recovered<- define_state(cost =0,utility =1)
State_deaths<- define_state(cost =0,utility =0)
mod1 <- define_strategy(
  transition = mat_trans,
  homecare = State_homecare,
  stationarycare = State_stationarycare,
  intensivecare = State_intensivecare,
  recovered = State_recovered,
  deaths = State_deaths
)
# model - one single inflow from 13 March 2020
res_mod <- run_model(
  mod1,
  init = pt0*as.numeric(confirmeddaily[confirmeddaily$date=="2020-03-13", "newcases"]),
  cycles= 14,
  method = "end",
  effect = utility,
  cost = cost
)
summary(res_mod)
plot(res_mod)
# Continuous inflow
#pt0 %*% t(confirmeddaily$newcases)
# TODO: bring in the time series of past cases and a forecast of new ones
# Scenario: 100 new patients arriving every day from today onward
inflow = define_inflow(
  homecare=100,
  stationarycare=0,
  intensivecare = 0,
  recovered =0,
  deaths=0
)
res_mod1 <- run_model(
  mod1,
  init = pt0*as.numeric(confirmeddaily[confirmeddaily$date=="2020-03-13", "newcases"]),
  inflow = inflow,
  cycles= 200,
  method = "end",
  effect = utility,
  cost = cost
)
#summary(res_mod1)
plot(res_mod1)
plot(res_mod1, panels = "by_state", free_y = TRUE)
# About 60 intensive-care beds would be needed under these assumptions.
|
1889029a67d8acd8f7d9fd39442df0f9236b4df5
|
c838d151d53af5a9be67b4295a5097f1f84d9aa6
|
/QTL_mapping/Ha_QTL_deltaACR15_6_perms.R
|
6c075c93be1644d144291e988046ca3575686ca5
|
[
"MIT"
] |
permissive
|
ytakemon/1415-Col4a5xDO-Project
|
7d3a018042ab31217a00f1f1d383a4cbee710faf
|
51c73f57b07ac2c340e623de582755aebd180e21
|
refs/heads/master
| 2023-06-25T18:58:51.670520
| 2021-07-27T20:47:33
| 2021-07-27T20:47:33
| 76,672,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,409
|
r
|
Ha_QTL_deltaACR15_6_perms.R
|
# QTL permutation analysis for the change in ACR (albumin-to-creatinine
# ratio) between week 15 and week 6 in Col4a5xDO mice.
# NOTE(review): paths are cluster-specific; assumes the .Rdata files load
# best.genoprobs.192, GM_snps, and the kinship object used by scanone.perm.
library(DOQTL)
setwd("/hpcdata/ytakemon/Col4a5xDO")
# load genotype probabilities, SNP map, and kinship matrix
load("./GBRS_reconstruction/reconstruct/best.compiled.genoprob/genoprobs/best.genoprobs.192.Rdata")
load("GBRS_reconstruction/reconstruct/best.compiled.genoprob/GM_snps.Rdata")
load("GBRS_reconstruction/reconstruct/best.compiled.genoprob/kinship/K_GS.Rdata")
pheno <- read.delim("./Phenotype/1415_master_pheno.txt", sep = "\t", header = TRUE)
# clean phenotype data
rownames(pheno) <- make.names(pheno[,1]) #move sample ID to row names
pheno <- pheno[rownames(best.genoprobs.192),] #subset pheno to match 192 samples
# phenotype of interest: change in ACR between week 15 and week 6
pheno$delta_ACR15_6 <- pheno$ACR15WK - pheno$ACR6WK
pheno[pheno == -Inf] = NA
# keep NAs so sample rows stay aligned with the genoprob array
options(na.action = "na.pass")
pheno <- pheno[,c("MouseID", "Sex", "ACR6WK", "ACR15WK", "delta_ACR15_6")]
# sex covariate as a single 0/1 column data frame
sex.covar <- model.matrix(~0+Sex, data = pheno)
colnames(sex.covar)[2] <- "sex"
sex.covar <- sex.covar[,"sex"]
sex.covar <- as.data.frame(sex.covar)
colnames(sex.covar)[1] <- "sex"
# run 1000 QTL permutations to derive significance thresholds
perms <- scanone.perm(pheno = pheno, pheno.col = "delta_ACR15_6",
        probs = best.genoprobs.192, addcovar = sex.covar,
        snps = GM_snps, nperm = 1000)
perms.1000.qtl.deltaACR15_6.192 <- perms
save(perms.1000.qtl.deltaACR15_6.192, file = "./GBRS_reconstruction/reconstruct/best.compiled.genoprob/perm1000/perms.1000.qtl.delta_ACR15_6.192.Rdata")
|
4c103148e7ff2ea6bbba8819c0d3c06cdb8416e9
|
5e42a668e417fd55fe28ecee719c759016f963b9
|
/tests/testthat/dummy_projects/project/one_start_no_end.R
|
0e90c45db425ed8d4f450783257cda5a15a29ab0
|
[
"MIT"
] |
permissive
|
cordis-dev/lintr
|
2120e22820e8499ca3066fa911572fd89c49d300
|
cb694d5e4da927f56c88fa5d8972594a907be59a
|
refs/heads/main
| 2023-08-05T08:50:42.679421
| 2023-07-25T13:21:29
| 2023-07-25T13:21:29
| 225,583,354
| 0
| 0
|
NOASSERTION
| 2019-12-03T09:41:30
| 2019-12-03T09:41:30
| null |
UTF-8
|
R
| false
| false
| 24
|
r
|
one_start_no_end.R
|
#nolint start
c(1,2)
|
e4ea55044007ef7f0dc37968c8edd3700b2d9c59
|
dbcfe68342837b889194a14e9da8173d0415be41
|
/man/set_size.Rd
|
2c117e554890e4f37d70a739e5c3757b15ed3626
|
[] |
no_license
|
zachcp/depict
|
3d35bcb10e60d776d183ab60de7bbdb7e1f153c2
|
e1f0de362f66ad6a89aa1a6e209c1fbc7d742e65
|
refs/heads/master
| 2022-11-28T14:16:08.744106
| 2022-03-14T15:11:12
| 2022-03-14T15:11:12
| 78,388,052
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 244
|
rd
|
set_size.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/depict.R
\name{set_size}
\alias{set_size}
\title{set_size}
\usage{
set_size(dg, width, height)
}
\arguments{
\item{dg}{A depiction generator object.}

\item{width}{Width of the generated depiction.}

\item{height}{Height of the generated depiction.}
}
\description{
set_size
}
|
6c1700cf2e8b5da914ea3e497f23a88997dce075
|
bce8156a9e5b39f17f5c4f6fcd4c9fbff4d74897
|
/R/utils-methods.R
|
bd916189810869c6d1f42f125141a28fc4a53bab
|
[] |
no_license
|
cran/fPortfolio
|
fb8f26496a32fd8712361a20cbb325c0bfcffe01
|
d0189fabdf712c043fb13feb80f47696ac645cef
|
refs/heads/master
| 2023-04-29T14:30:55.700486
| 2023-04-25T06:50:06
| 2023-04-25T06:50:06
| 17,695,954
| 10
| 10
| null | 2015-04-23T18:15:24
| 2014-03-13T04:38:33
|
R
|
UTF-8
|
R
| false
| false
| 1,703
|
r
|
utils-methods.R
|
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
###############################################################################
# FUNCTION:
# print.solver
# summary.solver
###############################################################################
print.solver <-
    function(x, ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Prints a solver result: solver name, each component of the
    #   solution vector, objective value, status, and message.

    # Number of Variables:
    nSolution <- length(x$solution)

    # Print:
    cat("\nSolver: ", x$solver)
    cat("\nSolution: ", 1, ":", x$solution[1])
    # Guard the tail loop: with a length-1 solution, 2:nSolution would be
    # c(2, 1) and print a spurious NA line plus a duplicate of entry 1.
    if (nSolution > 1) {
        for (i in 2:nSolution)
            cat("\n ", i, ":", x$solution[i])
    }
    cat("\nObjective: ", x$objective)
    cat("\nStatus: ", x$status)
    cat("\nMessage: ", x$message)
    cat("\n")

    # Return the object invisibly, as print methods conventionally do.
    invisible(x)
}
# -----------------------------------------------------------------------------
.summary.solver <-
    function(object, ...)
{
    # A function implemented by Diethelm Wuertz

    # Summary display for a solver result: show only the first
    # component of the result object.
    first_part <- object[1]
    print(first_part)
}
###############################################################################
|
25a5132d77f515b84eccf39328cb8f173c0f30ff
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.security.identity/man/guardduty_get_findings_statistics.Rd
|
5f6c9e8dd15bc128e8c94b100b6673ae0dcc2f27
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 912
|
rd
|
guardduty_get_findings_statistics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_get_findings_statistics}
\alias{guardduty_get_findings_statistics}
\title{Lists Amazon GuardDuty findings statistics for the specified detector ID}
\usage{
guardduty_get_findings_statistics(
DetectorId,
FindingStatisticTypes,
FindingCriteria = NULL
)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector that specifies the GuardDuty service whose
findings' statistics you want to retrieve.}
\item{FindingStatisticTypes}{[required] The types of finding statistics to retrieve.}
\item{FindingCriteria}{Represents the criteria that is used for querying findings.}
}
\description{
Lists Amazon GuardDuty findings statistics for the specified detector ID.
See \url{https://www.paws-r-sdk.com/docs/guardduty_get_findings_statistics/} for full documentation.
}
\keyword{internal}
|
db141c1adddaa358607ae6d2158b5b3a6774d03b
|
6df14ca70004ee08676eef709aa7d2c920b45c5d
|
/final-script.r
|
f609548b0e5605ddd45dd867864f05e905be0cac
|
[] |
no_license
|
LarissaHa/aqm18
|
389f2a0e11e83885d551c6450a82347681e44267
|
e03cf7fba7b1d6f39cb7be12a3905038d9495cd1
|
refs/heads/master
| 2020-04-24T23:23:09.854646
| 2019-02-24T13:52:07
| 2019-02-24T13:52:07
| 172,342,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55,032
|
r
|
final-script.r
|
##################################################################
# Local R-File
# Final Submission for AQM
# Larissa Haas
##################################################################
# Note: The plots and long running code snippets are
# commented out. Please consider if it is really
# necessary to run them, as this might take some while.
# Eventually, I do a lot of plotting in the code, but I can't
# show everything in my paper. I didn't want to delete it, either.
# I hope the code is nevertheless interesting.
##################################################################
# Setting up the Working Environment
##################################################################
library(neuralnet)
library(devtools)
library(stargazer)
library(RCurl)
library(jsonlite)
library(caret)
library(ggplot2)
library(e1071)
library(h2o)
library(statmod)
library(MASS)
library(corrplot)
library(data.table)
library(ROSE)
library(reshape2)
library(ggpubr)
F1 <- function(table){
  # F1 score for a 2x2 confusion table (rows = predicted, cols = actual,
  # level order 0 then 1, as produced by table(pred, actual)).
  # Returns NaN when precision and recall are both zero/undefined,
  # matching the original behavior. The unused `tn` local was removed.
  tp <- table[2,2]
  fp <- table[2,1]
  fn <- table[1,2]
  precision <- tp / (tp + fp)
  recall <- tp / (tp + fn)
  (2 * precision * recall) / (precision + recall)
}
ACC <- function(table){
  # Accuracy for a 2x2 confusion table: correctly classified cases
  # (main diagonal) divided by all cases.
  correct <- table[1,1] + table[2,2]
  wrong <- table[1,2] + table[2,1]
  correct / (correct + wrong)
}
ll_logit <- function(theta, y, X) {
  # Log-likelihood of a binary logit model.
  # theta: coefficient vector (first ncol(X) entries are used),
  # y: 0/1 outcomes, X: design matrix (including intercept column).
  coefs <- theta[1:ncol(X)]
  eta <- X %*% coefs
  prob <- 1/(1 + exp(-eta))
  sum(y * log(prob) + (1 - y) * log(1 - prob))
}
##################################################################
# Loading and Pre-Processing the Data
##################################################################
# Load the two ESS rounds and pool them, tagging the round of origin.
ess1 <- read.table("C:/Users/laris/Desktop/MMDS/Semester3-FSS2018/Advanced-Quantitative-Methods/paper/Submission/ess1.csv", header=TRUE, sep=",")
ess6 <- read.table("C:/Users/laris/Desktop/MMDS/Semester3-FSS2018/Advanced-Quantitative-Methods/paper/Submission/ess6.csv", header=TRUE, sep=",")
ess1$round1 = 1
ess6$round1 = 0
data <- rbind(ess1, ess6)
dim(data)
# Mean-impute the numeric columns (indices hard-coded to this data layout).
data.complete <- data
for(i in c(4,5,6,10,11,12,13,14,15,16,17,18,19,20,21,22,23)){
  data.complete[is.na(data.complete[,i]), i] <- mean(data.complete[,i], na.rm = TRUE)
}
# Constant imputation for the remaining categorical/binary columns.
data.complete[is.na(data.complete[,3]), 3] <- 1
data.complete[is.na(data.complete[,7]), 7] <- 1
data.complete[is.na(data.complete[,8]), 8] <- 0
data.complete[is.na(data.complete[,9]), 9] <- 0
# Keep the binary columns untouched and z-scale the remaining ones.
data.scaled <- cbind(data.complete[c(1, 3, 7, 8, 9, 25)], scale(data.complete[-c(1, 2, 3, 7, 8, 9, 24, 25)]))
# This part can be used to write and to load
# the already pre-processed data
#write.csv(data.complete, file = "ess-complete.csv")
#write.csv(data.scaled, file = "ess-scaled.csv")
#data.scaled <- read.table("ess-scaled.csv", header=TRUE, sep=",")
#data.scaled <- data.scaled[,-1]
# Splitting in Train and Test Data (90/10, seeded for reproducibility)
smp_size <- floor(0.9 * nrow(data.scaled))
set.seed(123)
train_ind <- sample(seq_len(nrow(data.scaled)), size = smp_size)
train <- data.scaled[train_ind,]
test <- data.scaled[-train_ind,]
# Balancing the Data by oversampling the minority class (ROSE)
train.bal <- ovun.sample(volact ~ ., data = train, method = "over", N = 114264)$data
# Defining X and y for later use
train.X <- train[, -1]
train.y <- train[, 1]
train.y <- factor(train.y, levels = 0:1)
# BUG FIX: the balanced design matrix and labels were previously taken
# from the unbalanced `train` data (and the factor call re-used train.y);
# they must come from `train.bal`.
train.X.bal <- train.bal[, -1]
train.y.bal <- train.bal[, 1]
train.y.bal <- factor(train.y.bal, levels = 0:1)
test.X <- test[, -1]
test.y <- test[, 1]
test.y <- factor(test.y, levels = 0:1)
##################################################################
# First Step into Neural Nets
##################################################################
# In this section I want to replicate the code we worked on
# in the tutorial. While doing this, necessary functions are
# defined. But most of the code is just "getting warm" wit NNs.
##################################################################
# Color the scatter of two predictors by the true outcome (volact).
col <- test$volact
col[test$volact == 1] <- adjustcolor("orange", alpha = 0.3)
col[test$volact == 0] <- adjustcolor("blue", alpha = 0.3)
#plot(test$wkhtot, test$social_trust, col = col, pch = 19,
#     main = "True Relationship \n (scaled values)",
#     bty = "n", las = 1,
#     xlab = "Total Work Hours (per Week)", ylab = "Social Trust")
# Fit a two-predictor logit by maximizing ll_logit directly.
# NOTE(review): this fits on the *test* set -- presumably intentional for
# the visual demo, but confirm before re-using downstream.
y <- test$volact
X <- cbind(1, test$wkhtot, test$social_trust)
startvals <- c(0, 0, 0)
res <- optim(par = startvals,fn = ll_logit, y = y, X = X,
             control = list(fnscale = -1),
             method = "BFGS"
)
# Predicted probabilities, then draw 0/1 outcomes from them.
mu <- X %*% res$par
p <- 1/(1 + exp(-mu))
y_hat <- rbinom(nrow(p), 1, p)
# Color the scatter by the sampled predictions for comparison.
col <- y_hat
col[y_hat == 1] <- adjustcolor("orange", alpha = 0.3)
col[y_hat == 0] <- adjustcolor("blue", alpha = 0.3)
#plot(test$wkhtot, test$social_trust, col = col, pch = 19,
#     main = "True Relationship",
#     bty = "n", las = 1,
#     xlab = "x1", ylab = "x2")
# Confusion table of the sampled logit predictions.
logit_pred <- table(y_hat, test$volact)
#logit_pred
ll_simple_nn <- function(theta, y, X){
  # Log-likelihood of a minimal feed-forward net: three logistic hidden
  # neurons (three weights each, theta[5:13]) feeding one logistic
  # output neuron (weights theta[1:4], including a bias term).
  gamma <- theta[1:4]
  sigmoid <- function(z) 1 / (1 + exp(-z))
  # Hidden layer activations, one column per neuron.
  hidden <- matrix(0, nrow = nrow(X), ncol = 3)
  for (j in 1:3) {
    w <- theta[(2 + 3 * j):(4 + 3 * j)]  # 5:7, 8:10, 11:13
    hidden[, j] <- sigmoid(X %*% w)
  }
  # Output layer: bias column plus the three hidden activations.
  Z <- cbind(1, hidden)
  p <- sigmoid(Z %*% gamma)
  sum(y * log(p) + (1 - y) * log(1 - p))
}
# initial values (random start for all 13 weights; stochastic, unseeded)
startvals <- rnorm(13)
ll_simple_nn(startvals, y, X)
# optimize the hand-written net's log-likelihood
resNN <- optim(par = startvals, fn = ll_simple_nn, y = y, X = X,
               control = list(fnscale = -1),
               hessian = F,
               method = "BFGS"
)
#resNN$par
# Unpack the fitted weights: output layer, then the three hidden neurons.
gammaEst <- resNN$par[1:4]
beta_neuron1Est <- resNN$par[5:7]
beta_neuron2Est <- resNN$par[8:10]
beta_neuron3Est <- resNN$par[11:13]
# Forward pass with the fitted weights.
mu_neuron1Est <- X %*% beta_neuron1Est
mu_neuron2Est <- X %*% beta_neuron2Est
mu_neuron3Est <- X %*% beta_neuron3Est
logitResponse <- function(mu) 1/(1+exp(-mu))
p_neuron1Est <- logitResponse(mu_neuron1Est)
p_neuron2Est <- logitResponse(mu_neuron2Est)
p_neuron3Est <- logitResponse(mu_neuron3Est)
Z <- cbind(1, p_neuron1Est, p_neuron2Est, p_neuron3Est )
mu <- Z %*% gammaEst
p <- logitResponse(mu)
# Sample 0/1 predictions from the fitted probabilities.
y_hat <- rbinom(nrow(p),1,p)
col <- y_hat
col[y_hat == 1] <- adjustcolor("orange", alpha = 0.3)
col[y_hat == 0] <- adjustcolor("blue", alpha = 0.3)
#plot(X[, 2], X[, 3], col = col, pch = 19,
#     main = "Predicted Values from a Neural Net",
#     bty = "n", las = 1,
#     xlab = "x1", ylab = "x2")
# Confusion table for the hand-written net.
nn_pred <- table(y_hat, test$volact)
#nn_pred
# Same two-predictor model fitted with the neuralnet package.
m <- neuralnet(volact ~ wkhtot + social_trust,
               train, hidden = 1)
p <- compute(m, test[,c(11, 14)])
predictions <- p$net.result
# Predictions: sampled from probabilities, and at fixed thresholds.
result_test <- rbinom(nrow(predictions),1,predictions)
result3 <- ifelse(predictions >= 0.3, 1, 0)
result2 <- ifelse(predictions >= 0.2, 1, 0)
result4 <- ifelse(predictions >= 0.4, 1, 0)
print(cor(result_test, test$volact))
prenn_pred <- table(result_test, test$volact)
#prenn_pred
#png("basic_nn.png", width = 800, height = 600)
#plot.nnet(m)
#dev.off()
# Comparison of the PRE-defined NN, our custom NN and the
# logistic regression (based on two input variables).
F1(nn_pred)
F1(logit_pred)
F1(prenn_pred)
F1(table(result2, test$volact))
F1(table(result3, test$volact))
F1(table(result4, test$volact))
##################################################################
# Logistic Regression: Calculated on the "Basic Model" Data
##################################################################
# Fit the full logit (all 22 predictors + intercept) on the training data.
y <- train$volact
X <- as.matrix(cbind(1, train[,-1]))
startvals <- c(rep(0, 23))
res <- optim(par = startvals,fn = ll_logit, y = y, X = X,
             control = list(fnscale = -1),
             method = "BFGS"
)
# Out-of-sample predicted probabilities on the test set.
mu <- as.matrix(cbind(1, test[,-1])) %*% res$par
p <- 1/(1 + exp(-mu))
# The following code snippet will be repeated more often, because
# I can have a look at different thresholds for evaluating the
# logistic regression results. The output will always be:
# 1. MAX Accuracy
# 2. MAX F1 measure
y_hat1 <- ifelse(p > 0.1, 1, 0)
y_hat2 <- ifelse(p > 0.2, 1, 0)
y_hat3 <- ifelse(p > 0.3, 1, 0)
y_hat4 <- ifelse(p > 0.4, 1, 0)
y_hat5 <- ifelse(p > 0.5, 1, 0)
y_hat6 <- ifelse(p > 0.6, 1, 0)
y_hat7 <- ifelse(p > 0.7, 1, 0)
y_hat8 <- ifelse(p > 0.8, 1, 0)
y_hat9 <- ifelse(p > 0.9, 1, 0)
# Threshold grid: column 1 = threshold, column 2 = accuracy, column 3 = F1.
# NOTE(review): the 0.9 threshold is commented out here (only 8 rows),
# presumably because no prediction exceeds 0.9 and table() would not be 2x2.
threshold <- cbind(seq(0.1, 0.8, length.out = 8),
                   c(ACC(table(y_hat1, test$volact)),
                     ACC(table(y_hat2, test$volact)),
                     ACC(table(y_hat3, test$volact)),
                     ACC(table(y_hat4, test$volact)),
                     ACC(table(y_hat5, test$volact)),
                     ACC(table(y_hat6, test$volact)),
                     ACC(table(y_hat7, test$volact)),
                     ACC(table(y_hat8, test$volact))),
                   #ACC(table(y_hat9, test$volact))),
                   c(F1(table(y_hat1, test$volact)),
                     F1(table(y_hat2, test$volact)),
                     F1(table(y_hat3, test$volact)),
                     F1(table(y_hat4, test$volact)),
                     F1(table(y_hat5, test$volact)),
                     F1(table(y_hat6, test$volact)),
                     F1(table(y_hat7, test$volact)),
                     F1(table(y_hat8, test$volact))))
                   #F1(table(y_hat9, test$volact))))
print("MAX Accuracy for BASIC Logistic Regression")
max(threshold[,2])
print("MAX F1 for BASIC Logistic Regression")
max(threshold[,3])
# The following plot shows the distribution of Accuracy and
# F1 for the different threshold levels. A threshold with both
# values high would be the best!
#acc <- ggplot(data.frame(threshold),aes(threshold[,1],threshold[,2]))+geom_line(aes(color="Accuracy"))+
# labs(color=" ") +
# ylab("accuracy value") + xlab("threshold")
#f1 <- ggplot(data.frame(threshold),aes(threshold[,1],threshold[,4]))+geom_line(aes(color="F1"))+
# labs(color=" ") +
# ylab("f1 value") + xlab("threshold")
#png("evaluation_log_threshold_unbal.png")
#myplot <- ggarrange(acc, f1 + rremove("x.text"),
# labels = c("ACC", "F1"),
# ncol = 2, nrow = 1)
#print(myplot)
#dev.off()
#print(myplot)
##################################################################
# Logistic Regression: Calculated on Balanced Data
##################################################################
# Same full logit, but fitted on the oversampled (balanced) training data.
y.bal <- train.bal$volact
X.bal <- as.matrix(cbind(1, train.bal[,-1]))
startvals <- c(rep(0, 23))
res <- optim(par = startvals,fn = ll_logit, y = y.bal, X = X.bal,
             control = list(fnscale = -1), hessian = TRUE,
             method = "BFGS"
)
# Out-of-sample predicted probabilities on the (unbalanced) test set.
mu <- as.matrix(cbind(1, test[,-1])) %*% res$par
p <- 1/(1 + exp(-mu))
# Threshold sweep (here all nine thresholds produce valid 2x2 tables).
y_hat1 <- ifelse(p > 0.1, 1, 0)
y_hat2 <- ifelse(p > 0.2, 1, 0)
y_hat3 <- ifelse(p > 0.3, 1, 0)
y_hat4 <- ifelse(p > 0.4, 1, 0)
y_hat5 <- ifelse(p > 0.5, 1, 0)
y_hat6 <- ifelse(p > 0.6, 1, 0)
y_hat7 <- ifelse(p > 0.7, 1, 0)
y_hat8 <- ifelse(p > 0.8, 1, 0)
y_hat9 <- ifelse(p > 0.9, 1, 0)
# Column 1 = threshold, column 2 = accuracy, column 3 = F1.
threshold <- cbind(seq(0.1, 0.9, length.out = 9),
                   c(ACC(table(y_hat1, test$volact)),
                     ACC(table(y_hat2, test$volact)),
                     ACC(table(y_hat3, test$volact)),
                     ACC(table(y_hat4, test$volact)),
                     ACC(table(y_hat5, test$volact)),
                     ACC(table(y_hat6, test$volact)),
                     ACC(table(y_hat7, test$volact)),
                     ACC(table(y_hat8, test$volact)),
                     ACC(table(y_hat9, test$volact))),
                   c(F1(table(y_hat1, test$volact)),
                     F1(table(y_hat2, test$volact)),
                     F1(table(y_hat3, test$volact)),
                     F1(table(y_hat4, test$volact)),
                     F1(table(y_hat5, test$volact)),
                     F1(table(y_hat6, test$volact)),
                     F1(table(y_hat7, test$volact)),
                     F1(table(y_hat8, test$volact)),
                     F1(table(y_hat9, test$volact))))
print("MAX Accuracy for Balanced Logistic Regression")
max(threshold[,2])
print("MAX F1 for Balanced Logistic Regression")
max(threshold[,3])
#acc <- ggplot(data.frame(threshold),aes(threshold[,1],threshold[,2]))+
# geom_line(aes(color="Accuracy"))+
# labs(color=" ") +
# ylab("accuracy value") + xlab("threshold")
#f1 <- ggplot(data.frame(threshold),aes(threshold[,1],threshold[,4]))+
# geom_line(aes(color="F1"))+
# labs(color=" ") +
# ylab("f1 value") + xlab("threshold")
#png("evaluation_log_threshold_bal.png")
#myplot <- ggarrange(acc, f1 + rremove("x.text"),
# labels = c("ACC", "F1"),
# ncol = 2, nrow = 1)
#print(myplot)
#dev.off()
#print(myplot)
##################################################################
# Logistic Regression: Tested with Pre-Defined Function
##################################################################
# Sanity check against the built-in GLM fit. NOTE(review): despite the
# name, `ols_model` is a logistic regression (binomial family), not OLS.
ols_model <- glm(y ~ X, family=binomial(link='logit'))
#summary(ols_model)
##################################################################
# Logistic Regression: Preparing Plot for Marginal Effects later
##################################################################
# Build a synthetic data grid for a marginal-effect plot: all predictors
# held at their test-set means while predictor `sel` varies over `range`.
# NOTE(review): sel = 7 indexes a column of test[,-1] -- confirm which
# variable that is in this data layout before interpreting the plot.
range <- seq(-4, 2, length.out = 20)
sel <- 7
plot.data <- matrix(NA, nrow = length(range), ncol = ncol(test[,-1]))
for(i in 1:ncol(test[,-1])){
  plot.data[,i] <- mean(test[,i])
}
for(i in 1:length(range)){
  plot.data[i, sel] <- range[i]
}
# Predicted probabilities along the grid, using the last fitted `res`.
mu <- as.matrix(cbind(1, plot.data)) %*% res$par
p <- 1/(1 + exp(-mu))
age.volact <- data.frame(cbind(range, p))
##################################################################
# Neural Net: Plotting Nets with Neuralnet-Library
##################################################################
#m <- neuralnet(volact ~ sclmeet,
# train, hidden = 1)
#p <- compute(m, test[,13])
#predictions <- p$net.result
#print(cor(predictions, test$volact))
#result <- ifelse(predictions >= 0.3, 1, 0)
#ACC(table(result, test$volact))
#F1(table(result, test$volact))
#plot.nnet(m)
#m2 <- neuralnet(volact ~ sclmeet + social_trust + tolerance +
# self_realisation + solidarity,
# train, hidden = 2)
#p2 <- compute(m2, test[,c(13, 14, 15, 16, 17)])
#predictions2 <- p2$net.result
#print(cor(predictions2, test$volact))
#result3 <- ifelse(predictions2 >= 0.3, 1, 0)
#result2 <- ifelse(predictions2 >= 0.2, 1, 0)
#result4 <- ifelse(predictions2 >= 0.4, 1, 0)
#ACC(table(result4, test$volact))
#F1(table(result4, test$volact))
#png("more-complex_nn.png", width = 800, height = 600)
#plot.nnet(m2)
#dev.off()
#m3 <- neuralnet(volact ~ round1 + female + yrbrn + eduyrs + domicil + married +
# children + houseperson + wkhtot + church + sclmeet + social_trust +
# tolerance + self_realisation + solidarity + tvpol + tvtot +
# political_interest + trust_exe + trust_leg + trstep + stfdem,
# train, hidden = 1)
#p3 <- compute(m3, test[,-c(1, 2, 25)])
#predictions3 <- p3$net.result
#print(cor(predictions3, test$volact))
#library(nnet)
#png("total-input_nn.png", width = 800, height = 600)
#plot.nnet(m3)
#dev.off()
##################################################################
# Neural Net: Models for Plotting Tuning Differences (Part 1)
##################################################################
# For this plot, I am tuning the decay rate, which is an
# additional weight parameter influencing the signals between
# the neurons.
#aus: R deep learning essentials
#m20.0 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.1 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.1),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.2 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.2),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.3 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.3),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.4 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.4),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.5 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.5),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.6 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.6),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.7 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.7),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.8 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.8),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.9 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0.9),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#m20.10 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 1),
# trControl= trainControl(method = "none"),
# MaxNWts = 1000,
# maxit = 100)
#yhat20.0 <- predict(m20.0)
#yhat20.1 <- predict(m20.1)
#yhat20.2 <- predict(m20.2)
#yhat20.3 <- predict(m20.3)
#yhat20.4 <- predict(m20.4)
#yhat20.5 <- predict(m20.5)
#yhat20.6 <- predict(m20.6)
#yhat20.7 <- predict(m20.7)
#yhat20.8 <- predict(m20.8)
#yhat20.9 <- predict(m20.9)
#yhat20.10 <- predict(m20.10)
#yhat_unseen20.0 <- predict(m20.0, as.matrix(test.X))
#yhat_unseen20.1 <- predict(m20.1, as.matrix(test.X))
#yhat_unseen20.2 <- predict(m20.2, as.matrix(test.X))
#yhat_unseen20.3 <- predict(m20.3, as.matrix(test.X))
#yhat_unseen20.4 <- predict(m20.4, as.matrix(test.X))
#yhat_unseen20.5 <- predict(m20.5, as.matrix(test.X))
#yhat_unseen20.6 <- predict(m20.6, as.matrix(test.X))
#yhat_unseen20.7 <- predict(m20.7, as.matrix(test.X))
#yhat_unseen20.8 <- predict(m20.8, as.matrix(test.X))
#yhat_unseen20.9 <- predict(m20.9, as.matrix(test.X))
#yhat_unseen20.10 <- predict(m20.10, as.matrix(test.X))
#measures <- c("AccuracyNull", "Accuracy", "AccuracyLower", "AccuracyUpper")
#n20.0.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.0))
#n20.1.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.1))
#n20.2.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.2))
#n20.3.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.3))
#n20.4.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.4))
#n20.5.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.5))
#n20.6.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.6))
#n20.7.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.7))
#n20.8.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.8))
#n20.9.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.9))
#n20.10.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20.10))
#n20.0.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.0))
#n20.1.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.1))
#n20.2.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.2))
#n20.3.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.3))
#n20.4.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.4))
#n20.5.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.5))
#n20.6.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.6))
#n20.7.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.7))
#n20.8.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.8))
#n20.9.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.9))
#n20.10.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20.10))
#shrinkage <- rbind(
# cbind(Size = 20.0, Sample = "In", as.data.frame(t(n20.0.insample$overall[measures]))),
# cbind(Size = 20.0, Sample = "Out", as.data.frame(t(n20.0.outsample$overall[measures]))),
# cbind(Size = 20.1, Sample = "In", as.data.frame(t(n20.1.insample$overall[measures]))),
# cbind(Size = 20.1, Sample = "Out", as.data.frame(t(n20.1.outsample$overall[measures]))),
# cbind(Size = 20.2, Sample = "In", as.data.frame(t(n20.2.insample$overall[measures]))),
# cbind(Size = 20.2, Sample = "Out", as.data.frame(t(n20.2.outsample$overall[measures]))),
# cbind(Size = 20.3, Sample = "In", as.data.frame(t(n20.3.insample$overall[measures]))),
# cbind(Size = 20.3, Sample = "Out", as.data.frame(t(n20.3.outsample$overall[measures]))),
# cbind(Size = 20.4, Sample = "In", as.data.frame(t(n20.4.insample$overall[measures]))),
# cbind(Size = 20.4, Sample = "Out", as.data.frame(t(n20.4.outsample$overall[measures]))),
# cbind(Size = 20.5, Sample = "In", as.data.frame(t(n20.5.insample$overall[measures]))),
# cbind(Size = 20.5, Sample = "Out", as.data.frame(t(n20.5.outsample$overall[measures]))),
# cbind(Size = 20.6, Sample = "In", as.data.frame(t(n20.6.insample$overall[measures]))),
# cbind(Size = 20.6, Sample = "Out", as.data.frame(t(n20.6.outsample$overall[measures]))),
# cbind(Size = 20.7, Sample = "In", as.data.frame(t(n20.7.insample$overall[measures]))),
# cbind(Size = 20.7, Sample = "Out", as.data.frame(t(n20.7.outsample$overall[measures]))),
# cbind(Size = 20.8, Sample = "In", as.data.frame(t(n20.8.insample$overall[measures]))),
# cbind(Size = 20.8, Sample = "Out", as.data.frame(t(n20.8.outsample$overall[measures]))),
# cbind(Size = 20.9, Sample = "In", as.data.frame(t(n20.9.insample$overall[measures]))),
# cbind(Size = 20.9, Sample = "Out", as.data.frame(t(n20.9.outsample$overall[measures]))),
# cbind(Size = 20.99, Sample = "In", as.data.frame(t(n20.10.insample$overall[measures]))),
# cbind(Size = 20.99, Sample = "Out", as.data.frame(t(n20.10.outsample$overall[measures])))
# )
#shrinkage$Pkg <- rep(c("In", "Out"), 1)
#dodge <- position_dodge(width=0.4)
#p.shrinkage <- ggplot(shrinkage, aes(interaction(Size, sep = " : "), Accuracy,
# ymin = AccuracyLower, ymax = AccuracyUpper,
# shape = Sample, linetype = Sample)) +
# geom_point(size = 2.5, position = dodge) +
# geom_errorbar(width = .25, position = dodge) +
# xlab("") + ylab("Accuracy + 95% CI") +
# theme_classic() +
# theme(legend.key.size = unit(1, "cm"), legend.position = c(.8, .2))
#png("test_20.png",
# width = 6, height = 6, units = "in", res = 600)
# print(p.shrinkage)
#dev.off()
##################################################################
# Neural Net: Models for Plotting Tuning Differences (Part 2)
##################################################################
# This time I am tuning the hidden layer size, ranging from 5
# hidden neurons to 100.
#m5 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(5),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m10 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(10),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m15 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(15),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m20 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(20),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m30 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(30),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m40 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(40),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m50 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(50),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m70 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(70),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 2000,
# maxit = 100)
#m100 <- train(x = train.X, y = train.y,
# method = "nnet",
# tuneGrid = expand.grid(
# .size = c(100),
# .decay = 0),
# trControl= trainControl(method = "none"),
# MaxNWts = 20000,
# maxit = 100)
#yhat5 <- predict(m5)
#yhat10 <- predict(m10)
#yhat15 <- predict(m15)
#yhat20 <- predict(m20)
#yhat30 <- predict(m30)
#yhat40 <- predict(m40)
#yhat50 <- predict(m50)
#yhat70 <- predict(m70)
#yhat100 <- predict(m100)
#yhat_unseen5 <- predict(m5, as.matrix(test.X))
#yhat_unseen10 <- predict(m10, as.matrix(test.X))
#yhat_unseen15 <- predict(m15, as.matrix(test.X))
#yhat_unseen20 <- predict(m20, as.matrix(test.X))
#yhat_unseen30 <- predict(m30, as.matrix(test.X))
#yhat_unseen40 <- predict(m40, as.matrix(test.X))
#yhat_unseen50 <- predict(m50, as.matrix(test.X))
#yhat_unseen70 <- predict(m70, as.matrix(test.X))
#yhat_unseen100 <- predict(m100, as.matrix(test.X))
#measures <- c("AccuracyNull", "Accuracy", "AccuracyLower", "AccuracyUpper")
#n5.insample <- caret::confusionMatrix(xtabs(~train.y + yhat5))
#n10.insample <- caret::confusionMatrix(xtabs(~train.y + yhat10))
#n15.insample <- caret::confusionMatrix(xtabs(~train.y + yhat15))
#n20.insample <- caret::confusionMatrix(xtabs(~train.y + yhat20))
#n30.insample <- caret::confusionMatrix(xtabs(~train.y + yhat30))
#n40.insample <- caret::confusionMatrix(xtabs(~train.y + yhat40))
#n50.insample <- caret::confusionMatrix(xtabs(~train.y + yhat50))
#n70.insample <- caret::confusionMatrix(xtabs(~train.y + yhat70))
#n100.insample <- caret::confusionMatrix(xtabs(~train.y + yhat100))
#n5.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen5))
#n10.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen10))
#n15.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen15))
#n20.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen20))
#n30.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen30))
#n40.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen40))
#n50.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen50))
#n70.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen70))
#n100.outsample <- caret::confusionMatrix(xtabs(~test.y + yhat_unseen100))
#shrinkage <- rbind(
# cbind(Size = 5, Sample = "In", as.data.frame(t(n5.insample$overall[measures]))),
# cbind(Size = 5, Sample = "Out", as.data.frame(t(n5.outsample$overall[measures]))),
# cbind(Size = 10, Sample = "In", as.data.frame(t(n10.insample$overall[measures]))),
# cbind(Size = 10, Sample = "Out", as.data.frame(t(n10.outsample$overall[measures]))),
# cbind(Size = 15, Sample = "In", as.data.frame(t(n15.insample$overall[measures]))),
# cbind(Size = 15, Sample = "Out", as.data.frame(t(n15.outsample$overall[measures]))),
# cbind(Size = 20, Sample = "In", as.data.frame(t(n20.insample$overall[measures]))),
# cbind(Size = 20, Sample = "Out", as.data.frame(t(n20.outsample$overall[measures]))),
# cbind(Size = 30, Sample = "In", as.data.frame(t(n30.insample$overall[measures]))),
# cbind(Size = 30, Sample = "Out", as.data.frame(t(n30.outsample$overall[measures]))),
# cbind(Size = 40, Sample = "In", as.data.frame(t(n40.insample$overall[measures]))),
# cbind(Size = 40, Sample = "Out", as.data.frame(t(n40.outsample$overall[measures]))),
# cbind(Size = 50, Sample = "In", as.data.frame(t(n50.insample$overall[measures]))),
# cbind(Size = 50, Sample = "Out", as.data.frame(t(n50.outsample$overall[measures]))),
# cbind(Size = 70, Sample = "In", as.data.frame(t(n70.insample$overall[measures]))),
# cbind(Size = 70, Sample = "Out", as.data.frame(t(n70.outsample$overall[measures]))),
# cbind(Size = 100, Sample = "In", as.data.frame(t(n100.insample$overall[measures]))),
# cbind(Size = 100, Sample = "Out", as.data.frame(t(n100.outsample$overall[measures])))
# )
#shrinkage$Pkg <- rep(c("In", "Out"), 1)
#dodge <- position_dodge(width=0.4)
#p.shrinkage <- ggplot(shrinkage, aes(interaction(Size, sep = " : "), Accuracy,
# ymin = AccuracyLower, ymax = AccuracyUpper,
# shape = Sample, linetype = Sample)) +
# geom_point(size = 2.5, position = dodge) +
# geom_errorbar(width = .25, position = dodge) +
# xlab("") + ylab("Accuracy + 95% CI") +
# theme_classic() +
# theme(legend.key.size = unit(1, "cm"), legend.position = c(.8, .2))
#png("test_5150.png",
# width = 6, height = 6, units = "in", res = 600)
# print(p.shrinkage)
#dev.off()
##################################################################
# Neural Net: H2O Models
##################################################################
# Initializing the H2O-cluster
c1 <- h2o.init()
# Setting up the data for H2O.
# H2O treats a factor outcome as a classification target, so coerce the
# 0/1 outcome to factor before converting the frames.
train$volact <- as.factor(train$volact)
test$volact <- as.factor(test$volact)
h2o.train <- as.h2o(train)
h2o.test <- as.h2o(test)
# Same conversion for the class-balanced training set built earlier.
train.bal$volact <- as.factor(train.bal$volact)
h2o.train.bal <- as.h2o(train.bal)
# Predictor names: every column of `train` except column 1 (the outcome).
xnames <- colnames(train[,-1])
##################################################################
# Neural Net: H2O Models for Plotting Tuning Differences (Part 3)
##################################################################
# Now I am testing different depths of hidden layers, starting
# with one layer and going until 4 layers. The error
# distribution is going to be plotted.
#m2a <- h2o.deeplearning(
# x = xnames,
# #y = "Outcome",
# training_frame= h2o.train,
# validation_frame = h2o.test,
# activation = "Tanh",
# autoencoder = TRUE,
# hidden = c(20),
# epochs = 10,
# sparsity_beta = 0,
# l1 = 0,
# l2 = 0
#)
#m2b <- h2o.deeplearning(
# x = xnames,
# #y = "Outcome",
# training_frame= h2o.train,
# validation_frame = h2o.test,
# activation = "Tanh",
# autoencoder = TRUE,
# hidden = c(20, 15),
# epochs = 10,
# sparsity_beta = 0,
# #hidden_pout_ratios = c(.3),
# l1 = 0,
# l2 = 0
#)
#m2c <- h2o.deeplearning(
# x = xnames,
# #y = "Outcome",
# training_frame= h2o.train,
# validation_frame = h2o.test,
# activation = "Tanh",
# autoencoder = TRUE,
# hidden = c(20, 15, 10),
# epochs = 10,
# sparsity_beta = 0,
# l1 = 0,
# l2 = 0
#)
#m2d <- h2o.deeplearning(
# x = xnames,
# #y = "Outcome",
# training_frame= h2o.train,
# validation_frame = h2o.test,
# activation = "Tanh",
# autoencoder = TRUE,
# hidden = c(20, 15, 10, 5),
# epochs = 10,
# sparsity_beta = 0,
# #hi2den_dropout_ratios = c(.3),
# l1 = 0,
# l2 = 0
#)
#summary(m2a)
#summary(m2b)
#summary(m2c)
#summary(m2d)
#error1 <- as.data.frame(h2o.anomaly(m2a, h2o.train))
#error2a <- as.data.frame(h2o.anomaly(m2b, h2o.train))
#error2b <- as.data.frame(h2o.anomaly(m2c, h2o.train))
#error2c <- as.data.frame(h2o.anomaly(m2d, h2o.train))
#error <- as.data.table(rbind(
# cbind.data.frame(Model = "2a", error1),
# cbind.data.frame(Model = "2b", error2a),
# cbind.data.frame(Model = "2c", error2b),
# cbind.data.frame(Model = "2d", error2c)))
#percentile <- error[, .(
# Percentile = quantile(Reconstruction.MSE, probs = .95)
#), by = Model]
#p1 <- ggplot(error, aes(Reconstruction.MSE)) +
# geom_histogram(binwidth = .001, fill = "grey50") +
# geom_vline(aes(xintercept = Percentile), data = percentile, linetype = 2) +
# theme_bw() +
# facet_wrap(~Model)
#print(p1)
#png("error_1.png",
# width = 5.5, height = 5.5, units = "in", res = 600)
#print(p1)
#dev.off()
##################################################################
# Neural Net: Calculated on the "Basic Model" Data
##################################################################
# Deep neural net on the pooled ("basic model") data: three ReLU hidden
# layers (200-200-100), 10 epochs, cross-entropy loss, learning rate .005.
# Weights/biases are exported so they can be inspected via h2o.weights().
mt3 <- h2o.deeplearning(
  x = xnames,
  y = "volact",
  training_frame = h2o.train,
  validation_frame = h2o.test,
  activation = "Rectifier",
  hidden = c(200, 200, 100),
  epochs = 10,
  rate = .005,
  loss = "CrossEntropy",
  #input_dropout_ratio = .2,
  #hidden_dropout_ratios = c(.5, .3, .1),
  export_weights_and_biases = TRUE
)
summary(mt3)
# Score the held-out test set and report accuracy and F1.
# ACC() and F1() are helper functions defined earlier in this script (not
# visible here); they appear to consume a confusion table -- confirm there.
mt3_pred <- h2o.predict(mt3, h2o.test)
ACC(table(as.vector(mt3_pred$predict), as.vector(h2o.test$volact)))
F1(table(as.vector(mt3_pred$predict), as.vector(h2o.test$volact)))
# These variable importances are used for the diagram.
#h2o.varimp(mt3)
##################################################################
# Neural Net: Calculated on Balanced Data
##################################################################
# Same architecture as mt3, but trained on the class-balanced training
# frame (h2o.train.bal); evaluated on the same untouched test set so the
# two models are directly comparable.
mtbal <- h2o.deeplearning(
  x = xnames,
  y = "volact",
  training_frame = h2o.train.bal,
  validation_frame = h2o.test,
  activation = "Rectifier",
  hidden = c(200, 200, 100),
  epochs = 10,
  rate = .005,
  loss = "CrossEntropy",
  #input_dropout_ratio = .2,
  #hidden_dropout_ratios = c(.5, .3, .1),
  export_weights_and_biases = TRUE
)
summary(mtbal)
# Held-out accuracy and F1 (ACC()/F1() are helpers defined earlier in
# this script).
mtbal_pred <- h2o.predict(mtbal, h2o.test)
ACC(table(as.vector(mtbal_pred$predict), as.vector(h2o.test$volact)))
F1(table(as.vector(mtbal_pred$predict), as.vector(h2o.test$volact)))
#h2o.varimp(mtbal)
##################################################################
# Neural Net: Plot a Heatmap of the First Layer Weights
##################################################################
## weights for mapping from inputs to hidden layer 1 neurons
#w1 <- as.matrix(h2o.weights(mtbal, 1))
## plot heatmap of the weights
#tmp <- as.data.frame(t(w1))
#tmp$Row <- 1:nrow(tmp)
#tmp <- melt(tmp, id.vars = c("Row"))
#p.heat <- ggplot(tmp,
# aes(variable, Row, fill = value)) +
# geom_tile() +
# scale_fill_gradientn(colours = c("black", "white", "blue")) +
# theme_classic() +
# theme(axis.text = element_blank()) +
# xlab("Hidden Neuron") +
# ylab("Input Variable") +
# ggtitle("Heatmap of Weights for Layer 1")
#print(p.heat)
#png("heatmap_layer1.png",
# width = 5.5, height = 7.5, units = "in", res = 600)
#print(p.heat)
#dev.off()
##################################################################
# Neural Net: Preparing Plot for Marginal Effects later
##################################################################
# Convert the marginal-effects grid (`plot.data`, built earlier in the
# file) to H2O and score it with the basic neural net.
# NOTE(review): this overwrites the test-set predictions previously stored
# in `mt3_pred`; from here on mt3_pred holds plot-grid predictions.
h2o.plot <- as.h2o(plot.data)
mt3_pred <- h2o.predict(mt3, h2o.plot)
##################################################################
# Random Forest: Calculated on the "Basic Model" Data
##################################################################
# Random forest on the pooled ("basic model") data: 400 trees, depth up
# to 30, fixed seed for reproducibility. `stopping_rounds = 0` disables
# early stopping, and per-iteration scoring is off to keep training fast.
rf2 <- h2o.randomForest(
  training_frame = h2o.train,
  validation_frame = h2o.test,
  x = xnames,
  y = "volact",
  model_id = "forest2",
  ntrees = 400,
  max_depth = 30,
  stopping_rounds = 0,
  score_each_iteration = FALSE,  # spelled out: `F` is a reassignable binding
  seed = 1000000)
summary(rf2)
# These variable importances are used for the diagram.
# h2o.varimp(rf2)
# Held-out accuracy and F1 (ACC()/F1() are helpers defined earlier in
# this script).
rf2_pred <- h2o.predict(rf2, h2o.test)
ACC(table(as.vector(rf2_pred$predict), as.vector(h2o.test$volact)))
F1(table(as.vector(rf2_pred$predict), as.vector(h2o.test$volact)))
##################################################################
# Random Forest: Calculated on Balanced Data
##################################################################
# Random forest with the same hyperparameters as rf2 but trained on the
# class-balanced training frame; evaluated on the same untouched test set.
rfbal <- h2o.randomForest(
  training_frame = h2o.train.bal,
  validation_frame = h2o.test,
  x = xnames,
  y = "volact",
  model_id = "forestbal",
  ntrees = 400,
  max_depth = 30,
  stopping_rounds = 0,
  score_each_iteration = FALSE,  # spelled out: `F` is a reassignable binding
  seed = 1000000)
summary(rfbal)
# Held-out accuracy and F1 (ACC()/F1() are helpers defined earlier in
# this script).
rfbal_pred <- h2o.predict(rfbal, h2o.test)
ACC(table(as.vector(rfbal_pred$predict), as.vector(h2o.test$volact)))
F1(table(as.vector(rfbal_pred$predict), as.vector(h2o.test$volact)))
##################################################################
# Random Forest: Preparing Plot for Marginal Effects
##################################################################
# Score the marginal-effects grid with the random forest as well.
# NOTE(review): this overwrites the test-set predictions stored in
# `rf2_pred` above, mirroring what was done for mt3_pred.
rf2_pred <- h2o.predict(rf2, h2o.plot)
# Condensing the prepared results into one dataframe
# and plotting it.
# `age.volact` is built earlier in the file (not visible here) --
# presumably it already holds the x-range and the logistic-regression
# probabilities; columns "2" and "3" append the NN and RF probabilities.
age.volact <- data.frame(cbind(as.matrix(age.volact),
                               as.vector(mt3_pred$p1), as.vector(rf2_pred$p1)))
colnames(age.volact) <- c("range", "1", "2", "3")
# Long format for ggplot: one row per (x, model) pair.
melted <- melt(age.volact, id.vars="range")
#png("age-volact.png")
#myplot <- ggplot(data=melted, aes(x=range, y=value, group=variable)) +
#  geom_line(col = melted$variable) +
#  ylab("Predicted Probability of Voluntary Action") +
#  xlab("Year of Birth (Scaled)") +
#  ggtitle("Comparison: \n Predicted Probabilites of Voluntary Action for
#          a range of Year of Births \n From top to down: Logistic Regression, Neural Network, Random Forest") +
#  theme_bw()
#print(myplot)
#dev.off()
#print(myplot)
##################################################################
##################################################################
##################################################################
# Preparing the Data for the Separate Models
##################################################################
# Load the two ESS waves used for the separate per-wave models.
ess1 <- read.table("C:/Users/laris/Desktop/MMDS/Semester3-FSS2018/Advanced-Quantitative-Methods/paper/Submission/ess1.csv", header=TRUE, sep=",")
# NOTE(review): this path has no ".csv" extension, unlike ess1.csv above --
# confirm the file name is intentional (possibly meant to be "ess6.csv").
ess6 <- read.table("C:/Users/laris/Desktop/MMDS/Semester3-FSS2018/Advanced-Quantitative-Methods/paper/Submission/ess", header=TRUE, sep=",")

# Impute missings identically for both waves (the original code had the
# same loop copy-pasted twice): mean-impute the numeric columns, and fill
# the categorical/binary columns 3, 7, 8, 9 with fixed default codes
# (1, 1, 0, 0 respectively).
impute_ess <- function(df) {
  for (i in c(4, 5, 6, 10:23)) {
    df[is.na(df[, i]), i] <- mean(df[, i], na.rm = TRUE)
  }
  df[is.na(df[, 3]), 3] <- 1
  df[is.na(df[, 7]), 7] <- 1
  df[is.na(df[, 8]), 8] <- 0
  df[is.na(df[, 9]), 9] <- 0
  df
}
ess1 <- impute_ess(ess1)
ess6 <- impute_ess(ess6)
# Scale the numeric predictors; keep the outcome (col 1), country (col 2,
# only in the *c variants), and the binary columns 3/7/8/9 unscaled.
# Column 24 is excluded entirely from the scaled predictor set.
ess1.scaled <- cbind(ess1[c(1, 3, 7, 8, 9)], scale(ess1[-c(1, 2, 3, 7, 8, 9, 24)]))
ess1c.scaled <- cbind(ess1[c(1, 2, 3, 7, 8, 9)], scale(ess1[-c(1, 2, 3, 7, 8, 9, 24)]))
ess6.scaled <- cbind(ess6[c(1, 3, 7, 8, 9)], scale(ess6[-c(1, 2, 3, 7, 8, 9, 24)]))
ess6c.scaled <- cbind(ess6[c(1, 2, 3, 7, 8, 9)], scale(ess6[-c(1, 2, 3, 7, 8, 9, 24)]))

# One-hot encode the country column, then drop the original `cntry`
# factor (column 2 in the *c.scaled frames). Shared helper replaces the
# loop that the original code duplicated for both waves.
add_country_dummies <- function(df) {
  for (ctry in unique(df$cntry)) {
    df[paste("cntry", ctry, sep = "")] <- ifelse(df$cntry == ctry, 1, 0)
  }
  df[, -2]
}
ess1c.scaled <- add_country_dummies(ess1c.scaled)
ess6c.scaled <- add_country_dummies(ess6c.scaled)
# Same as above: this can be un-commented when the data sets are
# already there and just have to be written and loaded.
#write.csv(ess1.scaled, file = "ess1-scaled.csv")
#write.csv(ess1c.scaled, file = "ess1-scaled-cntry.csv")
#write.csv(ess6.scaled, file = "ess6-scaled.csv")
#write.csv(ess6c.scaled, file = "ess6-scaled-cntry.csv")
#ess1.scaled <- read.table("ess1-scaled.csv", header=TRUE, sep=",")
#ess6.scaled <- read.table("ess6-scaled.csv", header=TRUE, sep=",")
#ess1c.scaled <- read.table("ess1-scaled-cntry.csv", header=TRUE, sep=",")
#ess6c.scaled <- read.table("ess6-scaled-cntry.csv", header=TRUE, sep=",")
#ess1.scaled <- ess1.scaled[,-1]
#ess6.scaled <- ess6.scaled[,-1]
#ess1c.scaled <- ess1c.scaled[,-1]
#ess6c.scaled <- ess6c.scaled[,-1]
# Again: splitting into training and test set
# 90/10 train/test split for each wave, seeded for reproducibility.
# The seed is reset before each wave's draw, exactly as in the original
# copy-pasted code, so the sampled indices are unchanged. The same row
# indices are reused for the country-dummy variants so the plain and
# country models are evaluated on identical observations.
holdout_split <- function(df, frac = 0.9, seed = 123) {
  set.seed(seed)
  sample(seq_len(nrow(df)), size = floor(frac * nrow(df)))
}
train_ind <- holdout_split(ess1.scaled)
train1 <- ess1.scaled[train_ind, ]
test1 <- ess1.scaled[-train_ind, ]
train1c <- ess1c.scaled[train_ind, ]
test1c <- ess1c.scaled[-train_ind, ]
train_ind <- holdout_split(ess6.scaled)
train6 <- ess6.scaled[train_ind, ]
test6 <- ess6.scaled[-train_ind, ]
train6c <- ess6c.scaled[train_ind, ]
test6c <- ess6c.scaled[-train_ind, ]
##################################################################
# Logistic Regression: Calculated on two years separately
##################################################################
# Logistic regression fit separately for each survey wave.
# NOTE(review): the original code fit `glm(y1 ~ X1)` with X1 a matrix and
# then reassigned X1 to the *test* matrix before predict(); predict()
# could not find `X1` inside the supplied newdata and silently fell back
# to the global environment, so the reassignment was load-bearing. Fitting
# on a data frame produces identical coefficients while making predict()
# honour `newdata` explicitly.
y1 <- train1$volact
X1 <- as.matrix(train1[,-1])
ols_model1 <- glm(volact ~ ., data = data.frame(volact = y1, X1),
                  family = binomial(link = 'logit'))
X1 <- as.matrix(test1[,-1])
# Predicted P(volact = 1) on the wave-1 test set.
p_ols1 <- predict(ols_model1, newdata = data.frame(X1), type = "response")

y6 <- train6$volact
X6 <- as.matrix(train6[,-1])
ols_model6 <- glm(volact ~ ., data = data.frame(volact = y6, X6),
                  family = binomial(link = 'logit'))
X6 <- as.matrix(test6[,-1])
# Predicted P(volact = 1) on the wave-6 test set.
p_ols6 <- predict(ols_model6, newdata = data.frame(X6), type = "response")
# Threshold sweep for the ESS1 logistic model: classify at cutoffs
# 0.1..0.7 and record accuracy / F1 for each (replaces ~30 copy-pasted
# lines). Cutoffs above 0.7 are omitted -- the original had 0.8/0.9
# commented out, presumably because predictions collapse to one class and
# the table-based ACC()/F1() helpers break; confirm before extending.
ths <- seq(0.1, 0.7, length.out = 7)
threshold <- cbind(
  ths,
  vapply(ths, function(t) ACC(table(ifelse(p_ols1 > t, 1, 0), test1$volact)),
         numeric(1)),
  vapply(ths, function(t) F1(table(ifelse(p_ols1 > t, 1, 0), test1$volact)),
         numeric(1))
)
print("MAX Accuracy for ESS1 Logistic Regression")
max(threshold[,2])
print("MAX F1 for ESS1 Logistic Regression")
max(threshold[,3])
# Threshold sweep for the ESS6 logistic model: cutoffs 0.1..0.8
# (0.9 was commented out in the original -- presumably the predicted class
# collapses there and ACC()/F1() break; confirm before extending).
ths <- seq(0.1, 0.8, length.out = 8)
threshold <- cbind(
  ths,
  vapply(ths, function(t) ACC(table(ifelse(p_ols6 > t, 1, 0), test6$volact)),
         numeric(1)),
  vapply(ths, function(t) F1(table(ifelse(p_ols6 > t, 1, 0), test6$volact)),
         numeric(1))
)
print("MAX Accuracy for ESS6 Logistic Regression")
max(threshold[,2])
print("MAX F1 for ESS6 Logistic Regression")
max(threshold[,3])
##################################################################
# Logistic Regression: Calculated with Country Dummies
##################################################################
# Logistic regression per wave including country dummy variables.
# Column 44 (ESS1) / column 51 (ESS6) is excluded alongside the outcome --
# presumably one country dummy is dropped as the reference category to
# avoid perfect collinearity; confirm against the dummy-creation step.
# NOTE(review): the original fit `glm(y ~ X)` with X a matrix and then
# reassigned X to the test matrix; predict() could not find the matrix in
# `newdata` and silently fell back to the calling environment. Fitting on
# a data frame yields identical coefficients while making predict() use
# `newdata` explicitly.
y1c <- train1c$volact
X1c <- as.matrix(train1c[,-c(1, 44)])
ols_model1c <- glm(volact ~ ., data = data.frame(volact = y1c, X1c),
                   family = binomial(link = 'logit'))
X1c <- as.matrix(test1c[,-c(1, 44)])
p_ols1c <- predict(ols_model1c, newdata = data.frame(X1c), type = "response")

y6c <- train6c$volact
X6c <- as.matrix(train6c[,-c(1, 51)])
ols_model6c <- glm(volact ~ ., data = data.frame(volact = y6c, X6c),
                   family = binomial(link = 'logit'))
X6c <- as.matrix(test6c[,-c(1, 51)])
p_ols6c <- predict(ols_model6c, newdata = data.frame(X6c), type = "response")
# Threshold sweep for the ESS1 model with country dummies: cutoffs
# 0.1..0.8 (0.9 was commented out in the original -- presumably the
# predicted class collapses there and ACC()/F1() break).
ths <- seq(0.1, 0.8, length.out = 8)
threshold <- cbind(
  ths,
  vapply(ths, function(t) ACC(table(ifelse(p_ols1c > t, 1, 0), test1c$volact)),
         numeric(1)),
  vapply(ths, function(t) F1(table(ifelse(p_ols1c > t, 1, 0), test1c$volact)),
         numeric(1))
)
print("MAX Accuracy for ESS1 Logistic Regression with Countries")
max(threshold[,2])
print("MAX F1 for ESS1 Logistic Regression with Countries")
max(threshold[,3])
# Threshold sweep for the ESS6 model with country dummies: cutoffs
# 0.1..0.8 (0.9 was commented out in the original -- presumably the
# predicted class collapses there and ACC()/F1() break).
ths <- seq(0.1, 0.8, length.out = 8)
threshold <- cbind(
  ths,
  vapply(ths, function(t) ACC(table(ifelse(p_ols6c > t, 1, 0), test6c$volact)),
         numeric(1)),
  vapply(ths, function(t) F1(table(ifelse(p_ols6c > t, 1, 0), test6c$volact)),
         numeric(1))
)
print("MAX Accuracy for ESS6 Logistic Regression with Countries")
max(threshold[,2])
print("MAX F1 for ESS6 Logistic Regression with Countries")
max(threshold[,3])
##################################################################
# Neural Net: H2O Models Extended
##################################################################
# Again: transforming the data into H2O-data
# The outcome must be a factor so H2O treats the task as classification.
train1$volact <- as.factor(train1$volact)
test1$volact <- as.factor(test1$volact)
train6$volact <- as.factor(train6$volact)
test6$volact <- as.factor(test6$volact)
h2o.train1 <- as.h2o(train1)
h2o.test1 <- as.h2o(test1)
h2o.train6 <- as.h2o(train6)
h2o.test6 <- as.h2o(test6)
# Same conversion for the data sets that include country dummies ("c" suffix).
train1c$volact <- as.factor(train1c$volact)
test1c$volact <- as.factor(test1c$volact)
train6c$volact <- as.factor(train6c$volact)
test6c$volact <- as.factor(test6c$volact)
h2o.train1c <- as.h2o(train1c)
h2o.test1c <- as.h2o(test1c)
h2o.train6c <- as.h2o(train6c)
h2o.test6c <- as.h2o(test6c)
# Predictor names: everything but the first column, which is assumed to be
# the outcome 'volact' -- TODO confirm column order in the source frames.
xnames1 <- colnames(train1[,-1])
xnames6 <- colnames(train6[,-1])
xnames1c <- colnames(train1c[,-1])
xnames6c <- colnames(train6c[,-1])
##################################################################
# Neural Net: Calculated on two years separately
##################################################################
# Both waves use the same architecture and hyper-parameters, so fit via a
# shared helper instead of duplicating the h2o.deeplearning() call.
fit_dl <- function(xnames, train_frame, test_frame) {
  h2o.deeplearning(
    x = xnames,
    y = "volact",
    training_frame = train_frame,
    validation_frame = test_frame,
    activation = "Rectifier",
    hidden = c(200, 200, 100),
    epochs = 10,
    rate = .005,
    loss = "CrossEntropy",
    export_weights_and_biases = TRUE,
    seed = 1000000  # fixed seed for reproducibility
  )
}
nn1_1 <- fit_dl(xnames1, h2o.train1, h2o.test1)
nn6_1 <- fit_dl(xnames6, h2o.train6, h2o.test6)
summary(nn1_1)
summary(nn6_1)
nn1_1_pred <- h2o.predict(nn1_1, h2o.test1)
nn6_1_pred <- h2o.predict(nn6_1, h2o.test6)
# Hold-out accuracy and F1 for both waves.
ACC(table(as.vector(nn1_1_pred$predict), test1$volact))
F1(table(as.vector(nn1_1_pred$predict), test1$volact))
ACC(table(as.vector(nn6_1_pred$predict), test6$volact))
F1(table(as.vector(nn6_1_pred$predict), test6$volact))
##################################################################
# Neural Net: Calculated with Country Dummies
##################################################################
# Same architecture and hyper-parameters as the no-dummy models; fit via a
# shared helper instead of duplicating the h2o.deeplearning() call.
fit_dl <- function(xnames, train_frame, test_frame) {
  h2o.deeplearning(
    x = xnames,
    y = "volact",
    training_frame = train_frame,
    validation_frame = test_frame,
    activation = "Rectifier",
    hidden = c(200, 200, 100),
    epochs = 10,
    rate = .005,
    loss = "CrossEntropy",
    export_weights_and_biases = TRUE,
    seed = 1000000  # fixed seed for reproducibility
  )
}
nn1_1c <- fit_dl(xnames1c, h2o.train1c, h2o.test1c)
nn6_1c <- fit_dl(xnames6c, h2o.train6c, h2o.test6c)
summary(nn1_1c)
summary(nn6_1c)
nn1_1c_pred <- h2o.predict(nn1_1c, h2o.test1c)
nn6_1c_pred <- h2o.predict(nn6_1c, h2o.test6c)
# Hold-out accuracy and F1 for both waves (with country dummies).
ACC(table(as.vector(nn1_1c_pred$predict), test1c$volact))
F1(table(as.vector(nn1_1c_pred$predict), test1c$volact))
ACC(table(as.vector(nn6_1c_pred$predict), test6c$volact))
F1(table(as.vector(nn6_1c_pred$predict), test6c$volact))
##################################################################
# Random Forest: Calculated on two years separately
##################################################################
# Shared random-forest settings for both waves (was copy-pasted twice).
fit_rf <- function(xnames, train_frame, test_frame) {
  h2o.randomForest(
    training_frame = train_frame,
    validation_frame = test_frame,
    x = xnames,
    y = "volact",
    # NOTE(review): the original reused model_id = "forest1" for every
    # forest; kept for parity, but distinct ids would avoid overwriting
    # models on the H2O cluster -- confirm intent.
    model_id = "forest1",
    ntrees = 300,
    max_depth = 50,
    stopping_rounds = 2,
    score_each_iteration = FALSE,  # was `F`; spelled out per best practice
    seed = 1000000)
}
rf1_1 <- fit_rf(xnames1, h2o.train1, h2o.test1)
rf6_1 <- fit_rf(xnames6, h2o.train6, h2o.test6)
summary(rf1_1)
summary(rf6_1)
rf1_1_pred <- h2o.predict(rf1_1, h2o.test1)
rf6_1_pred <- h2o.predict(rf6_1, h2o.test6)
# Hold-out accuracy and F1 for both waves.
ACC(table(as.vector(rf1_1_pred$predict), test1$volact))
F1(table(as.vector(rf1_1_pred$predict), test1$volact))
ACC(table(as.vector(rf6_1_pred$predict), test6$volact))
F1(table(as.vector(rf6_1_pred$predict), test6$volact))
##################################################################
# Random Forest: Calculated with Country Dummies
##################################################################
# Shared random-forest settings (same as the no-dummy section).
fit_rf <- function(xnames, train_frame, test_frame) {
  h2o.randomForest(
    training_frame = train_frame,
    validation_frame = test_frame,
    x = xnames,
    y = "volact",
    # NOTE(review): the original reused model_id = "forest1" for every
    # forest; kept for parity -- confirm intent.
    model_id = "forest1",
    ntrees = 300,
    max_depth = 50,
    stopping_rounds = 2,
    score_each_iteration = FALSE,  # was `F`; spelled out per best practice
    seed = 1000000)
}
rf1_1c <- fit_rf(xnames1c, h2o.train1c, h2o.test1c)
rf6_1c <- fit_rf(xnames6c, h2o.train6c, h2o.test6c)
summary(rf1_1c)
summary(rf6_1c)
rf1_1c_pred <- h2o.predict(rf1_1c, h2o.test1c)
rf6_1c_pred <- h2o.predict(rf6_1c, h2o.test6c)
# Hold-out accuracy and F1 for both waves (with country dummies).
ACC(table(as.vector(rf1_1c_pred$predict), test1c$volact))
F1(table(as.vector(rf1_1c_pred$predict), test1c$volact))
ACC(table(as.vector(rf6_1c_pred$predict), test6c$volact))
F1(table(as.vector(rf6_1c_pred$predict), test6c$volact))
##################################################################
##################################################################
|
8b2260335fed572e644a94e64951f1131de50fe0
|
f13fbc74fb35e2c2efd3bab429880f56f44cfd33
|
/R/blast.pdb.R
|
e9722d3a8d647256f5489cccc39366245fdc83b5
|
[] |
no_license
|
thomasp85/pepmaps
|
1a05ba4a1df622cdf23faaa40740a5af4f7c3183
|
86e9b952f38fc1ab198eb526a239f7e620679a54
|
refs/heads/master
| 2020-12-25T19:14:43.684678
| 2013-04-11T06:17:34
| 2013-04-11T06:17:34
| 5,366,935
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,819
|
r
|
blast.pdb.R
|
# TODO: Add comment
#
# Author: Thomas
###############################################################################
blast.pdb <- function(seq, database="pdb") {
  ## Run NCBI blastp on a given 'seq' sequence against a given 'database'
  ## and return a "blast" object with bitscores, e-values and parsed
  ## GI / PDB identifiers for every hit.
  if(!is.vector(seq)) {
    stop("Input 'seq' should be a single sequence as a single or multi element character vector")
  }
  seq <- paste(seq, collapse="")
  if( !(database %in% c("pdb", "nr", "swissprot")) )
    stop("Option database should be one of pdb, nr or swissprot")

  ##- Submit the query and extract the request ID (RID) from the response.
  ## NOTE(review): the plain-http legacy URL-API endpoints are kept as-is;
  ## confirm they are still served before relying on this function.
  urlput <- paste("http://www.ncbi.nlm.nih.gov/BLAST/Blast.cgi?CMD=Put&DATABASE=",
                  database,"&HITLIST_SIZE=20000&PROGRAM=blastp&CLIENT=web&QUERY=",
                  seq, sep="")
  txt <- scan(urlput, what="raw", sep="\n", quiet=TRUE)
  rid <- sub("^.*RID = " ,"",txt[ grep("RID =",txt) ])
  cat(paste(" Searching ... please wait (updates every 5 seconds) RID =",rid,"\n "))

  ##- Retrieve results via RID code. While the job is still running NCBI
  ##  returns an HTML page (detected via "DOCTYPE") instead of the CSV
  ##  hit table, so poll every 5 seconds until CSV arrives.
  ##  (The original performed one *unguarded* read.csv() before this loop
  ##  whose result was immediately discarded; a transient network error
  ##  there crashed the function before the retry logic could run.)
  urlget <- paste("http://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Get",
                  "&FORMAT_OBJECT=Alignment",
                  "&ALIGNMENT_VIEW=Tabular",
                  "&RESULTS_FILE=on",
                  "&FORMAT_TYPE=CSV",
                  "&ALIGNMENTS=20000",
                  "&RID=",rid, sep="")
  html <- 1
  while(length(html) == 1) {
    cat("."); Sys.sleep(5)
    raw <- try(read.csv(urlget,
                        header = FALSE, sep = ",", quote="\"", dec=".",
                        fill = TRUE, comment.char=""), silent=TRUE)
    ## inherits() is the reliable test for a try() failure
    ## (class(x) == "try-error" breaks when the object carries extra classes).
    if(inherits(raw, "try-error")) { stop("No hits found: thus no output generated") }
    html <- grep("DOCTYPE", raw[1,])
  }

  colnames(raw) <- c("queryid", "subjectids", "identity", "positives",
                     "alignmentlength", "mismatches", "gapopens",
                     "q.start", "q.end", "s.start", "s.end",
                     "evalue", "bitscore")

  ## expand 'raw' for each hit in 'subjectids' (i.e. split on ";")
  rawm <- as.matrix(raw)
  eachsubject <- strsplit(rawm[,"subjectids"],";")
  subjectids <- unlist(eachsubject)
  n.subjects <- sapply(eachsubject, length)
  rawm <- apply(rawm, 2, rep, times=n.subjects)
  rawm[,"subjectids"] <- subjectids

  ## parse ids: fields 2 (GI) and 4/5 (PDB id + chain) of the "|"-separated id
  all.ids <- strsplit(subjectids, "\\|")
  gi.id <- sapply(all.ids, '[', 2)
  pdb.id <- paste(sapply(all.ids, '[', 4),"_",sapply(all.ids, '[', 5),sep="")

  ## N.B. hack: zero evalues mapped to an arbitrarily large -log value
  mlog.evalue <- -log(as.numeric(rawm[,"evalue"]))
  mlog.evalue[is.infinite(mlog.evalue)] <- -log(1e-308)

  cat(paste("\n Reporting",length(pdb.id),"hits\n"))

  output <- list(bitscore= as.numeric(rawm[,"bitscore"]),
                 evalue = as.numeric(rawm[,"evalue"]),
                 mlog.evalue = mlog.evalue,
                 gi.id = gi.id,
                 pdb.id = pdb.id,
                 hit.tbl = rawm,
                 raw = raw)
  class(output) <- "blast"
  return(output)
}
|
ca25e97910810ec936b635c5f31f73de1389a830
|
56996f680ba915b6033e444c11f51ba883bfbb9a
|
/Plot2.R
|
a1e65023a55d5238b9f7ebd9a2fdbf703614bbba
|
[] |
no_license
|
csmithmeyer/csmithmeyer-ExData_Plotting1
|
dfd5dce1a23e76ff5cb59ff5d64aa94fffcd58b3
|
a45df4e73debb98f79c2ed1b3c6b9c6966d999e5
|
refs/heads/master
| 2020-05-07T09:50:16.429954
| 2019-04-09T23:32:46
| 2019-04-09T23:32:46
| 180,393,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 517
|
r
|
Plot2.R
|
# Plot 2: Global active power over time (household power consumption data).
setwd("C:/Users/1257256940.MIL/Desktop/Coursera/Plot/")
library(lubridate)

# Read the semicolon-separated household power data (header row present).
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)

# Combine the Date and Time columns into a single timestamp vector.
power$Date <- strptime(as.character(power$Date), "%d/%m/%Y")
power$Time <- as.character(power$Time)
stamps <- strptime(paste(power$Date, power$Time, sep = " "), "%Y-%m-%d %H:%M:%S")

# Render the line chart to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
plot(stamps, power$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
9246ec01c637bd681816c93e57043e44596166a1
|
799468ce526db6f14f2aa5003c601e259e5f0d62
|
/man/fundamental.matrix.Rd
|
344e949572c2cd24853d1b28228c2d58378e3547
|
[] |
no_license
|
kostask84/popbio
|
6aa45015bfc1659bd97f2ce51ad5246b8d434fac
|
682d3ffb922dfab4fd2c7fc7179af2b0d926edfd
|
refs/heads/master
| 2021-05-09T02:01:42.050755
| 2017-02-09T21:44:20
| 2017-02-09T21:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,296
|
rd
|
fundamental.matrix.Rd
|
\name{fundamental.matrix}
\alias{fundamental.matrix}
\title{ Fundamental matrix and age-specific survival }
\description{
Age-specific survival calculations from stage-classified matrices. Includes the mean, variance, and coefficient of variation (cv) of the time spent in each stage class, and the mean and variance of the time to death.
}
\usage{
fundamental.matrix(A, \dots)
}
\arguments{
\item{A}{ projection matrix }
\item{\dots}{ additional items are passed to \code{\link{splitA}}
and are used to split A into T and F matrices }
}
\details{
see section 5.3.1 in Caswell (2001).
}
\value{
A list with 5 items
\item{ N }{ fundamental matrix or mean of the time spent in each stage class}
\item{ var }{ variance of the time spent in each stage class}
\item{ cv }{ coefficient of variation (sd/mean) }
\item{ meaneta }{ mean of time to death}
\item{ vareta }{ variance of time to death }
}
\references{ Caswell, H. 2001. Matrix population models: construction, analysis, and interpretation, Second edition. Sinauer, Sunderland, Massachusetts, USA. }
\author{ Chris Stubben }
%\note{ }
\seealso{ see \code{\link{generation.time}} and \code{\link{net.reproductive.rate}} for other age-specific traits }
\examples{
data(whale)
fundamental.matrix(whale)
}
\keyword{ survey }
|
a5a8eb50d649e8f1da71441217ca4a77ad8ae06b
|
4adb21c499e05df30eb4fc64be68a20d746463b7
|
/R/run.eqs.r
|
c09096d110ace9170a758486f2203444753a2a58
|
[] |
no_license
|
cran/REQS
|
e827f7712a169791b8ca6988bd3b5011ae23d83b
|
4a686f98c91c80452ce4a9c35030eed8f9f16d2c
|
refs/heads/master
| 2022-10-02T08:38:50.087618
| 2022-09-28T08:34:02
| 2022-09-28T08:34:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
run.eqs.r
|
run.eqs <- function(EQSpgm, EQSmodel, serial, Rmatrix = NA, datname = NA, LEN = 2000000)
{
  ## Run an EQS model file and read the resulting .ets output back into R.
  ## Returns list(success = <logical>) concatenated with the read.eqs() results.
  res <- call.eqs(EQSpgm = EQSpgm, EQSmodel = EQSmodel, serial = serial, Rmatrix = Rmatrix, datname = datname, LEN = LEN)
  if (!res) warning("EQS estimation not successful!")
  ## Derive the .ets file name from the model file name. basename() copes
  ## with Windows "\\" path separators, which the original strsplit on "/"
  ## did not. As before, everything after the FIRST "." is treated as the
  ## extension, so "model.eqs" -> "model.ets".
  etsname <- strsplit(basename(EQSmodel), ".", fixed = TRUE)[[1]][1]
  etsfile <- paste(etsname, ".ets", sep = "")
  reslist <- read.eqs(etsfile)
  return(c(list(success = res), reslist))
}
|
354784a68542ccc081c964f5e5cb5192d1113221
|
01a13a32f517704d75b93a385634736c0ce43d41
|
/statistical_test/statistical_test.R
|
670858acb043144b2c52699c257f626af77f6782
|
[] |
no_license
|
JudithVerstegen/PLUC_Brazil_stoch
|
0dd688a70eb45ac666f3e31c2b3f27ed49841591
|
080102c14bb3d288b7136ce6876631948f3f1df0
|
refs/heads/master
| 2022-02-23T05:54:40.582481
| 2019-09-20T13:51:05
| 2019-09-20T13:51:05
| 175,794,893
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,069
|
r
|
statistical_test.R
|
# load libs
if(!require(ggplot2)){install.packages("ggplot2")}
if(!require(gridExtra)){install.packages("gridExtra")}
if(!require(stats)){install.packages("stats")}
if(!require(car)){install.packages("car")}
if(!require(plyr)){install.packages("plyr")}
if(!require(scales)){install.packages("scales")}
if(!require(userfriendlyscience)){install.packages("userfriendlyscience")}
library(ggplot2)
library(gridExtra)
library(stats)
library(car)
library(plyr)
library(scales)
library(userfriendlyscience)
# NOTE(review): clearing the workspace and closing all graphics devices from
# inside a script destroys the caller's session state and is discouraged;
# kept unchanged for parity with the original workflow.
rm(list = ls()) # clean up working environment
graphics.off() # terminate graphics devices
# define function to open and put into data frame
# define function to open and stack one indicator's results into a data frame
get_data <- function(indicator_name){
  ## Read "data4StatsTest_<indicator>.csv" (one row per scenario, one column
  ## per Monte-Carlo realization, ";"-separated) and return a long data frame
  ## with columns 'scenario' (label) and 'data' (realization value).
  filename <- paste("data4StatsTest_", indicator_name, ".csv", sep="")
  ghge <- read.csv(filename, sep=";", dec=".", header=FALSE)
  # Scenario labels correspond to the input rows, in order.
  scenario.labels <- c("Ref", "Hp", "2ndSC", "2ndEU", "Cp", "All")
  k <- length(scenario.labels)  # No of scenarios
  n <- length(ghge[1,])         # No of MC realizations
  scenarios <- data.frame(scenario=rep(scenario.labels, each=n),
                          data=rep(as.double(NA), k*n))
  # Fill row i of the input into slots ((i-1)*n + 1) : (i*n); this replaces
  # the six copy-pasted assignments of the original.
  for (i in seq_len(k)) {
    scenarios$data[((i - 1) * n + 1):(i * n)] <- as.double(c(ghge[i,], recursive = TRUE))
  }
  return(scenarios)
}
#############################################################
# MAIN
# NOTE(review): setwd() with an absolute path makes the script non-portable;
# kept unchanged for parity with the original workflow.
setwd("D:/PLUC_Renan/model_merged/PLUC_Brazil_stoch/statistical_test")
k <- 6 # 6scenarios, 6 groups to compare
# NOTE(review): n is hard-coded ("cheating") and must equal the number of MC
# realizations (columns) in the data4StatsTest_*.csv files -- TODO confirm.
n <- 10000 # cheating
N <- k*n
indicator <- "TC" # select 'TC', 'SOC' or 'BC' to change the input files
# extra column with combi scen and year for letters
tot <- data.frame(scenario=rep("", N), datas=rep(as.double(NA),N), combi=rep("", N), stringsAsFactors = FALSE)
ghge <- get_data(indicator)
# Copy each scenario's n realizations into its own block of rows; 'scenario'
# and 'combi' hold the same label here.
tot$scenario[1:n] <- "1_Ref"
tot$combi[1:n] <- "1_Ref" #as.character(scenarios_s_1990$combi[1:n])
tot$datas[1:n] <- ghge$data[1:n]
tot$scenario[(n+1):(2*n)] <- "2_HighProd"
tot$combi[(n+1):(2*n)] <- "2_HighProd" #as.character(scenarios_s_2000$combi[1:n])
tot$datas[(n+1):(2*n)] <- ghge$data[(n+1):(n*2)]
tot$scenario[(2*n+1):(3*n)] <- "3_SC2nd"
tot$combi[(2*n+1):(3*n)] <- "3_SC2nd" #as.character(scenarios_s_2009$combi[1:n])
tot$datas[(2*n+1):(3*n)] <- ghge$data[(2*n+1):(n*3)]
tot$scenario[(3*n+1):(n*4)] = "4_EU2nd"
tot$combi[(3*n+1):(n*4)] = "4_EU2nd"
tot$datas[(3*n+1):(n*4)] <- ghge$data[(3*n+1):(n*4)]
tot$scenario[(4*n+1):(n*5)] = "5_StrictCons"
tot$combi[(4*n+1):(n*5)] = "5_StrictCons"
tot$datas[(4*n+1):(n*5)] <- ghge$data[(4*n+1):(n*5)]
tot$scenario[(5*n+1) :(n*6)] = "6_AllComb"
tot$combi[(5*n+1):(n*6)] = "6_AllComb"
tot$datas[(5*n+1):(n*6)] <- ghge$data[(5*n+1):(n*6)]
# plotting the data
ggplot(tot, aes(x=scenario, y=datas)) + geom_boxplot()# + facet_wrap(~scenario)
#############################################################
# Significance tests
if(!require(PMCMR)){install.packages("PMCMR")}
if(!require(multcompView)){install.packages("multcompView")}
if(!require(rcompanion)){install.packages("rcompanion")}
if(!require(FSA)){install.packages("FSA")}
if(!require(dplyr)){install.packages("dplyr")}
# Kruskal-Wallis with post-hoc tests after Nemenyi
kk <- kruskal.test(datas ~ as.factor(combi), data = tot) #from stats
#kruskalTest(datas ~ scenario, data = tot, dist='Chisquare')
# dist='Chisquare' gives strange(r) results
pk <- posthoc.kruskal.nemenyi.test(datas ~ as.factor(combi), data = tot)
kk
pk
# Pairwise p-value matrix used for the compact-letter display further below.
pt <- pk$p.value
#pt <- t(pt)
# try Welch's anova to account for heteroscedasticity
# does not work
welch <- oneway.test(datas ~ as.factor(combi), data=tot, na.action=na.omit)
welch
# USED FOR THE PAPER
# simple one-way ANOVA with Games-Howell post-hoc test
# https://rpubs.com/aaronsc32/games-howell-test
one.way <- oneway(as.factor(tot$combi), y = tot$datas, posthoc = 'games-howell', levene = T,
                  digits=10, pvalueDigits=10, corrections=F, conf.level=0.95)
one.way
# NOTE(review): this output file is later overwritten by the write.table(pt1,
# ...) call below, which uses the same file name -- confirm which output is
# actually wanted on disk.
write.table(one.way$output, file = paste("pvalues_",indicator,".csv", sep=""),
            append = FALSE, quote = TRUE, sep = " ",
            eol = "\n", na = "NA", dec = ".", row.names = TRUE,
            col.names = TRUE)
# try it again without reference scenario
# https://rpubs.com/aaronsc32/games-howell-test
x <- tot$combi[(n+1):(6*n)]
y <- tot$datas[(n+1):(6*n)]
one.way <- oneway(as.factor(x), y = y, posthoc = 'games-howell', levene = T, digits=6, corrections=T)
one.way
# try it again on selection
selection <- tot %>% group_by(combi) %>% sample_n(size = 100)
print(summary(selection))
one.way <- oneway(as.factor(selection$combi), y = selection$datas, posthoc = 'games-howell', levene = T,
                  digits=10, pvalueDigits=10, corrections=T)
one.way
# try Friedman instead of Kruskal-Wallis
# does not work because the combination of 'combi' and 'scen'
# isn't unique because of MC analysis
##agg.data <- aggregate(datas ~ year+scen+combi, data = tot, median)
##f <- friedman.test(datas ~ year | scenario, data=agg.data)
##f
##pf <- posthoc.friedman.nemenyi.test(datas ~ scenario| year, data=agg.data)
##pt <- pf$p.value
##pt
# https://stats.stackexchange.com/questions/292394/how-to-report-results-based-on-likert-item-group-differences/292540
# Build the compact-letter display from the Nemenyi p-value matrix.
pt1 = fullPTable(pt)
write.table(pt1, file = paste("pvalues_",indicator,".csv", sep=""), append = FALSE, quote = TRUE, sep = " ",
            eol = "\n", na = "NA", dec = ".", row.names = TRUE,
            col.names = TRUE)
# NOTE(review): the result name 'letters' shadows base R's 'letters' constant;
# on a re-run in the same session the Letters=letters argument would pick up
# this object instead of the alphabet.
letters <- multcompLetters(pt1,
                           compare="<",
                           threshold=0.01,
                           Letters=letters,
                           reversed = FALSE)
DF = as.data.frame(letters$Letters)
DF$scenarios <- rownames(DF)
DF
# Attach the significance letter to every observation and to the per-group
# medians used for the plot annotation.
tot$letter<-with(DF, letters$Letters[match(tot$combi, scenarios)])
agg.data <- aggregate(datas ~ scenario+combi, data = tot, median)
agg.data$letter<-with(DF, letters$Letters[match(agg.data$combi, scenarios)])
agg.data
#---------------------- PLOTS ----------------------
# five panels
# graphics.off() # terminate graphics devices
# ggplot(tot, aes(x = scenario, y = datas, group=combi, fill=scenario)) +
# geom_boxplot(coef = 5) + facet_wrap(~scenario) +
# geom_text(data = agg.data, aes(label=letter, vjust=vjust, hjust=+0.5)) +
# ylab(expression(atop("GHG emissions", paste('gram CO'[2]*'-eq Mj'[EtOH])))) +
# xlab("")
#vjust <- 0
#hjust <-0.5
#legpos <- "20"
# Colour the boxes by indicator.
if(indicator == "TC") {col = "#B2C29D"}
if(indicator == "SOC") {col = "#DFC284"}
if(indicator == "BC") {col = "#7BC3BC"}
# Overall data range (currently unused below; kept for reference).
ghge_min = min(ghge$data)
ghge_max = max(ghge$data)
# one panel
graphics.off() # terminate graphics devices
# NOTE(review): the original chain had truncated layer names (tat_boxplot,
# eom_boxplot, eom_text, lab) that are not ggplot2 functions; restored to
# stat_boxplot / geom_boxplot / geom_text / xlab / ylab. The per-scenario
# filters also used labels ("1 Ref", "2 HP", ...) that never match the
# scenario values assigned above ("1_Ref", "2_HighProd", ...), so the
# significance letters were silently dropped; the filters now use the
# actual scenario values.
ggplot(tot, aes(x=scenario, y = datas)) +
  stat_boxplot(coef=10, geom = "errorbar", colour = "#585858", size=0.2) +
  geom_boxplot(coef=10, show.legend=FALSE, fill=col, colour = "#585858", size=0.35) +
  geom_text(data = agg.data[agg.data$scenario=="1_Ref",], size=5, aes(label=letter, y=max(tot$datas[tot$scenario=="1_Ref"])-1,vjust=0)) +
  geom_text(data = agg.data[agg.data$scenario=="2_HighProd",], size=5, aes(label=letter, y=max(tot$datas[tot$scenario=="2_HighProd"])+1,vjust=0)) +
  geom_text(data = agg.data[agg.data$scenario=="3_SC2nd",], size=5, aes(label=letter, y=max(tot$datas[tot$scenario=="3_SC2nd"])+1,vjust=0)) +
  geom_text(data = agg.data[agg.data$scenario=="4_EU2nd",], size=5, aes(label=letter, y=max(tot$datas[tot$scenario=="4_EU2nd"])+1,vjust=0)) +
  geom_text(data = agg.data[agg.data$scenario=="5_StrictCons",], size=5, aes(label=letter, y=max(tot$datas[tot$scenario=="5_StrictCons"])+1,vjust=0)) +
  geom_text(data = agg.data[agg.data$scenario=="6_AllComb",], size=5, aes(label=letter, y=max(tot$datas[tot$scenario=="6_AllComb"])+1,vjust=0)) +
  xlab("") +
  ylab(expression(atop("GHG emissions", paste('gram CO'[2]*'-eq Mj'[EtOH])))) +
  scale_y_continuous(breaks = seq(-300, 300, 30), limits=c(-30, 120)) +
  scale_x_discrete(labels=c("Ref.", "HP",
                            expression(atop("2"^{nd}*"SC")),
                            expression(atop("2"^{nd}*"EU")),
                            "CP", "All")) +
  theme(plot.title=element_text(hjust = 0.5),
        panel.background = element_blank(),
        panel.border = element_rect(colour = "grey", linetype = "solid", size=0.3, fill = NA),
        panel.grid.major = element_line(colour = "#E6E6E6", size=0.3),
        axis.text = element_text(colour="#585858", size = 13),
        axis.title.y = element_text(colour="#585858", size = 12))
#############################################################
# OBS: I couldn't figure out how to use savePlot in linux.
# Therefore I was manually exporting the plots manually save figure
#savePlot(file = 'agb_letters.png', type = "png")
#savePlot(file = 'agb_letters.pdf', type = "pdf")
#savePlot(file = 'species_letters.png', type = "png")
#savePlot(file = 'species_letters.pdf', type = "pdf")
|
0d40720202512cd9fb514b053a4e753303a75c96
|
87fdff0de23e7df2862d9c4cbcb4486a9ab80026
|
/rscripts/final_analysis/finalDendro.R
|
958f687304dda7c94be96a548514096e18796aa6
|
[] |
no_license
|
jillbo1000/Data_Expo_2018
|
1aa1586237d39d6564a2353210a5805f7e8036b9
|
e79a120c014302117ba877f9f945e93970e1a4eb
|
refs/heads/master
| 2020-11-25T00:22:10.267735
| 2020-10-28T14:45:10
| 2020-10-28T14:45:10
| 228,405,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,154
|
r
|
finalDendro.R
|
library(dplyr)
library(lubridate)
library(tidyr)
library(cluster)
library(fpc)
library(ggplot2)
library(ggmap)
library(fiftystater)
library(maps)
library(RColorBrewer)
library(gridExtra)
library(grid)
library(ggdendro)
# Load the per-city summary data.
cluster2 <- read.csv("../../data/summary_city.csv")
cluster2$state <- as.character(cluster2$state)
# Help for abbreviations from:
# - https://stackoverflow.com/questions/5411979/state-name-to-abbreviation-in-r
# Map full state names to two-letter abbreviations. match() is vectorised and
# requires an exact name; the original grep() loop errored whenever one state
# name is a substring of another (e.g. "Virginia" matches both "Virginia" and
# "West Virginia"). States with no exact match yield NA instead of erroring.
cluster2$stateABB <- state.abb[match(cluster2$state, state.name)]
cluster2$name <- paste(cluster2$city, cluster2$stateABB, sep = ", ")
# Impute Baltimore values with nearest neighbor (Dover, DE).
# Visibility and cloud-cover columns are copied from station KDOV to KDMH.
cluster2$Min_Vis[cluster2$AirPtCd == "KDMH"] <- cluster2$Min_Vis[cluster2$AirPtCd == "KDOV"]
cluster2$Sd_Vis[cluster2$AirPtCd == "KDMH"] <- cluster2$Sd_Vis[cluster2$AirPtCd == "KDOV"]
cluster2$CloudCover[cluster2$AirPtCd == "KDMH"] <- cluster2$CloudCover[cluster2$AirPtCd == "KDOV"]
cluster2$Sd_CloudCover[cluster2$AirPtCd == "KDMH"] <- cluster2$Sd_CloudCover[cluster2$AirPtCd == "KDOV"]
# Impute Austin, NV values with nearest neighbor (Reno)
# Same four columns copied from station KRNO to KP68.
cluster2$CloudCover[cluster2$AirPtCd == "KP68"] <- cluster2$CloudCover[cluster2$AirPtCd == "KRNO"]
cluster2$Sd_CloudCover[cluster2$AirPtCd == "KP68"] <- cluster2$Sd_CloudCover[cluster2$AirPtCd == "KRNO"]
cluster2$Min_Vis[cluster2$AirPtCd == "KP68"] <- cluster2$Min_Vis[cluster2$AirPtCd == "KRNO"]
cluster2$Sd_Vis[cluster2$AirPtCd == "KP68"] <- cluster2$Sd_Vis[cluster2$AirPtCd == "KRNO"]
# Keep only the measurement columns: drop identifiers, coordinates, the
# temperature summary columns and the BSS score before clustering.
cc2 <- cluster2 %>% dplyr::select(-AirPtCd, -city, -state, -stateABB, -name, -longitude, -latitude,
                           -mxT_sd, -mxT_mean, -mnT_sd, -mnT_mean,
                           -mxT_sd_abs, -mxT_mean_abs, -mnT_sd_abs, -mnT_mean_abs,
                           -BSS)
# Standardise, then Ward hierarchical clustering on Euclidean distances,
# cut into 6 groups.
cc2 <- scale(cc2)
d2 <- dist(cc2, method = "euclidean")
c2 <- hclust(d2, method = "ward.D2")
l2 <- cutree(c2, k = 6)
# cluster2$Cluster <- l2
# Define color scheme
set1 <- c("#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#f781bf", "gray50")
set1 <- set1[c(4, 2, 3, 6, 1, 5, 7)] # Reorder to match the original layout
# Code adapted from answer given by "jlhoward" at:
# https://stackoverflow.com/questions/21474388/colorize-clusters-in-dendogram-with-ggplot2
#=============================================================================
dendr <- dendro_data(c2, type="rectangle") # convert for ggplot
# Replace the numeric leaf labels with the "City, ST" names built earlier.
dendr[["labels"]]$label <- cluster2$name[as.numeric(as.character(dendr[["labels"]]$label))]
clust.df <- data.frame(label = cluster2$name, cluster=factor(l2))
# dendr[["labels"]] has the labels, merge with clust.df based on label column
dendr[["labels"]] <- merge(dendr[["labels"]],clust.df, by="label")
# plot the dendrogram; note use of color=cluster in geom_text(...)
p1 <- ggplot() +
  geom_segment(data=segment(dendr), aes(x=x, y=y, xend=xend, yend=yend)) +
  geom_text(data=label(dendr), aes(x, y, label=label, hjust=0, color=cluster),
            size=6) +
  coord_flip() + scale_y_reverse(expand=c(0, 0), limits = c(40, -15)) +
  scale_color_manual(values = set1) +
  theme(legend.position = "none",
        axis.line=element_blank(),
        axis.ticks=element_blank(),
        axis.text=element_blank(),
        axis.title=element_blank(),
        panel.background=element_rect(fill="white"),
        panel.grid=element_blank())
#=============================================================================
#=============================================================================
# First, create a set of boxes that we can color differently.
tbox.x <- c(0, 0, 5, 5, 0)
tbox.y <- c(0, 1, 1, 0, 0)
# Now create a series of 6 boxes.
boxes <- data.frame(x = c(tbox.x, tbox.x+5, tbox.x+10, tbox.x+15,
                          tbox.x+20, tbox.x+25),
                    y = rep(tbox.y, 6),
                    group = c(rep(1, 5), rep(2, 5), rep(3, 5),
                              rep(4, 5), rep(5, 5), rep(6, 5)))
# Manual legend strip: one coloured polygon per cluster with its region name
# overlaid in white text.
legend <- ggplot(boxes, aes(x = x, y = y)) +
  geom_polygon(aes(fill = factor(group)), color = "white") +
  coord_fixed() +
  scale_color_manual(values = c("#f781bf", "#4daf4a", "#377eb8",
                                "#e41a1c", "#984ea3", "#ff7f00")) +
  scale_fill_manual(values = c("#f781bf", "#4daf4a", "#377eb8",
                               "#e41a1c", "#984ea3", "#ff7f00")) +
  annotate("text", x = 2.5 + c(0, 5, 10, 15, 20, 25),
           y = rep(0.5, 6),
           label = c("Cali-Florida", "Southeast", "Northeast",
                     "Intermountain West", "Midwest", "Southwest"),
           color = "white", size = c(5.5, 5.5, 5.5, 3.5, 5.5, 5.5)) +
  theme(legend.position = "none",
        panel.background = element_blank(),
        axis.line = element_blank(),
        axis.text = element_blank(),
        axis.title = element_blank(),
        axis.ticks = element_blank())
#=============================================================================
# Write the dendrogram to PDF. The combined dendrogram+legend layout is
# commented out; only p1 is rendered (top-level auto-printing of p1 draws it
# into the open pdf device).
pdf(height = 25, width = 10, file = "../../images/final/paper/finalDendro.pdf")
#grid.arrange(grobs = list(ggplotGrob(p1), ggplotGrob(legend)), heights = c(0.9, 0.1))
p1
dev.off()
|
165d1aa15f82d27d88c5c6a7d437f1c101362fd4
|
69b95cdda3e40b11a7b35a929c54b207c483ea33
|
/tree compare v4.R
|
8081cf4efbfc42c8f506e15e1f04d7e1ba26e357
|
[] |
no_license
|
rulofburger/Machine-learning-examples
|
408d57eadf4d6e1f0016d156037c4f960fcc90b2
|
c2e4af1beb206e28d4b1d5df267cd2c3c1a99fcf
|
refs/heads/master
| 2020-12-02T11:13:56.815559
| 2020-11-03T11:15:02
| 2020-11-03T11:15:02
| 96,615,204
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,368
|
r
|
tree compare v4.R
|
#Load libraries
library(leaps)
library(glmnet)
library(class)
library(mlbench)
library(caret)
library(car)
library(rattle)
library(party)
library(dummies)
library(haven)
library(foreach)
library(dplyr)
library(rpart)
library(parallel)
library(doParallel)
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jre1.8.0_131')
#Remove lists from memory and load data
# NOTE(review): rm(list = ls()) in a script wipes the user's workspace and
# attach() pollutes the search path -- both are discouraged; kept for parity.
rm(list = ls())
library(haven)
NIDSdata <- read_dta("D:/My Documents/Reading group/Machine learning/R Resources/Data/NIDS/NIDS_data_w1.dta")
# NOTE(review): the save()/load() round-trip below re-reads an object that is
# already in memory; its only effect is caching the .rda file on disk.
save(NIDSdata, file="D:/My Documents/Reading group/Machine learning/R Resources/Data/NIDS/NIDSdata.rda")
load("D:/My Documents/Reading group/Machine learning/R Resources/Data/NIDS/NIDSdata.rda")
attach(NIDSdata, warn.conflicts = F)
# Create hhdata.full, which drops variables that contain many missing values, drops observations with missing values,
# creates a sensibly ordered head.educ variable, and generates dummies for all categorical variables
# Inspect the Stata value labels and raw distribution before recoding.
attr(NIDSdata$head_educ,"labels")
table(NIDSdata$head_educ)
# Recode head_educ to an ordered schooling scale:
#   25 -> 0, 13-17 -> 11, 21-23 -> value-6, 18-20 -> 13; other codes unchanged.
NIDSdata$head_educ = ifelse(NIDSdata$head_educ==25,0,ifelse(NIDSdata$head_educ>=13 & NIDSdata$head_educ<=17,11,ifelse(NIDSdata$head_educ>=21 & NIDSdata$head_educ<=23,NIDSdata$head_educ-6,
                            ifelse(NIDSdata$head_educ>=18 & NIDSdata$head_educ<=19,13,ifelse(NIDSdata$head_educ==20,13,NIDSdata$head_educ)))))
# Negative codes and code 24 are treated as missing.
is.na(NIDSdata$head_educ) = NIDSdata$head_educ <0 | NIDSdata$head_educ == 24
is.na(NIDSdata$head_age) = NIDSdata$head_age < 0
# Assemble the analysis frame; income is log per-capita household income.
hhdata.full = data.frame(income= log(hhincome/hhsizer),members=hhsizer,rooms=h_dwlrms,dwelling=h_dwltyp,prov=hhprov, roof=h_dwlmatroof, wall=h_dwlmatrwll, dwelling.own=h_ownd, water=h_watsrc, toilet=h_toi,
                         toilet.share=h_toishr, electricity=h_enrgelec, landline=h_tellnd, cellphone.use=h_telcel, refuse.removal=h_refrem, street.light=h_strlght,
                         radio=h_ownrad, hifi=h_ownhif, tv=h_owntel, satelite=h_ownsat, video=h_ownvid, computer=h_owncom, camera=h_owncam, cellphone.own=h_owncel, tv.les=h_ownelestv,
                         gas.stove=h_owngasstv, microwave=h_ownmic, fridge=h_ownfrg, washer=h_ownwsh,district.council=district_council,head.absent=head_absent,head = head_gender - 1, head.educ=head_educ, head.age=head_age)
# Columns 2+ to integer, then the categorical columns (4 .. ncol-2) to factor.
for (i in 2:dim.data.frame(hhdata.full)[2]) {hhdata.full[,i]=as.integer(hhdata.full[,i])}
for (i in 4:(dim.data.frame(hhdata.full)[2]-2)) {hhdata.full[,i]=as.factor(hhdata.full[,i])}
hhdata.full$income=as.numeric(hhdata.full$income)
# The lm fit is only used to identify rows dropped for missingness
# (lm's na.action), which are then removed from the data.
# NOTE(review): hhdata.full[-sample.omit,] fails if no rows were dropped
# (sample.omit would be NULL) -- relies on missings being present.
lm.fit.full = lm(income~.,data=hhdata.full)
summary(lm.fit.full)
sample.omit = lm.fit.full$na.action
hhdata.full = hhdata.full[-sample.omit,]
attach(hhdata.full, warn.conflicts = F)
# Drop observations with residual negative (missing/refused) codes on any of
# the listed variables, plus dwelling type 5. Relies on the attach() above.
sample.exclude = toilet.share ==-8 | toilet.share ==-5 | roof == -5 | roof == -13 |
wall == -13 | dwelling.own == -8 | water ==-8 | toilet == -8 | cellphone.use == -5 |
refuse.removal == -5 | street.light == -3 | radio == -8 | hifi == -8 | satelite == -8 | tv == -8 |
camera == -8 | computer == -8 | video == -8 | cellphone.own == -8 | tv.les == -8 | gas.stove == -8 |
microwave == -8 | fridge == -8 | washer == -8 | dwelling == 5
hhdata.full = hhdata.full[which(sample.exclude==F),]
hhdata.full = droplevels(hhdata.full)
# Relabel the factor levels with readable names.
# NOTE(review): levels()<- assigns by the CURRENT level order after
# droplevels(); confirm that order matches the label vectors below.
levels(hhdata.full$dwelling) <- c('na', 'house', 'hut', 'flat', 'semidetached', 'backyard.house', 'backyard.shack', 'shack', 'flatlet', 'caravan')
levels(hhdata.full$prov) <- c('Western.Cape', 'Eastern.Cape', 'Northern.Cape', 'Free.State', 'KwaZulu.Natal', 'North.West', 'Gauteng', 'Mpumalanga', 'Limpopo')
levels(hhdata.full$roof) <- c('na','brick','concrete','iron','wood','plastic','cardboard','mudcement','wattle','tile','mudbricks','thatching','sheeting','rock')
levels(hhdata.full$wall) <- c('na','brick','concrete','iron','wood','plastic','cardboard','mudcement','wattle','tile','mudbricks','thatching','sheeting','rock')
levels(hhdata.full$dwelling.own) <- c('yes', 'no')
levels(hhdata.full$water) <- c('na','piped','yard','public','tanker','priv.borehole','com.borehole','rain.tank','stream','dam','well','spring','na','neighbour')
levels(hhdata.full$toilet) <- c('na','na','flush.onsite','flush.offsite','chemical','pit.ventilation','pit.noventilation','bucket','na')
levels(hhdata.full$toilet.share) <- c('na', 'yes', 'no')
levels(hhdata.full$electricity) <- c('na', 'yes', 'no')
levels(hhdata.full$landline) <- c('na', 'working', 'not.working','no')
levels(hhdata.full$cellphone.use) <- c('na', 'yes', 'no')
levels(hhdata.full$refuse.removal) <- c('na', 'yes', 'no')
levels(hhdata.full$street.light) <- c('working', 'not.working','no')
levels(hhdata.full$radio) <- c('na', 'yes', 'no')
levels(hhdata.full$hifi) <- c('na', 'yes', 'no')
levels(hhdata.full$tv) <- c('na', 'yes', 'no')
levels(hhdata.full$satelite) <- c('na', 'yes', 'no')
levels(hhdata.full$video) <- c('na', 'yes', 'no')
levels(hhdata.full$computer) <- c('na', 'yes', 'no')
levels(hhdata.full$camera) <- c('na', 'yes', 'no')
levels(hhdata.full$cellphone.own) <- c('na', 'yes', 'no')
levels(hhdata.full$tv.les) <- c('na', 'yes', 'no')
levels(hhdata.full$gas.stove) <- c('na', 'yes', 'no')
levels(hhdata.full$microwave) <- c('na', 'yes', 'no')
levels(hhdata.full$fridge) <- c('na', 'yes', 'no')
levels(hhdata.full$washer) <- c('na', 'yes', 'no')
# Shorten the district-council labels by stripping boilerplate suffixes.
dc.labels = gsub(" District Municipality","",names(attr(NIDSdata$district_council,"labels")))
dc.labels=gsub(" Municipality","",dc.labels)
dc.labels=gsub(" Metropolitan","",dc.labels)
dc.labels=gsub("City Of ","",dc.labels)
levels(hhdata.full$district.council) <- dc.labels
levels(hhdata.full$head.absent) <- c('no','yes')
levels(hhdata.full$head) <- c('male', 'female')
# Expand all factor columns (4:32) into dummy variables; keep the continuous
# columns (1:3) and the last two (head.educ, head.age) as-is.
hhdata.full = data.frame(hhdata.full[,1:3],hhdata.full[,33:34],dummy.data.frame(hhdata.full[,4:32], sep=".",names = NULL, omit.constants=TRUE, dummy.classes = getOption("dummy.classes")))
attach(hhdata.full, warn.conflicts = F)
compare.rmse <- function(model, training, testing) {
  ## Summarise a caret model's fit quality: cross-validated RMSE (mean and
  ## standard deviation from model$results) plus the RMSE recomputed on the
  ## supplied training and testing frames. Prints the named vector and
  ## returns it (invisibly, via print()).
  train_resid <- training$income - predict(model, training)
  test_resid <- testing$income - predict(model, testing)
  rmse <- c(
    model$results$RMSE,            # CV mean RMSE
    model$results$RMSESD,          # CV RMSE standard deviation
    sqrt(mean(train_resid^2)),     # in-sample RMSE
    sqrt(mean(test_resid^2))       # hold-out RMSE
  )
  names(rmse) <- c("CV RMSE", "CV RMSE Std. Dev.", "Training RMSE", "Testing RMSE")
  print(rmse)
}
# 75/25 train/test split, stratified on the outcome by createDataPartition.
set.seed(7)
inTrain = createDataPartition(y=income,p=0.75,list=F)
training= hhdata.full[inTrain,]
testing= hhdata.full[-inTrain,]
formula = income~.
# Parallel backend for the caret fits below.
cluster <- makeCluster(detectCores() - 1) # convention to leave 1 core for OS
registerDoParallel(cluster)
#Linear regression (lm)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
lm.fit.time = system.time(lm.fit <- train(formula, data=training,method='lm',trControl=fitControl))
lm.fit.size = format(object.size(lm.fit),units="Mb")
lm.fit.rmse=compare.rmse(lm.fit,training,testing)
lm.fit.tab = c(round(lm.fit.time[3],2),lm.fit.size)
names(lm.fit.tab) = c("Time to run (sec)","Size to store (MB)")
lm.plot=plot(varImp(lm.fit))
summary(lm.fit$finalModel)
lm.fit.tab
lm.fit.rmse
# OLS achieves an R-squared of 0.61 and a CV RMSE of 0.77. Testing RMSE is insignificantly higher. Can any of the trees do better?
# The most significant predictors (p < 0.001) are: all the continuous variables (rooms, members, head.educ, head.age),
# an amenity (landline), assets (hifi1, satelite1, video1, computer1,camera1, microwave1, washer1), some DCs, and female headship.
# These are the same variables that show up in the varImp function, since variable importance is derived from the t-statistics.
#Regression tree (rpart)
#Let's start by running a simple regression tree using rpart and conservative parameter values
set.seed(123)
rpart.simple.time = system.time(rpart.simple <- rpart(income ~ ., method="anova", data=training,control=rpart.control(minsplit = 20, minbucket = round(20/3), cp = 0.03,
maxcompete = 4, maxsurrogate = 5, usesurrogate = 2, xval = 10,
surrogatestyle = 0, maxdepth = 30)))
rpart.simple.size = format(object.size(rpart.simple),units="Mb")
rpart.simple.tab = c(round(rpart.simple.time[3],2),rpart.simple.size)
names(rpart.simple.tab) = c("Time to run (sec)","Size to store (MB)")
# Options minsplit, minbucket, cp and maxdepth are used to control the complexity of the model.
# maxcompete stores the results for all competing splits that were nearly enacted. Since the model investigates all possible split
# this paramter has no effect on running time. maxsurrogate specificies how many surrogates to explore for every primary split, which
# will determine which branch to use for observations that are missing the primary split variable, this does slow down the function.
# usersurrogate determines how to apply the information from the surrogates to allocating variables with missing split variables. xval
# determines the number of folds for cross-validation, which also slows down the function.
fancyRpartPlot(rpart.simple,main="Regression Tree for NIDS Income")
# The sample is split four times, which leads to 5 terminal nodes.
# Wealthy households either have a computer, or they have 2 or fewer members and a washing machine
# Poor households have no computer or microwave and 3 or more members
# Middle income households have no computer, and either have 3 or more members and a microwave or 2 or fewer members and no washer
summary(rpart.simple)
# Without any conditioning variables, the sample has a MSE of MSE=1.399056 and a SST = MSE*N = 6550.38.
# The MSE is also called the root node error and the TSS is also sometimes referred to as the deviance.
# The sum of the RSS in the two nodes after the first split is 4518.7630 + 588.3661 = 5107.1291.
# Expressed as a proportion of the root SST, # this gives the rel error = 0.7797 associated with the first split. T
# he CP associated with this split is the relative reduction in the rel error of 0.2203.
# This was the largest possible reduction in the rel error rate.
# The second split occurs in the left son of the first split, where the RSS is 4518.7630. It produces in its sons RSS's of
# 2095.1860 and 1606.0200 respectively, or 3701.206 in total. The reduction in the RSS is 4518.7630 - 3701.206 = 817.557,
# which is 0.1248 when expressed as a proportion of the original TSS. This is the cp associated with the second split.
rpart.simple
printcp(rpart.simple)
#This table shows the rel errors and cp values associated with different splits, as well as the cv rel error values and their std deviations
plotcp(rpart.simple)
#A horizontal line is drawn 1SE above the minimum of the curve, which can assist in choosing the 1SE cp value (providing the minimum value appears on the graph)
# Initial rpart tune over caret's default cp grid (tuneLength = 10).
# caret's seeds argument needs a list of length folds+1: ten vectors of
# length tuneLength (one per resample) plus a single seed for the final fit.
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(1000, 10)
# Bug fix: was seeds[[101]], which silently grew the length-11 list with
# 90 NULLs and left element 11 unset (compare the sibling sections, which
# all use seeds[[11]]).
seeds[[11]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
rpart.fit0 <- train(formula, data=training, method = 'rpart', trControl=fitControl, tuneLength = 10)
rpart.fit0
# The output shows that the RMSE is increasing for all candidate values of cp between 0.007181379 and 0.220330880
# that the function chose for us, so we should also tune over smaller values.
# The fact that we are off the usual grid seems to suggest that our model calls for more larger trees than are usually optimal.
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
library(parallelMap)
regr.task = makeRegrTask(id = "NIDS income regression with rpart", data = training, target = "income")
measure = rmse
mbo.ctrl = makeMBOControl()
mbo.ctrl = setMBOControlInfill(mbo.ctrl, crit = crit.ei)
mbo.ctrl = setMBOControlTermination(mbo.ctrl, max.evals = 25L)
getParamSet("regr.rpart")
regr.lrn = makeLearner("regr.rpart")
rpart.num_ps = makeParamSet(
makeNumericParam("cp", lower = 0, upper = 0.2),
makeIntegerParam("maxdepth", lower = 1, upper = 30),
makeIntegerParam("minsplit", lower = 1, upper = 100)
)
design.mat = generateRandomDesign(n = 300, par.set = rpart.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
tune.pars.rpart = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
measures = rmse, par.set = rpart.num_ps, control = ctrl, show.info = TRUE)
#Result: cp=0.00101; maxdepth=25; minsplit=18 : rmse.test.rmse=0.811
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
cluster <- makeCluster(detectCores() - 1) # convention to leave 1 core for OS
registerDoParallel(cluster)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
rpart.fit.time = system.time(rpart.fit <- train(formula, data=training, method = 'rpart', control= rpart.control(minsplit = 18, maxdepth = 25),trControl=fitControl,tuneGrid=expand.grid(cp=0.00101)))
rpart.fit.size = format(object.size(rpart.fit),units="Mb")
rpart.fit.rmse=compare.rmse(rpart.fit,training,testing)
rpart.fit.tab = c(round(rpart.fit.time[3],2),rpart.fit.size)
names(rpart.fit.tab) = c("Time to run (sec)","Size to store (MB)")
plot(varImp(rpart.fit))
rpart.fit.plot=plot(varImp(rpart.fit))
varImp(rpart.fit)
rpart.fit$finalModel
fancyRpartPlot(rpart.fit$finalModel,main="Regression Tree for NIDS Income")
setNames(as.data.frame(table(predict(rpart.fit,training))), "")
#Regression tree (ctree)
# Regression trees (like those estimated with rpart) suffer from selection bias: predictors with a higher number of distinct values are
# favored over more granular predictors. This issue is not addressed by fixing the tuning parameters, so makes the splitting algorithm
# vulnerbale to selecting continious or multivalued ordinal noise variables. It is worth noting that the rpart function above included all
# the continuous variables amongst the most important predictors. The conditional inference tree estimator attempts to address this by
# using hypothesis testing of difference in the post-split means - corrected within each predictor for multiple comparisons - before enacting
# any splits. The p-value for this test is 1 - the mincriterion parameter, so these values would typically be between 0.75 and 0.99.
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(1000, 10)
seeds[[11]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
ctree.fit0 <- train(formula, data=training,method='ctree',trControl=fitControl, tuneLength = 10)
ctree.fit0
# The RMSE are fairly similiar across a wide range of the mincriterion
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
getParamSet("regr.ctree")
regr.lrn = makeLearner("regr.ctree")
ctree.num_ps = makeParamSet(
makeNumericParam("mincriterion", lower = 0, upper = 1),
makeIntegerParam("maxdepth", lower = 1, upper = 30),
makeIntegerParam("minsplit", lower = 1, upper = 100)
)
design.mat = generateRandomDesign(n = 300, par.set = ctree.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
tune.pars.rpart = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
measures = rmse, par.set = ctree.num_ps, control = ctrl, show.info = TRUE)
#mincriterion=0.0379; maxdepth=26; minsplit=86 : rmse.test.rmse=0.806
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
cluster <- makeCluster(detectCores() - 1)
registerDoParallel(cluster)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
ctree.fit.time = system.time(ctree.fit <- train(formula, data=training, method = 'ctree', trControl=fitControl,controls= ctree_control(minsplit = 18, maxdepth = 25),tuneGrid=expand.grid(mincriterion=0.0379)))
ctree.fit.size = format(object.size(ctree.fit),units="Mb")
ctree.fit.rmse=compare.rmse(ctree.fit,training,testing)
ctree.fit.tab = c(round(ctree.fit.time[3],2),ctree.fit.size)
names(ctree.fit.tab) = c("Time to run (sec)","Size to store (MB)")
ctree.fit.plot=plot(varImp(ctree.fit))
varImp(ctree.fit)
# M5 = Regression model tree
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(1000, 8)
seeds[[11]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
M5.fit0 <- train(formula, data=training,method='M5',trControl = fitControl,control = Weka_control(M = 10))
#M option represents minbucket
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000,1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
M5.fit.time = system.time(M5.fit <- train(formula, data=training,method='M5',trControl=trainControl(method = "cv",number = 5, allowParallel = F),tuneGrid=expand.grid(pruned = "Yes", smoothed = c("Yes"), rules = c("No"))))
M5.fit.size = format(object.size(M5.fit),units="Mb")
M5.fit.rmse=compare.rmse(M5.fit,training,testing)
M5.fit.tab = c(round(M5.fit.time[3],2),M5.fit.size)
names(M5.fit.tab) = c("Time to run (sec)","Size to store (MB)")
plot(varImp(M5.fit))
varImp(M5.fit)
# mlr does not have an M5 model, but the mob function seems comparable
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
getParamSet("regr.mob")
regr.lrn = makeLearner("regr.mob")
mob.num_ps = makeParamSet(
# makeNumericParam("alpha", lower = 0, upper = 0.2),
makeNumericParam("trim", lower = 0, upper = 0.2)
# makeIntegerParam("minsplit", lower = 1, upper = 100)
)
design.mat = generateRandomDesign(n = 100, par.set = mob.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
# Could not get this to work
#tune.pars.rpart = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
# measures = rmse, par.set = mob.num_ps, control = ctrl, show.info = TRUE)
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
# evtree
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(1000, 10)
seeds[[11]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
evtree.fit0 <- train(formula, data=training,method='evtree',trControl = fitControl, tuneLength = 10)
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
getParamSet("regr.evtree")
regr.lrn = makeLearner("regr.evtree")
evtree.num_ps = makeParamSet(
makeNumericParam("alpha", lower = 0, upper = 0.2),
makeIntegerParam("maxdepth", lower = 1, upper = 30),
makeIntegerParam("minsplit", lower = 1, upper = 100),
makeIntegerParam("niterations", lower = 100, upper = 10000),
makeIntegerParam("ntrees", lower = 10, upper = 200)
)
design.mat = generateRandomDesign(n = 100, par.set = evtree.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
# This took about a day to run and then produced an error
#tune.pars.rpart = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
# measures = rmse, par.set = evtree.num_ps, control = ctrl, show.info = TRUE)
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
cluster <- makeCluster(detectCores() - 1)
registerDoParallel(cluster)
#Bagged regression tree (treebag)
# No tuning parameters
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000,1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
treebag.fit.time <- system.time(treebag.fit <- train(formula, data=training, method = 'treebag', trControl=fitControl))
treebag.fit.size = format(object.size(treebag.fit),units="Mb")
treebag.fit.rmse=compare.rmse(treebag.fit,training,testing)
treebag.fit.tab = c(round(treebag.fit.time[3],2),treebag.fit.size)
names(treebag.fit.tab) = c("Time to run (sec)","Size to store (MB)")
plot(varImp(treebag.fit))
varImp(treebag.fit)
#Random forest
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(100, 10)
seeds[[11]] <- sample.int(100, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
rf.fit0 <- train(formula, data=training,method='rf',trControl=fitControl, tuneLength = 10)
rf.fit0
#mtry=43->RMSE=0.7211
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
getParamSet("regr.randomForest")
regr.lrn = makeLearner("regr.randomForest")
rf.num_ps = makeParamSet(
makeIntegerParam("mtry", lower = 2, upper = 188),
makeIntegerParam("ntree", lower = 100, upper = 3000),
makeIntegerParam("nodesize", lower = 5, upper = 200)
)
design.mat = generateRandomDesign(n = 100, par.set = rf.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
tune.pars.rf = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
measures = rmse, par.set = rf.num_ps, control = ctrl, show.info = TRUE)
#mtry=80; ntree=1086; nodesize=5 : rmse.test.rmse=0.72
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
cluster <- makeCluster(detectCores() - 1)
registerDoParallel(cluster)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = F,seeds=seeds)
rf.fit.time = system.time(rf.fit <- train(formula, data=training, method = 'rf', trControl=fitControl,tuneGrid=expand.grid(mtry=43),importance = TRUE))
rf.fit.size = format(object.size(rf.fit),units="Mb")
rf.fit.rmse=compare.rmse(rf.fit,training,testing)
rf.fit.tab = c(round(rf.fit.time[3],2),rf.fit.size)
names(rf.fit.tab) = c("Time to run (sec)","Size to store (MB)")
plot(varImp(rf.fit))
varImp(rf.fit)
#Boosted regression tree (gbm)
#Stochastic Gradient Boosting
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(1000, 10)
seeds[[11]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = T,seeds=seeds)
gbm.fit0 <- train(formula, data=training,method='gbm',trControl=fitControl, tuneLength=10)
gbm.fit0
# n.trees = 350, interaction.depth = 4, shrinkage = 0.1 and n.minobsinnode = 10 -> RMSE = 0.6974879
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
getParamSet("regr.gbm")
regr.lrn = makeLearner("regr.gbm")
gbm.num_ps = makeParamSet(
makeIntegerParam("interaction.depth", lower = 1, upper = 20),
makeIntegerParam("n.trees", lower = 100, upper = 500),
makeIntegerParam("n.minobsinnode", lower = 5, upper = 200),
makeNumericParam("shrinkage", lower = 0, upper = 0.2)
)
design.mat = generateRandomDesign(n = 100, par.set = gbm.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
tune.pars.rf = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
measures = rmse, par.set = gbm.num_ps, control = ctrl, show.info = TRUE)
#interaction.depth=19; n.trees=467; n.minobsinnode=16; shrinkage=0.0173 : rmse.test.rmse=0.692
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
cluster <- makeCluster(detectCores() - 1)
registerDoParallel(cluster)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = F,seeds=seeds)
Grid <- expand.grid(n.trees = c(467), interaction.depth = c(19), shrinkage = c(0.0173),n.minobsinnode = c(16))
gbm.fit.time <- system.time(gbm.fit <- train(formula, data=training, method = 'gbm', trControl=fitControl,tuneGrid=Grid,metric='RMSE'))
gbm.fit.size = format(object.size(gbm.fit),units="Mb")
gbm.fit.rmse=compare.rmse(gbm.fit,training,testing)
gbm.fit.tab = c(round(gbm.fit.time[3],2),gbm.fit.size)
names(gbm.fit.tab) = c("Time to run (sec)","Size to store (MB)")
plot(varImp(gbm.fit))
varImp(gbm.fit)
#Extreme gradient boosting (xbgTree)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:10) seeds[[i]] <- sample.int(1000, 400)
seeds[[11]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = T,seeds=seeds)
xgbTree.fit0 <- train(formula, data=training,method='xgbTree',trControl=fitControl, tuneLength=10)
xgbTree.fit0
#nrounds = 250, max_depth = 2, eta = 0.3, gamma = 0, colsample_bytree = 0.8, min_child_weight = 1 and subsample = 1
stopCluster(cluster)
registerDoSEQ()
library(mlr)
library(mlrMBO)
getParamSet("regr.xgboost")
regr.lrn = makeLearner("regr.xgboost")
regr.task = makeRegrTask("Xgboost with NIDS data", data = as.data.frame(lapply(training, as.numeric)), target = "income")
# Important: Xgboost in mlr does not like dummy variables; have to force them to be numeric.
xgboost.num_ps = makeParamSet(
makeIntegerParam(id = "nrounds", lower = 1,upper = 80),
makeIntegerParam(id = "max_depth", lower = 2, upper = 15),
makeNumericParam(id = "eta", lower = .01, upper = .4)
)
design.mat = generateRandomDesign(n = 300, par.set = xgboost.num_ps)
ctrl = makeTuneControlMBO(mbo.control = mbo.ctrl, mbo.design = design.mat)
set.seed(123, "L'Ecuyer")
parallelStartSocket(min(6,detectCores()-1))
tune.pars.rpart = tuneParams(learner = regr.lrn, task = regr.task, resampling = cv10,
measures = rmse, par.set = xgboost.num_ps, control = ctrl, show.info = TRUE)
#nrounds=76; max_depth=3; eta=0.181 : rmse.test.rmse=0.706
parallelStop()
detach("package:mlrMBO", unload=TRUE)
detach("package:mlr", unload=TRUE)
cluster <- makeCluster(detectCores() - 1)
registerDoParallel(cluster)
set.seed(123)
seeds <- vector(mode = "list", length = 11)
for(i in 1:11) seeds[[i]] <- sample.int(1000, 1)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = F,seeds=seeds)
Grid <- expand.grid(nrounds = c(76), max_depth = c(3), eta = c(0.181),gamma = 0, colsample_bytree = 0.8, min_child_weight = 1, subsample = 1)
xgbTree.fit.time <- system.time(xgbTree.fit <- train(formula, data=training, method = 'xgbTree', trControl=fitControl,tuneGrid=Grid,metric='RMSE'))
xgbTree.fit.size = format(object.size(xgbTree.fit),units="Mb")
xgbTree.fit.rmse=compare.rmse(xgbTree.fit,training,testing)
xgbTree.fit.tab = c(round(xgbTree.fit.time[3],2),xgbTree.fit.size)
names(xgbTree.fit.tab) = c("Time to run (sec)","Size to store (MB)")
plot(varImp(xgbTree.fit))
varImp(xgbTree.fit)
# The parameters trained in caret perform better on the CV RMSE, whereas those trained in MLR do better on the testing RMSE.
# The following functions all worked, and should be further explored:
set.seed(123)
cforest.fit0 <- train(formula, data=training,method='cforest',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
blackboost.fit0 <- train(formula, data=training,method='blackboost',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
cubist.fit0 <- train(formula, data=training,method='cubist',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
bstTree.fit0 <- train(formula, data=training,method='bstTree',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
evtree.fit0 <- train(formula, data=training,method='evtree',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
nodeHarvest.fit0 <- train(formula, data=training,method='nodeHarvest',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
#Neural nets
set.seed(123)
nnet.fit0 <- train(formula, data=training,method='nnet',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
brnn.fit0 <- train(formula, data=training,method='brnn',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
brnn.fit0 <- train(formula, data=training,method='brnn',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
# These functions did not work:
set.seed(123)
bartMachine.fit0 <- train(formula, data=training,method='bartMachine',trControl=trainControl(method = "cv",number = 5, allowParallel = F))
set.seed(123)
gbm_h2o.fit0 <- train(formula, data=training,method='gbm_h2o',trControl=trainControl(method = "cv",number = 5))
# Doesnt work on continuous outcomes
#set.seed(123)
#oblique.fit0 <- train(formula, data=training,method='oblique.tree',trControl = trainControl(method = "cv", number = 5, allowParallel = TRUE))
#Other trees
# Candidate bagging methods still to explore: Boruta (doesn't seem to work
# with caret) and classbagg. Unclear whether nodeHarvest is worth pursuing.
# (These notes were previously left uncommented, which is a syntax error
# when the script is sourced.)
fitControl <- trainControl(method = "repeatedcv", number = 2,repeats = 2,verboseIter = TRUE)
set.seed(123)
fit.bartMachine.time <- system.time(fit.bartMachine <- train(formula, data=training, method = 'bartMachine', trControl=fitControl,metric='RMSE'))
fit.bartMachine.size = object.size(fit.bartMachine)
fit.bartMachine.rmse=compare.rmse(fit.bartMachine,training,testing)
set.seed(123)
fit.blackboost.time <- system.time(fit.blackboost <- train(formula, data=training, method = 'blackboost', trControl=fitControl,metric='RMSE'))
fit.blackboost.size = object.size(fit.blackboost)
fit.blackboost.rmse=compare.rmse(fit.blackboost,training,testing)
set.seed(123)
fit.M5.time <- system.time(fit.M5 <- train(formula, data=training, method = 'rpart', trControl=fitControl,metric='RMSE'))
fit.M5.size = object.size(fit.M5)
fit.M5.rmse=compare.rmse(fit.M5,training,testing)
set.seed(123)
# Reconstructed: the bstTree training call had been corrupted to a stray
# literal `1`; rebuilt to match the pattern of the surrounding model fits.
fit.bstTree.time <- system.time(fit.bstTree <- train(formula, data=training, method = 'bstTree', trControl=fitControl,metric='RMSE'))
fit.bstTree.size = object.size(fit.bstTree)
fit.bstTree.rmse=compare.rmse(fit.bstTree,training,testing)
set.seed(123)
fit.gbm_h2o.time <- system.time(fit.gbm_h2o <- train(formula, data=training, method = 'gbm_h2o', trControl=fitControl,metric='RMSE'))
fit.gbm_h2o.size = object.size(fit.gbm_h2o)
fit.gbm_h2o.rmse=compare.rmse(fit.gbm_h2o,training,testing)
set.seed(123)
fit.Boruta.time <- system.time(fit.Boruta <- train(formula, data=training, method = 'Boruta', trControl=fitControl,metric='RMSE'))
fit.Boruta.size = object.size(fit.Boruta)
fit.Boruta.rmse=compare.rmse(fit.Boruta,training,testing)
set.seed(123)
fit.evtree.time <- system.time(fit.evtree <- train(formula, data=training, method = 'evtree', trControl=fitControl,metric='RMSE'))
fit.evtree.size = object.size(fit.evtree)
fit.evtree.rmse=compare.rmse(fit.evtree,training,testing)
set.seed(123)
fit.nodeHarvest.time <- system.time(fit.nodeHarvest <- train(formula, data=training, method = 'nodeHarvest', trControl=fitControl,metric='RMSE'))
fit.nodeHarvest.size = object.size(fit.nodeHarvest)
fit.nodeHarvest.rmse=compare.rmse(fit.nodeHarvest,training,testing)
set.seed(123)
fit.gbm.time <- system.time(fit.gbm <- train(formula, data=training, method = 'gbm', trControl=fitControl,metric='RMSE'))
fit.gbm.size = object.size(fit.gbm)
fit.gbm.rmse=compare.rmse(fit.gbm,training,testing)
#BartMachine
set.seed(123)
#seeds <- vector(mode = "list", length = 11)
#for(i in 1:10) seeds[[i]] <- sample.int(100, 10000)
#seeds[[11]] <- sample.int(100, 1)
#fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = TRUE,seeds=seeds)
fitControl <- trainControl(method = "cv", number = 10, verboseIter = TRUE, allowParallel = F)
bartMachine.fit0 <- train(formula, data=training,method='bartMachine',trControl=fitControl, tuneLength = 10)
bartMachine.fit0
#Boosted regression tree (gamboost)
set.seed(123)
Grid <- expand.grid(mstop=seq(100),prune=c(5))
fit.gamboost.time <- system.time(fit.gamboost <- train(formula, data=training, method = 'gamboost', trControl=fitControl,tuneGrid=Grid,metric='RMSE'))
fit.gamboost.size = object.size(fit.gamboost)
fit.gamboost.rmse=compare.rmse(fit.gamboost,training,testing)
plot(varImp(fit.gamboost))
fit.lm.rmse
fit.ctree.rmse
fit.ctree2.rmse
fit.rpart.rmse
fit.treebag.rmse
fit.bagEarth.rmse
fit.rf.rmse
fitControl <- trainControl(method = "repeatedcv", number = 2,repeats = 2,verboseIter = TRUE)
set.seed(123)
cubist.fit0 <- train(formula, data=training,method='cubist',trControl=fitControl)
|
53060a022d6e94fd02b4942107ecae31a7396e6b
|
da53bc94f4a868133940aedf6cf89051b5096f0b
|
/download.monex.R
|
a6d88decc4400613453153b2dddfbbc6aaf23d3b
|
[] |
no_license
|
pvillamichel/Shiny-Application-and-Reproducible-Pitch
|
c86ed82b0888955949d74dd25aee61ac9f0b4854
|
09b53401cdd13dfa2cb4081944e75c73946efd75
|
refs/heads/master
| 2020-04-15T13:09:56.570244
| 2019-01-08T18:06:22
| 2019-01-08T18:06:22
| 164,705,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,230
|
r
|
download.monex.R
|
download.monex<- function() {
  # Download daily MONEX (Costa Rican FX market) quotes from the Banco
  # Central de Costa Rica indicator service and return them as a data.frame
  # with one row per trading day with positive volume.
  #
  # Returns: data.frame with columns Date (Date class), Open, Close, Low,
  # High, Mean, WA, Volume, Num (numeric).
  #
  # NOTE(review): depends on the remote page's HTML layout -- Table9 and the
  # hard-coded row/column positions below will break if the site changes.
  library(XML)
  url<-paste("http://indicadoreseconomicos.bccr.fi.cr/indicadoreseconomicos/Cuadros/frmVerCatCuadro.aspx?CodCuadro=770&Idioma=1&FecInicial=2018/12/29&FecFinal=2100/12/31&Exportar=True", sep="")
  DATA <- readHTMLTable(url, stringsAsFactors=FALSE, header=FALSE, dec=",")$Table9
  # Transpose, drop the header row, then keep only the quote columns
  # (positions are layout artifacts of the exported table).
  DATA<-as.data.frame(t(DATA)[-1,5:21][,-c(2,9:15)])
  NAMES<-c("Date", "Open", "Close", "Low", "High", "Mean", "WA", "Volume", "Num")
  colnames(DATA)<-NAMES
  rownames(DATA)<-NULL
  # Coerce everything to character first, then parse the numeric columns,
  # converting the Spanish decimal comma to a point.
  DATA<-as.data.frame(apply(DATA, 2, function(x) (as.character(x))))
  DATA[,2:9]<-apply(DATA[,2:9], 2, function(x) as.numeric(sub(",",".",x)))
  # Keep only days with trading activity and complete rows.
  DATA<- DATA[DATA$Volume>0,]
  DATA<- DATA[complete.cases(DATA),]
  # Translate Spanish month abbreviations to month numbers so the date
  # string can be parsed as %d-%m-%Y.
  MONTHS <- c("Ene", "Feb", "Mar", "Abr", "May", "Jun" , "Jul", "Ago", "Set", "Oct", "Nov", "Dic")
  DATA$Date <- gsub(" ", "-", DATA$Date)
  for (i in 1:12) {
  DATA$Date<-gsub(MONTHS[i], i, DATA$Date)
  }
  DATA$Date <- as.Date(DATA$Date, "%d-%m-%Y")
  return(DATA)
}
|
a9adf3bbb663d1ac154c7fc30276a02d33b82ce7
|
82998c05982ed9ec5cf3b640ac9608df31b4cd0e
|
/psychmeta.R
|
0682adb2941776a6d163ffe451a6114a021d36a4
|
[] |
no_license
|
YiLalaWang/Meta-analysis-R-codes
|
1a65274c705250da0a82dfda8efd831da6cab64a
|
5a61dcd80bd20b8ecceae3a15bff36c8e3df3300
|
refs/heads/master
| 2021-08-07T20:57:02.390721
| 2020-04-12T07:28:00
| 2020-04-12T07:28:00
| 150,800,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,449
|
r
|
psychmeta.R
|
######################################################################
########### R code for psychometric meta-analysis main effect ########
## R code that contains the function for:
# (1) doing meta-analysis corrected for measurement errors in X & Y and weighted by sample size;
# (a) Individual corrections are used;
# (b) Missing reliabilities are imputed with the sample-size weighted mean of the full distribution of artifacts
# Although this imputation method is not recommended by default,
# it saves lots of computation compared to
# other methods such as those based on bootstrapping or simulation.
# (c) 95% CI and 90% CV are produced for the meta-analysis.
# (2) organizing all meta-analytic results into table format.
## The calculation requires preinstalling the psychmeta and metafor packages.
## "rawdata" is the original data files for meta-analysis that should include at least the following:
# (1) a column for effect size r, entitled "r";
# (2) a column for sample size n, entitled "n";
# (3) two columns for reliabilities of the correlated constructs, entitled as "rxx" and "ryy".
## At least 3 effect sizes (i.e., K>=3) is required to produce results.
## Function would return null if K<3.
psychmeta=function(rawdata){
  # Psychometric meta-analysis of correlations with individual corrections
  # for measurement error in X and Y (sample-size weighted), plus
  # heterogeneity statistics (I2, Q) from a random-effects model on
  # Fisher-z transformed correlations.
  #
  # rawdata: data frame with columns r (effect size), n (sample size),
  #          rxx and ryy (reliabilities). Missing reliabilities are imputed
  #          with the sample-size weighted mean of the artifact distribution.
  #
  # Returns NULL when fewer than 3 effect sizes are available, otherwise a
  # rounded results table with 95% CI, 90% CV, I2 and Q(df, p) appended.
  rawdata=data.frame(rawdata)
  require(psychmeta)
  if (nrow(rawdata)<3) {
    return(NULL)
  }
  # True-score table from the individual-corrections meta-analysis.
  tbl=ma_r(data=rawdata,ma_method="ic",rxyi=r,n=n,rxx=rxx, ryy=ryy,
           wt_type="sample_size", correct_rxx=TRUE, correct_ryy=TRUE,
           control=control_psychmeta(error_type="sample",conf_level=.95, cred_level=.9,
                                     conf_method="norm",cred_method="norm", var_unbiased=TRUE,
                                     pairwise_ads=FALSE, residual_ads=FALSE,impute_artifacts=TRUE,
                                     impute_method="wt_mean_full"))$meta_tables$`analysis_id: 1`$individual_correction$true_score
  tbl=round(tbl,2)
  # Sample-size columns are rounded to whole numbers.
  tbl[,c(1:2)]=round(tbl[,c(1:2)],0)
  # Heterogeneity statistics via metafor on Fisher-z transformed r.
  require(metafor)
  z.scaled=escalc(measure="ZCOR",ri=r,ni=n,data=rawdata)
  re.fit=rma(measure="ZCOR",yi,vi,data=z.scaled,level=95)
  tbl[1,"I2"]=round(re.fit$I2,2)
  tbl[1,"Q(df,p)"]=paste(round(re.fit$QE,2),"(",re.fit$k-re.fit$m,",",round(re.fit$QEp,2),")")
  return(tbl)
}
|
ac72e0e2728d469ff91147cc25531c6fe49414bc
|
3ec87311971341154e85a46e79db410b568e7b37
|
/R/tskeyvalparser-package.R
|
cdefeba81c30caa2146b6f32f55c98fd9f223817
|
[
"MIT"
] |
permissive
|
ingonader/tskeyvalparser
|
a4a6209b3339475dec911b7e9d0cc3cbe2f5d6d5
|
3570df87d020d4cd1b3e33600ea8864f5a2897d8
|
refs/heads/master
| 2020-03-27T15:48:48.166688
| 2018-09-25T16:59:53
| 2018-09-25T16:59:53
| 146,741,116
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
tskeyvalparser-package.R
|
# Package-level documentation. The "_PACKAGE" string below is the roxygen2
# sentinel that attaches the preceding roxygen block to the package help
# topic; the roxygen tags are left untouched so the generated NAMESPACE and
# Rd files are unchanged.
#' \code{tskeyvalparser} package
#'
#' Parses key-value pairs in a time-series text file.
#'
#' @docType package
#' @name tskeyvalparser
#' @importFrom magrittr %>%
#' @keywords internal
"_PACKAGE"
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
|
1d5582138a629774645a78b018a0b767c9a01d3e
|
5b657979777378f26d4de317723c59b11b84cd96
|
/R/OldVersions/JaguarPrepDraft.r
|
43e15c5894cf7eba6a02fe816076fb2effb0b741
|
[] |
no_license
|
piLaboratory/jaguar-codes
|
6eb7e73815ec8dbca8b9c2283b08b2d542ba3935
|
4919541548d254eaf8570029a6e9be7f95ac29f5
|
refs/heads/master
| 2021-07-14T14:43:56.523949
| 2019-04-13T18:31:35
| 2019-04-13T18:31:35
| 145,902,814
| 2
| 4
| null | 2020-05-20T15:11:50
| 2018-08-23T20:18:52
|
HTML
|
UTF-8
|
R
| false
| false
| 60,063
|
r
|
JaguarPrepDraft.r
|
###########################################################################################################
############# Jaguar Data Preparation ##############
###########################################################################################################
######################################## Preparation ######################################################
rm(list= ls()) ### For a fresh start
## Commented because is not generic
setwd("C:/RWorkDir/jaguardatapaper") ### Set directory
###########################################################################################################
###Script modified from Bernardo Niebuhr & MoveBank ("John Fieberg")
###Enter jaguar data from Morato et al. 2018
## Load packages
if(!require(install.load)) install.packages('install.load'); library(install.load)
install.load::install_load("ggmap","maptools",'move',"circular","RCurl","dplyr","readr","caTools","adehabitatLT","ctmm","ggsn","magick","rgl","tidyverse","htmltools","rglwidget","lubridate","raster","amt","tibble","knitr","leaflet","ezknitr","lattice","rgdal","sp")
### The require() can be used inside functions as it gives a warning message and returns a logical value.
### FALSE if the requested package is not found and TRUE if the package is loaded.###(Movement data should be checked and cleaned (for outliers, duplicated timestamps, etc)first!!!
### Load and adjust the data (already cleaned) and create a dataframe object
mov.data.org <- read.delim(file="c:/RWorkDir/jaguardatapaper/mov.data.org.txt")
mov.data.org <- dplyr::select(mov.data.org, -(individual.taxon.canonical.name:tag.local.identifier))
str(mov.data.org)
# Add Individual info
info <- read.delim(file="c:/RWorkDir/jaguardatapaper/info.txt")
#ind.info <- read.delim(file="c:/RWorkDir/jaguardatapaper/Jaguar_additional information.txt")
#info <- ind.info %>%
# dplyr::select(ID, Sex, Estimated.Age, Weight, Collar.Type, Collar.Brand, Planned.Schedule)
#info <- info %>% rename(individual.local.identifier..ID.=ID)
#info <- info %>% rename(sex=Sex)
#info <- info %>% rename(age=Estimated.Age)
#info <- info %>% rename(weight=Weight)
#info <- info %>% rename(collar_type=Collar.Type)
#info <- info %>% rename(brand=Collar.Brand)
#info <- info %>% rename(schedule=Planned.Schedule)
### Movement Parameters (ctmm)
#movpar <- read.delim(file="c:/RWorkDir/jaguardatapaper/movementparameters.txt")
#str(movpar)
#movpar = movpar[-118,] # there were an extra row of NA so I deleted that
#movpar <- movpar %>%
#dplyr::select(Animal_ID, Model)
#movpar <- movpar %>% rename(individual.local.identifier..ID.=Animal_ID)
#movpar <- movpar %>% rename(model=Model)
#Merge movpar with id info and save txt
#info <- merge(info,movpar)
#info <- info[with(info,order(individual.local.identifier..ID.)),]
#write.table(info,file="c:/RWorkDir/jaguardatapaper/info.txt",row.names = F,quote=F,col.names=T,sep="\t")
#Merge movement with individual info/parameters
merged<- merge(mov.data.org,info)
mov.data.org <- merged
str(mov.data.org)
#write.table(info,file="c:/RWorkDir/jaguardatapaper/mov.data.org.txt",row.names = F,quote=F,col.names=T,sep="\t")
##########################
##########################
# Organize data
#mov.data.org
# Add 2000 to years
# Extract the (two-digit) year field from an 'm/d/yy H:M' timestamp string:
# the characters sitting between the second '/' and the first blank.
get.year <- function(time.stamp) {
  slash.pos <- gregexpr('/', time.stamp, fixed = TRUE)[[1]]
  blank.pos <- gregexpr(' ', time.stamp, fixed = TRUE)[[1]]
  substr(time.stamp, slash.pos[2] + 1, blank.pos[1] - 1)
}
# Test
get.year(time.stamp = mov.data.org$timestamp[10000])
# All individuals
# Two-digit year of every fix, as numeric.
year <- as.numeric(sapply(mov.data.org$timestamp, get.year))
table(year)
# Add 1900/2000
# Century correction: two-digit years above 50 are read as 19xx, the rest as 20xx.
new.year <- as.character(ifelse(year > 50, year + 1900, year + 2000))
table(new.year)
# New dates
# Splice a four-digit `year` in place of the original year field of an
# 'm/d/yy H:M' timestamp string, leaving month/day and time untouched.
set.year <- function(time.stamp, year) {
  second.slash <- gregexpr('/', time.stamp, fixed = TRUE)[[1]][2]
  first.blank <- gregexpr(' ', time.stamp, fixed = TRUE)[[1]][1]
  head.part <- substr(time.stamp, 1, second.slash)                 # "m/d/" incl. trailing slash
  tail.part <- substr(time.stamp, first.blank, nchar(time.stamp))  # " H:M" incl. leading blank
  paste0(head.part, year, tail.part)
}
# Test
set.year(time.stamp = as.character(mov.data.org$timestamp[10000]), year = '2013')
# All individuals
# Rebuild every timestamp string with its century-corrected year.
date.time <- as.character(mapply(set.year, as.character(mov.data.org$timestamp),
new.year))
str(date.time)
#date.time
#########################################################
# All individuals
# NOTE(review): the next three lines repeat the computation just above verbatim;
# harmless because it is idempotent, but one copy could be removed.
date.time <- as.character(mapply(set.year, as.character(mov.data.org$timestamp),
new.year))
str(date.time)
#date.time
# Date/Time as POSIXct object
mov.data.org$timestamp.posix <- as.POSIXct(date.time,
format = "%m/%d/%Y %H:%M", tz = 'GMT')
str(mov.data.org)
##################################################################################################
### Get local time!!!
# I included a column to represent the local timezone (already with the - signal) to them multiply the timestamp and get the difference:
# Shift the GMT stamps by the per-record timezone offset (hours) to get local time,
# keep a GMT copy, and make timestamp.posix local for all subsequent calculations.
mov.data.org$local_time <- mov.data.org$timestamp.posix + mov.data.org$timezone*60*60
mov.data.org$timestamp.posix.GMT <- mov.data.org$timestamp.posix
mov.data.org$timestamp.posix <- mov.data.org$local_time ### If we do that all the (timestamp.posix)'s calculations will be based on local time
str(mov.data.org)
################################################################
## adehabitatLT
# Transforms in ltraj object
# Build an adehabitatLT trajectory per individual; as.ltraj derives step metrics
# (dist, dt, angles) from consecutive fixes. infolocs drops the raw long/lat
# columns (3:4) and the last column, which are already carried as x/y/date.
coords <- data.frame(mov.data.org$location.long, mov.data.org$location.lat)
mov.traj <- as.ltraj(xy = coords, date=mov.data.org$timestamp.posix,
id=mov.data.org$individual.local.identifier..ID.,
burst=mov.data.org$individual.local.identifier..ID.,
infolocs = mov.data.org[,-c(3:4, ncol(mov.data.org))])
mov.traj.df <- ld(mov.traj)
#mov.traj
#plot(mov.traj)
## move
# Organize data as a move package format
# Re-pack the flattened ltraj data frame as a move::moveStack (WGS84 long/lat).
move.data <- move(x = mov.traj.df$x, y = mov.traj.df$y,
time = mov.traj.df$date,
proj = CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'),
data = mov.traj.df, animal = mov.traj.df$id, sensor = 'GPS')
#'+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs' "+proj=aea +lat_1=-5 +lat_2=-42 +lat_0=-32 +lon_0=-60 +x_0=0 +y_0=0 +ellps=aust_SA +towgs84=-57,1,-41,0,0,0,0 +units=m +no_defs"
move.data ### moveStack
summary(move.data)
###Separate individual animals' trajectories
unstacked <- split(move.data)
head(unstacked)
# Flatten back to an ordinary data frame -- jaguar_df is the working table from here on.
jaguar_df <- as(move.data, "data.frame")
#id <- as.integer(levels(jaguar_df$id))[jaguar_df$id]
# age/weight came through as factors; convert via levels to their numeric values.
age <- as.numeric(levels(jaguar_df$age))[jaguar_df$age]
weight <- as.numeric(levels(jaguar_df$weight))[jaguar_df$weight]
jaguar_df$id <- as.factor(jaguar_df$individual.local.identifier..ID.)
jaguar_df$age <- age ### converted to number
jaguar_df$weight <- weight ### converted to number
head(jaguar_df)
str(jaguar_df)
#########################################################################################################
#' ######################## More Data cleaning ##########################################
###############################################################
#' Delete observations where missing lat or long or a timestamp. There are no missing
#' observations in this data set, but it is still good practice to check.
ind<-complete.cases(jaguar_df[,c("y","x","date")])
jaguar_df<-jaguar_df[ind==TRUE,]
#' Check for duplicated observations (ones with same lat, long, timestamp,
#' and individual identifier). There are no duplicate
#' observations in this data set, but it is still good practice to check.
ind2<-jaguar_df %>% select("date","x","y","id") %>% duplicated
sum(ind2) # no duplicates
jaguar_df<-jaguar_df[ind2!=TRUE,]
###or duplicatedLocs <- which(jaguar_df$date[1:(nrow(jaguar_df)-1)] == jaguar_df$date[2:(nrow(jaguar_df))])
########### Clean suspectly close points!!! Above 1200 secs or 20 min minimal interval ####################
# Drop fixes taken less than 20 min (1200 s) after the previous fix of the same burst;
# anti_join removes exactly the rows captured in `excludes`.
excludes <- filter(jaguar_df, dt < 1200)
### see excludeds based on id
table(select(excludes, id))
removed<- anti_join(jaguar_df, excludes) ### test if were all removed
filter(removed, dt < 1200)
jaguar_df <- removed ########### Clean suspectly close points!!! Above 1200 secs or 20 min minimal interval #######################
str(jaguar_df)
# NOTE(review): the block below is a pasted-in day/night classification example
# (it references an undefined `df`, and lines of raw console output -- the
# "date sunRise sunSet" table -- had been pasted in verbatim, which made the
# whole file fail to parse). It is kept for reference but fully commented out.
#library(dplyr)
## You don't need this line if you already have DateTime in proper format
#df$DateTime <- as.POSIXct(df$DateTime)
## Add a date column (with whatever timezone you want)
#df$date <- as.Date(df$DateTime, tz = 'EST')
## Following generates the sunrise and sunset times for the two example dates
#sunRise <- c(as.POSIXct('2016-04-15 06:40:37'), as.POSIXct('2016-03-24 06:55:00'))
#sunSet <- c(as.POSIXct('2016-04-15 18:40:37'), as.POSIXct('2016-03-24 18:25:00'))
#sun <- data.frame(date = as.Date(sunRise, tz = 'EST'), sunRise = sunRise, sunSet = sunSet)
#sun
## date sunRise sunSet
## 1 2016-04-15 2016-04-15 06:40:37 2016-04-15 18:40:37
## 2 2016-03-24 2016-03-24 06:55:00 2016-03-24 18:25:00
## Join the two tables and compute night/day
#df <- inner_join(df, sun)
#df$dayNight <- ifelse(df$DateTime > df$sunRise & df$DateTime < df$sunSet, 'day', 'night')
## Cleaning up columns which will be in excess due to repetition of analysis ########################################################################
# Drop trajectory metrics and bookkeeping columns; they are redundant with the
# retained fields or will be recomputed later. Assigning NULL via `[[<-` is a
# no-op for a column that happens to be absent, matching the original
# one-`$<- NULL`-per-line behaviour.
excess.cols <- c("dx", "dy", "dist", "dt", "R2n", "abs.angle", "rel.angle",
                 "location.lat", "timestamps", "sensor", "burst", "optional",
                 "coords.x1", "coords.x2", "trackId",
                 "individual.local.identifier..ID.", "study.name",
                 "collar_type", "brand", "local_time", "timezone")
for (excess.col in excess.cols) {
  jaguar_df[[excess.col]] <- NULL
}
head(jaguar_df)
str(jaguar_df)
###############################################################################################################################
### Add UTM #################
###############################################################################################################################
###Separate individual animals' trajectories
#unstacked <- split(move.data)
#head(unstacked)
#jaguar_df <- as(move.data, "data.frame")
table(jaguar_df$project_region)
#write.table(jaguar_df,file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt",row.names = F,quote=F,col.names=T,sep="\t")
#jaguar_df <- read.delim(file="c:/RWorkDir/jaguardatapaper/mov.data.org.txt")
## ------------------------------------------------------------------------------
## Refactor note: the original script repeated the same ~40-line subset /
## project / plot / cbind recipe once per region (~620 lines). The three
## helpers below reproduce that recipe once; each region keeps its exact CRS,
## id split, and output variable so downstream code is unchanged.
## ------------------------------------------------------------------------------

# Print the per-individual fix counts of a regional subset (replicates the
# repeated ftable / as.data.frame.table / subset(Freq > 0) idiom).
show_id_freq <- function(dat) {
  f <- as.data.frame.table(ftable(dat$id))
  f$Var1 <- NULL
  f$Var2 <- NULL
  print(subset(f, Freq > 0))
  invisible(f)
}

# Build a full PROJ.4 UTM string for the given zone label;
# hemi is "+south" (default) or "+north".
utm_zone <- function(zone, hemi = "+south") {
  paste("+proj=utm", paste0("+zone=", zone), hemi,
        "+datum=WGS84 +units=m +no_defs")
}

# Project the long/lat fixes of `dat` (columns x, y; WGS84) into `utm_crs`,
# draw the side-by-side diagnostic plots of both coordinate systems, and
# return `dat` with the four appended columns utm_x, utm_y, long_x, lat_y
# that all downstream analyses expect.
add_utm_coords <- function(dat, utm_crs) {
  print(range(dat$x)); print(range(dat$y))
  coord.latlong <- SpatialPoints(cbind(dat$x, dat$y),
                                 proj4string = CRS("+proj=longlat +datum=WGS84"))
  coord.UTM <- spTransform(coord.latlong, CRS(utm_crs))
  par(mfrow = c(1, 2))
  plot(as.data.frame(coord.latlong), axes = TRUE,
       main = "Lat-Long Coordinates", cex.axis = 0.95)
  plot(as.data.frame(coord.UTM), axes = TRUE,
       main = "UTM Coordinates", col = "red", cex.axis = 0.95)
  locsj_df <- as.data.frame(cbind(coordinates(coord.UTM),
                                  coordinates(coord.latlong)))
  colnames(locsj_df) <- c("utm_x", "utm_y", "long_x", "lat_y")
  cbind(dat, locsj_df)
}

# Subset one project_region from the global jaguar_df, report its ids,
# and append the UTM columns.
prep_region <- function(region, utm_crs) {
  dat <- subset(jaguar_df, project_region == region)
  show_id_freq(dat)
  add_utm_coords(dat, utm_crs)
}

# Row-bind per-id subsets of `dat` in the given id order, matching the original
# rbind(X16, X70, ...) grouping (a plain %in% subset would interleave rows).
bind_by_id <- function(dat, ids) {
  do.call(rbind, lapply(ids, function(i) subset(dat, id == i)))
}

### Atlantic Forest (both sub-regions in UTM zone 22K) ###
AFW1 <- prep_region('Atlantic Forest W1', utm_zone("22K"))
AFW2 <- prep_region('Atlantic Forest W2', utm_zone("22K"))
AFW <- rbind(AFW1, AFW2)
show_id_freq(AFW)
#write.table(AFW,file="c:/RWorkDir/jaguardatapaper/AFW.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(AFW); str(AFW)

### Caatinga (zone 23L) ###
Caatinga <- prep_region('Caatinga', utm_zone("23L"))
#write.table(Caatinga,file="c:/RWorkDir/jaguardatapaper/Caatinga.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(Caatinga); str(Caatinga)

### Cerrado (zones 22K and 22L) ###
Cerrado1 <- prep_region('Cerrado1', utm_zone("22K"))
# id 89 occurs in 2 UTM zones, 22K (40%) and 22L (60%); kept in 22L as before.
Cerrado2 <- prep_region('Cerrado2', utm_zone("22L"))
Cerrado <- rbind(Cerrado1, Cerrado2)
show_id_freq(Cerrado)
#write.table(Cerrado,file="c:/RWorkDir/jaguardatapaper/Cerrado.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(Cerrado)

### Costa Rica (zone 16P, northern hemisphere) ###
CRica <- prep_region('Costa Rica', utm_zone("16P", "+north"))
#write.table(CRica,file="c:/RWorkDir/jaguardatapaper/CRica.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(CRica)

### Pantanal -- pooled across projects by biome (zone 21K) ###
Pantanal <- subset(jaguar_df, project_bioveg == 'Pantanal')
show_id_freq(Pantanal)
# Number of project regions each Pantanal individual appears in.
Pant_projs <- colSums(ifelse(table(Pantanal$id, Pantanal$project_region) > 0, 1, 0))
Pant_projs
Pantanal <- add_utm_coords(Pantanal, utm_zone("21K"))
#write.table(Pantanal,file="c:/RWorkDir/jaguardatapaper/Pantanal.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(Pantanal)

### Dry Chaco -- ids span two UTM zones ###
# 16, 70, 76, 77 => mostly zone 20K; 71, 72, 73 => zone 21K
Drych <- subset(jaguar_df, project_region == 'Dry chaco')
show_id_freq(Drych)
Drych1 <- add_utm_coords(bind_by_id(Drych, c('16', '70', '76', '77')), utm_zone("20K"))
Drych2 <- add_utm_coords(bind_by_id(Drych, c('71', '72', '73')), utm_zone("21K"))
Drych <- rbind(Drych1, Drych2)
show_id_freq(Drych)
#write.table(Drych,file="c:/RWorkDir/jaguardatapaper/Drych.txt",row.names = F,quote=F,col.names=T,sep="\t")
# Time Posix class (2014-12-05 00:00:00) -- restore classes as in the original script.
Drych$timestamp.posix <- as.POSIXct(as.character(Drych$timestamp.posix),
                                    format = "%Y-%m-%d %H:%M:%S", tz = 'GMT')
Drych$date <- as.POSIXct(as.character(Drych$date),
                         format = "%Y-%m-%d %H:%M:%S", tz = 'GMT')
Drych$id <- as.factor(Drych$id)
str(Drych)

### Humid Chaco (zone 21K) ###
Hch <- prep_region('Humid chaco', utm_zone("21K"))
#write.table(Hch,file="c:/RWorkDir/jaguardatapaper/Hch.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(Hch); str(Hch)

### Forest Paraguay (zone 21J) ###
FPy <- prep_region('Forest Paraguay', utm_zone("21J"))
#write.table(FPy,file="c:/RWorkDir/jaguardatapaper/FPy.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(FPy); str(FPy)

### Iguazu -- ids span two UTM zones ###
# 42, 66, 80, 90 => zone 21J; 83 => zone 22J
Iguazu <- subset(jaguar_df, project_region == 'Iguazu')
show_id_freq(Iguazu)
Iguazu1 <- add_utm_coords(bind_by_id(Iguazu, c('42', '66', '80', '90')), utm_zone("21J"))
Iguazu2 <- add_utm_coords(subset(Iguazu, id == '83'), utm_zone("22J"))
Iguazu <- rbind(Iguazu1, Iguazu2)
show_id_freq(Iguazu)
#write.table(Iguazu,file="c:/RWorkDir/jaguardatapaper/Iguazu.txt",row.names = F,quote=F,col.names=T,sep="\t")
str(Iguazu)

### Amazonia -- Mamiraua flooded forest, Brazil (zone 20M) ###
Mamiraua <- prep_region('Mamiraua', utm_zone("20M"))
#write.table(Mamiraua,file="c:/RWorkDir/jaguardatapaper/Mamiraua.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(Mamiraua); str(Mamiraua)

### Dry Amazonia, PA -- translocated animal, id 24 (zone 22M) ###
iopPA <- subset(jaguar_df, id == '24')
show_id_freq(iopPA)
iopPA <- add_utm_coords(iopPA, utm_zone("22M"))
#write.table(iopPA,file="c:/RWorkDir/jaguardatapaper/iopPA.txt",row.names = F,quote=F,col.names=T,sep="\t")
head(iopPA); str(iopPA)
##### Greater Lacandona, Mexico #######################################################################################
Lacandona =subset(jaguar_df,project_region=='Greater Lacandona')
head(Lacandona)
str(Lacandona )
table(Lacandona$id)
ft=ftable(Lacandona$id)
f<-as.data.frame.table(ft)
f$Var1 <- NULL
f$Var2 <- NULL
subset(f,Freq>0)
range(Lacandona$x);range(Lacandona$y)
coord.latlong = SpatialPoints(cbind(Lacandona$x,Lacandona$y), proj4string = CRS("+proj=longlat +datum=WGS84"))
coord.latlong
# Transforming coordinate from WGS=84 to UTM Zone = 15 Q
coord.UTM <- spTransform(coord.latlong , CRS("+proj=utm +zone=15Q +south +datum=WGS84 +units=m +no_defs"))
coord.UTM
coord.latlong.df <- as.data.frame(coord.latlong)
cootd.utm.df <- as.data.frame(coord.UTM)
head(coord.latlong.df)
head(cootd.utm.df)
par(mfrow = c(1, 2))
plot(coord.latlong.df, axes = TRUE, main = "Lat-Long Coordinates", cex.axis = 0.95)
plot(cootd.utm.df, axes = TRUE, main = "UTM Coordinates", col = "red", cex.axis = 0.95)
locsj_matx=cbind(coordinates(coord.UTM ), coordinates(coord.latlong))
locsj_df<- as.data.frame(locsj_matx)
head(locsj_df)
head(Lacandona)
point.names<-c("utm_x","utm_y","long_x","lat_y")
colnames(locsj_df)<-point.names
head(locsj_df)
Lacandona=cbind(Lacandona, locsj_df)
#write.table(Lacandona,file="c:/RWorkDir/jaguardatapaper/Lacandona.txt",row.names = F,quote=F,col.names=T,sep="\t")
#Lacandona <- read.delim(file="c:/RWorkDir/jaguardatapaper/Lacandona.txt")
head(Lacandona); str(Lacandona)
##### Mexico East ####################################################################################################
MexEast =subset(jaguar_df,project_region=='Mexico East')
head(MexEast)
str(MexEast )
table(MexEast$id)
ft=ftable(MexEast$id)
f<-as.data.frame.table(ft)
f$Var1 <- NULL
f$Var2 <- NULL
subset(f,Freq>0)
range(MexEast$x);range(MexEast$y)
coord.latlong = SpatialPoints(cbind(MexEast$x,MexEast$y), proj4string = CRS("+proj=longlat +datum=WGS84"))
coord.latlong
# Transforming coordinate from WGS=84 to UTM Zone = 16 Q
coord.UTM <- spTransform(coord.latlong , CRS("+proj=utm +zone=16Q +south +datum=WGS84 +units=m +no_defs"))
coord.UTM
coord.latlong.df <- as.data.frame(coord.latlong)
cootd.utm.df <- as.data.frame(coord.UTM)
head(coord.latlong.df)
head(cootd.utm.df)
par(mfrow = c(1, 2))
plot(coord.latlong.df, axes = TRUE, main = "Lat-Long Coordinates", cex.axis = 0.95)
plot(cootd.utm.df, axes = TRUE, main = "UTM Coordinates", col = "red", cex.axis = 0.95)
locsj_matx=cbind(coordinates(coord.UTM ), coordinates(coord.latlong))
locsj_df<- as.data.frame(locsj_matx)
head(locsj_df)
head(MexEast)
point.names<-c("utm_x","utm_y","long_x","lat_y")
colnames(locsj_df)<-point.names
head(locsj_df)
MexEast=cbind(MexEast, locsj_df)
write.table(MexEast,file="c:/RWorkDir/jaguardatapaper/MexEast.txt",row.names = F,quote=F,col.names=T,sep="\t")
MexEast <- read.delim(file="c:/RWorkDir/jaguardatapaper/MexEast.txt")
head(MexEast); str(MexEast)
##### Mexico Sonora ##################################################################################################
Sonora =subset(jaguar_df,project_region=='Mexico Sonora')
head(Sonora)
str(Sonora )
table(Sonora$id)
ft=ftable(Sonora$id)
f<-as.data.frame.table(ft)
f$Var1 <- NULL
f$Var2 <- NULL
subset(f,Freq>0)
range(Sonora$x);range(Sonora$y)
coord.latlong = SpatialPoints(cbind(Sonora$x,Sonora$y), proj4string = CRS("+proj=longlat +datum=WGS84"))
coord.latlong
# Transforming coordinate from WGS=84 to UTM Zone = 12 R
coord.UTM <- spTransform(coord.latlong , CRS("+proj=utm +zone=12R +south +datum=WGS84 +units=m +no_defs"))
coord.UTM
coord.latlong.df <- as.data.frame(coord.latlong)
cootd.utm.df <- as.data.frame(coord.UTM)
head(coord.latlong.df)
head(cootd.utm.df)
par(mfrow = c(1, 2))
plot(coord.latlong.df, axes = TRUE, main = "Lat-Long Coordinates", cex.axis = 0.95)
plot(cootd.utm.df, axes = TRUE, main = "UTM Coordinates", col = "red", cex.axis = 0.95)
locsj_matx=cbind(coordinates(coord.UTM ), coordinates(coord.latlong))
locsj_df<- as.data.frame(locsj_matx)
head(locsj_df)
head(Sonora)
point.names<-c("utm_x","utm_y","long_x","lat_y")
colnames(locsj_df)<-point.names
head(locsj_df)
Sonora=cbind(Sonora, locsj_df)
#write.table(Sonora,file="c:/RWorkDir/jaguardatapaper/Sonora.txt",row.names = F,quote=F,col.names=T,sep="\t")
#Sonora <- read.delim(file="c:/RWorkDir/jaguardatapaper/Sonora.txt")
head(Sonora); str(Sonora)
############# ### Mexico ###
Mex=rbind(Lacandona,MexEast,Sonora)
head(Mex)
ft=ftable(Mex$id)
f<-as.data.frame.table(ft)
f$Var1 <- NULL
f$Var2 <- NULL
subset(f,Freq>0)
head(Mex)
#write.table(Mex,file="c:/RWorkDir/jaguardatapaper/Mex.txt",row.names = F,quote=F,col.names=T,sep="\t")
#Mex <- read.delim(file="c:/RWorkDir/jaguardatapaper/Mex.txt")
####################### Jaguar Dataframe with UTMs ########################################################################
# Combine every region subset (each now carries utm_x/utm_y/long_x/lat_y)
# into a single data frame, rebuild an ltraj object on the UTM coordinates
# to obtain per-step distances, and tidy the resulting columns.
head(AFW);head(Caatinga);head(Cerrado);head(CRica);head(Pantanal);head(Drych);head(Hch);head(FPy);head(Iguazu);
head(Mamiraua);head(iopPA);head(Lacandona);head(MexEast);head(Sonora)
jaguar=rbind(AFW,Caatinga,Cerrado,CRica,Pantanal,Drych,Hch,FPy,Iguazu,Mamiraua,iopPA,Lacandona,MexEast,Sonora)
head(jaguar); str(jaguar)
jaguar_df <- jaguar
# Fix frequency per individual
ft=ftable(jaguar$id)
f<-as.data.frame.table(ft)
f$Var1 <- NULL
f$Var2 <- NULL
subset(f,Freq>0)
#write.table(jaguar,file="c:/RWorkDir/jaguardatapaper/jaguar.txt",row.names = F,quote=F,col.names=T,sep="\t")
#write.table(jaguar_df,file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt",row.names = F,quote=F,col.names=T,sep="\t")
#jaguar_df <- read.delim(file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt")
################################################################################################################
################ ADEHABITAT LT ### Update the mov.traj object ####
## adehabitatLT
# Build the trajectory on the projected (metre) coordinates so step lengths
# (dist) come out in metres.
coords <- data.frame(jaguar$utm_x, jaguar$utm_y)
jaguar.traj <- as.ltraj(xy = coords, date=jaguar$timestamp.posix,
                        id=jaguar$id,
                        burst=jaguar$id,
                        infolocs = jaguar)
jaguar.traj.df <- ld(jaguar.traj)
head(jaguar.traj.df)
plot(jaguar.traj)
hist(jaguar.traj.df$dist)
max(jaguar.traj.df$dist,na.rm=TRUE)
range(jaguar.traj.df$dist,na.rm=TRUE)
# ld() duplicates several columns (UTM x/y, date, id); drop the redundant
# copies and restore the original long/lat column names.
jaguar_df<- jaguar.traj.df
jaguar_df$x <- NULL
jaguar_df$y <- NULL
jaguar_df$dx <- NULL
jaguar_df$dy <- NULL
jaguar_df$date.1 <- NULL
jaguar_df$id.1 <- NULL
jaguar_df$burst <- NULL
# NOTE(review): this is the plyr::rename() calling convention; with
# dplyr::rename() on the search path the call would fail -- confirm plyr wins.
jaguar_df <- rename(jaguar_df, c("x.1"="x", "y.1"="y"))
jaguar_df <- jaguar_df%>%select(Event_ID,x,y,date, everything())
#write.table(jaguar_df,file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt",row.names = F,quote=F,col.names=T,sep="\t")
#jaguar_df <- read.delim(file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt")
# Re-derive the per-region subsets from the cleaned frame.
AFW1=subset(jaguar_df,project_region=='Atlantic Forest W1'); head(AFW1)  ### 1)
AFW2=subset(jaguar_df,project_region=='Atlantic Forest W2'); head(AFW2)  ### 2)
Caatinga=subset(jaguar_df,project_region=='Caatinga'); head(Caatinga)  ### 3)
Cerrado1=subset(jaguar_df,project_region=='Cerrado1'); head(Cerrado1)  ### 4)
Cerrado2=subset(jaguar_df,project_region=='Cerrado2'); head(Cerrado2)  ### C
CRica=subset(jaguar_df,project_region=='Costa Rica'); head(CRica)  ### D
Pantanal =subset(jaguar_df,project_bioveg=='Pantanal'); head(Pantanal)  ### E
Drych=subset(jaguar_df,project_region=='Dry chaco'); head(Drych)  ### F
Hch=subset(jaguar_df,project_region=='Humid chaco'); head(Hch)  ### G
# BUG FIX: original printed head(Hch) here instead of head(FPy).
FPy=subset(jaguar_df,project_region=='Forest Paraguay'); head(FPy)  ### H
Iguazu=subset(jaguar_df,project_region=='Iguazu'); head(Iguazu)  ### I
Mamiraua =subset(jaguar_df,project_region=='Mamiraua'); head(Mamiraua)  ### J
iopPA=subset(jaguar_df,id=='24'); head(iopPA)  ### K
Lacandona =subset(jaguar_df,project_region=='Greater Lacandona'); head(Lacandona)  ### L
MexEast =subset(jaguar_df,project_region=='Mexico East'); head(MexEast)  ### M
Sonora =subset(jaguar_df,project_region=='Mexico Sonora'); head(Sonora)  ### N
# Organize data
# Time Posix class (2014-12-05 00:00:00 )
jaguar_df$timestamp.posix <- as.character(jaguar_df$timestamp.posix)
jaguar_df$timestamp.posix<- as.POSIXct(jaguar_df$timestamp.posix, format ="%Y-%m-%d %H:%M:%S", tz = 'GMT')
jaguar_df$date <- as.character(jaguar_df$date)
jaguar_df$date <- as.POSIXct(jaguar_df$date, format ="%Y-%m-%d %H:%M:%S", tz = 'GMT')
#jaguar_df$id <- as.factor(jaguar_df$individual.local.identifier..ID.)
#jaguar_df$season <- as.factor(jaguar_df$season) #only dry, flood, inter seasons
jaguar_df$dt <- as.numeric(jaguar_df$dt)
# Derive calendar covariates from the timestamp (week/day-of-year/year).
jaguar_df$week <- as.numeric(strftime(as.POSIXlt(jaguar_df$timestamp.posix),format="%W"))
jaguar_df$day <- as.numeric(strftime(as.POSIXlt(jaguar_df$timestamp.posix),format="%j"))
jaguar_df$year <- as.numeric(strftime(as.POSIXlt(jaguar_df$timestamp.posix),format="%Y"))
str(jaguar_df)
###############################################################################################################
### Update the MoveStack object (move.data) to contain only the cleaned points
move.data <- move(x = jaguar_df$x, y = jaguar_df$y,
                  time = jaguar_df$date,
                  proj = CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'),
                  data = jaguar_df, animal = jaguar_df$id, sensor = 'GPS')
#"+proj=aea +lat_1=-5 +lat_2=-42 +lat_0=-32 +lon_0=-60 +x_0=0 +y_0=0 +ellps=aust_SA +towgs84=-57,1,-41,0,0,0,0 +units=m +no_defs"
################################################################################################################
##############################################################################################################################
# now do for the residence time (method of Barraquand & Benhamou 2008) #### Require to be in UTM
# choose 1km radius and 1 hours as maximum time threshold that the animal is allowed to spend outside the patch
# before we consider that the animal actually left the patch
### ltraj object from Cerrado ####
#mov.traj<-move2ade(move.data)
# Transforms in ltraj object
# NOTE(review): residenceTime() needs projected coordinates (the comment above
# says "Require to be in UTM"), but coords below are built from the long/lat
# columns x/y -- consider using jaguar_df$utm_x / jaguar_df$utm_y instead.
coords <- data.frame(jaguar_df$x, jaguar_df$y)
mov.traj <- as.ltraj(xy = coords, date=jaguar_df$timestamp.posix,
                     id=jaguar_df$individual.local.identifier..ID.,
                     burst=jaguar_df$individual.local.identifier..ID.,
                     infolocs = jaguar_df)
mov.traj.df <- ld(mov.traj)
#mov.traj
#plot(mov.traj)
hist(mov.traj.df$dist)
# Residence time for the first burst (Cerrado), 1 km radius / 24 h threshold.
cerrado1<- mov.traj[1]
res1 <- residenceTime(cerrado1, radius = 1000, maxt=24, units="hour")
plot(res1)
# maybe lets increase to 2km and 12 hours
# BUG FIX: original called residenceTime(elk4049, ...) -- `elk4049` is an
# object from the adehabitatLT vignette and is undefined here; use cerrado1.
res2 <- residenceTime(cerrado1, radius = 2000, maxt=12, units="hour")
plot(res2)
# add this to the infolocs slot
res2 <- residenceTime(cerrado1, radius = 2000, maxt=12, addinfo = TRUE, units="hour")
res2
# repeat for the other individuals
jtraj1<- mov.traj[1]
# BUG FIX: radius was 1 (metre), contradicting the stated 1 km radius above.
res1 <- residenceTime(jtraj1, radius = 1000, maxt=24, units="hour")
plot(res1)
###############################################################################################################
# Prepare data in the ctmm format
mov.tel <- as.telemetry(move.data)
str(mov.tel)
###############################################################################################################
# it can come handy to add an identifier variable, identifying the first location of each individual - let us call this 'firstRec'
# A row is a "first record" when its id differs from the previous row's id
# (data are assumed sorted by individual, then time).
foo <- which(jaguar_df$individual.local.identifier..ID.[1:(nrow(jaguar_df)-1)] != jaguar_df$individual.local.identifier..ID.[2:(nrow(jaguar_df))])
jaguar_df$firstRec <- rep(0,nrow(jaguar_df))
jaguar_df$firstRec[foo+1] <- 1
jaguar_df$firstRec[1] <- 1
# let us check if this is correct
length(unique(jaguar_df$individual.local.identifier..ID.)) # count N individuals
sum(jaguar_df$firstRec) # should equal the number of individuals (117 expected)
jaguar_df[sort(c(foo-1,foo,foo+1)),c('individual.local.identifier..ID.','date','firstRec')] # first records seem correctly identified
rm(foo) # keep workspace clean
# time between steps
# ?difftime
# syntax: difftime(then,now,units="secs")
foo <- difftime(jaguar_df$date[2:nrow(jaguar_df)], jaguar_df$date[1:(nrow(jaguar_df)-1)], units = "secs")
foo <- c(NA, foo)
summary(as.numeric(foo))
# Blank out the lag at each individual's first record (it would otherwise
# span two different animals).
foo <- ifelse(jaguar_df$firstRec == 1, NA, foo)
summary(as.numeric(foo))
jaguar_df$dt <- foo
rm(foo) ### clean foo
# investigate distribution of step length and time lags between steps
hist(jaguar_df$dt); summary(jaguar_df$dt)
# think about the sampling regime -- 7200s = 2h; 3600s = 1h; 900s = 15min; likely a resampling and/or imputation will be required for analyses
hist(jaguar_df[jaguar_df$dt <86400,'dt'])
hist(jaguar_df[jaguar_df$dt < 172800 & jaguar_df$dt > 1200,'dt'])
test <- filter(jaguar_df, dt > 100000) ### 2489 obs. for dt > 100000 approx 1 day
hist(jaguar_df[jaguar_df$dt > 100000& jaguar_df$dt <2600000,'dt']); summary(test$dt) ### approx between 1 day and a month
testb <- filter(jaguar_df, dt > 2600000); summary(testb$dt) ### only 54 obs. with more than a month
hist(jaguar_df[jaguar_df$dt > 2600000,'dt'])
testb2 <- testb %>% group_by(id)%>%nest(id, dt)%>% print(n = Inf) ### we can see the frequency which animals had highest timelags
rm(test) ; rm(testb);rm(testb2) ### clean tests
##############################################################################################################
#' ## Creating a track in amt (commom to both RSF and SSF)
##############################################################################################################
#' Before we can use the amt package to calculate step lengths, turn angles, and bearings
#' for fisher data, we need to add a class (track) to the data. Then, we can summarize
#' the data by individual, month, etc. First, create a track using utms and the timestamp.
#'
#' If we have a data set with locations in utms, we could use:
#trk.temp <- make_track(fisher.dat, .x=utm.easting, .y=utm.northing, .t=timestamp, id = individual_local.identifier)
#trk.temp
#' Note: we did not need to explicitly specify x, y and t (but probably good to do so).
#' This would also have worked
#' trk <- make_track(fisher.dat, utm.easting, utm.northing, timestamp, id = local_identifier)
#' We can also use lat, long, which will allow us to determine
#' time of day
trk <- mk_track(jaguar_df,.x=x, .y=y, .t=date, Event_ID=Event_ID, id=id, sex=sex,
                age=age, weight=weight, status=status,dist=dist,project_region=project_region, crs = CRS("+init=epsg:4326"))  ### season=season, speed_kmh=speed_kmh,
trk <- trk %>% arrange(id)
trk
# Reproject the track to an equal-area CRS (metres) so step lengths are in m.
trk <- transform_coords(trk, sp::CRS("+proj=aea +lat_1=-5 +lat_2=-42 +lat_0=-32 +lon_0=-60 +x_0=0 +y_0=0 +ellps=aust_SA +units=m +no_defs"))
trk
trk <- trk %>% time_of_day()
trk.class<-class(trk)
#' ## Movement Characteristics
nesttrk<-trk%>%nest(-id)
nesttrk #%>% print(n = 117)
#' Each row contains data from an individual. For example, we can access data
#' from the first individual using:
nesttrk$data[[1]]
#' We could calculate movement characteristics by individual using:
temp<-direction_rel(nesttrk$data[[1]])
head(temp)
#' or:
temp<-trk %>% filter(id=="1") %>% direction_rel
head(temp)
#' Or, we can add a columns to each nested column of data using purrr::map
trk<-trk %>% nest(-id) %>%
  mutate(dir_abs = map(data, direction_abs,full_circle=TRUE, zero="N"),
         dir_rel = map(data, direction_rel),
         sl = map(data, step_lengths),
         nsd_=map(data, nsd))%>%unnest()
trk
#' Now, calculate month, year, hour, week of each observation and append these to the dataset
#' Unlike the movement charactersitics, these calculations can be done all at once,
#' since they do not utilize successive observations (like step lengths and turn angles do).
trk<-trk%>%
  mutate(
    week=week(t_),
    month = month(t_, label=TRUE),
    year=year(t_),
    hour = hour(t_)
  )
#' Now, we need to again tell R that this is a track (rather
#' than just a data frame)
class(trk)
class(trk)<-trk.class
#' Lets take a look at what we created
trk
# Compare amt step lengths (sl, equal-area projection) against the
# adehabitatLT distances (dist, regional UTM projections).
# Renamed local `c` -> `sl_minus_dist` so base::c() is not shadowed.
a <-trk$sl
b <-trk$dist
sl_minus_dist <- a - b
head(sl_minus_dist)
max(sl_minus_dist, na.rm=TRUE)
trk<-trk%>% mutate(difdist=(trk$sl-trk$dist))
tapply(abs(trk$difdist),list(trk$id),na.rm=TRUE,max)
tapply(trk$difdist,list(trk$id),na.rm=TRUE,mean)
tapply(trk$difdist,list(trk$project_region,trk$id),na.rm=TRUE,max)
proj_dif=tapply(abs(trk$difdist),list(trk$id),na.rm=TRUE,max)
plot(proj_dif)
xyplot(difdist~id, data = trk, groups = id)
xyplot(difdist~id, data = trk, groups = project_region, auto.key=list(columns = 2))
xyplot(difdist~project_region, data = trk, groups = project_region, auto.key=list(columns = 2))
# BUG FIX: original referenced undefined object `trk1`.
table(trk$id)
ft=ftable(AFW1$id)
f<-as.data.frame.table(ft)
f$Var1 <- NULL
f$Var2 <- NULL
subset(f,Freq>0)
# NOTE(review): the line below referenced objects (Frequency, ScenarioC,
# BiodUNEP, Biome) that are never defined in this script -- leftover paste
# from another project, disabled:
#ftable(tapply(Frequency,list(ScenarioC,BiodUNEP,Biome),sum))
(difdist<-trk %>% nest(-id,-project_region) %>% mutate(sr = map(data, summarize_sampling_rate, time_unit = "hour")) %>%
    select(id,sex,age,weight,status,sr) %>% unnest)
# BUG FIX: original piped the nested tibble into max(), which is not valid;
# build the nested track and report the maximum difference separately.
nesttrk<-trk%>%nest(-id,-project_region)
max(trk$difdist, na.rm=TRUE)
nesttrk #%>% print(n = 117)
########################################################################################################################################
####################### Jaguar Dataframe with UTMs ########################################################################
# Inspect and re-combine all region subsets (split versions: AFW1/2, Cerrado1/2,
# Drych1/2, Iguazu1/2) into one data frame.
# BUG FIX: original had `head(AFW2),head(Caatinga)` (comma -> syntax error)
# and `str(Drych2)` where every sibling uses head().
head(AFW1);head(AFW2);head(Caatinga);head(Cerrado1);head(Cerrado2);head(CRica);head(Pantanal);head(Drych1);head(Drych2);head(Hch);
head(FPy);head(Iguazu1);head(Iguazu2);head(Mamiraua);head(iopPA);head(Lacandona);head(MexEast);head(Sonora)
jaguar=rbind(AFW1,AFW2,Caatinga,Cerrado1,Cerrado2,CRica,Pantanal,Drych1,Drych2,Hch,FPy,Iguazu1,Iguazu2,Mamiraua,iopPA,Lacandona,MexEast,Sonora)
#head(jaguar); str(jaguar)
#jaguar_df <- jaguar
#write.table(jaguar,file="c:/RWorkDir/jaguardatapaper/jaguar.txt",row.names = F,quote=F,col.names=T,sep="\t")
#write.table(jaguar_df,file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt",row.names = F,quote=F,col.names=T,sep="\t")
#jaguar_df <- read.delim(file="c:/RWorkDir/jaguardatapaper/jaguar_df.txt")
##############################################################################################################
#' ## Creating a track in amt (commom to both RSF and SSF)
##############################################################################################################
### (1) ##############################################
###  # Atlantic Forest W1
########################################################
# Build an amt track per region on the UTM coordinates, then append movement
# characteristics (absolute/relative direction, step length, NSD) and
# calendar covariates. AFW1 and AFW2 follow the identical recipe.
#AFW1 <- read.delim(file="c:/RWorkDir/jaguardatapaper/AFW1.txt")  # <-- if use read table it will be required to adjust variables again!!!
#age <- as.numeric(levels(AFW1$age))[AFW1$age]
#weight <- as.numeric(levels(AFW1$weight))[AFW1$weight]
#AFW1$id <- as.factor(AFW1$individual.local.identifier..ID.)
#AFW1$age <- age  ### converted to number
#AFW1$weight <- weight   ### converted to number
#AFW1 <- read.delim(file="c:/RWorkDir/jaguardatapaper/AFW1.txt")
#AFW1$timestamp.posix <- as.character(AFW1$timestamp.posix)
#AFW1$timestamp.posix<- as.POSIXct(AFW1$timestamp.posix, format ="%Y-%m-%d %H:%M:%S", tz = 'GMT')
#AFW1$date <- as.character(AFW1$date)
#AFW1$date <- as.POSIXct(AFW1$date, format ="%Y-%m-%d %H:%M:%S", tz = 'GMT')
head(AFW1);str(AFW1)
#" Checking order again
# all.equal() confirms the frame is already sorted by id, then date.
jaguar_ord <- AFW1[order(AFW1$id,AFW1$date),]
all.equal(AFW1,jaguar_ord)
trk <- mk_track(AFW1,.x=utm_x, .y=utm_y, .t=date, id=id,Event_ID=Event_ID, sex=sex, age=age, weight=weight,
                status=status, period=period,long_x=long_x, lat_y=lat_y, crs = CRS("+proj=utm +zone=22K +south +datum=WGS84 +units=m +no_defs"))  ### WGS84
trk <- trk %>% arrange(id)
trk
#' ## Movement Characteristics
nesttrk<-trk%>%nest(-id)
nesttrk #%>% print(n = 117)
#' We can add a columns to each nested column of data using purrr::map
trk<-trk %>% nest(-id) %>%
  mutate(dir_abs = map(data, direction_abs,full_circle=TRUE, zero="N"),
         dir_rel = map(data, direction_rel),
         sl = map(data, step_lengths),
         nsd_=map(data, nsd))%>%unnest()
trk.class<-class(trk)
trk
#' Calculate month, year, hour, week of each observation and append these to the dataset
#' Unlike the movement charactersitics, these calculations can be done all at once,
#' since they do not utilize successive observations (like step lengths and turn angles do).
trk<-trk%>%
  mutate(
    week=week(t_),
    month = month(t_, label=TRUE),
    year=year(t_),
    hour = hour(t_)
  )
#' Now, we need to again tell R that this is a track (rather than just a data frame)
class(trk)
class(trk)<-trk.class
#' Lets take a look at what we created
trk
# Keep a region-specific copy before trk is reused for the next region.
AFW1trk <-trk
########################################
### (2) ##############################################
###  # Atlantic Forest W2
########################################################
#AFW2 <- read.delim(file="c:/RWorkDir/jaguardatapaper/AFW2.txt")  # <-- if use read table it will be required to adjust variables again!!!
#age <- as.numeric(levels(AFW2$age))[AFW2$age]
#weight <- as.numeric(levels(AFW2$weight))[AFW2$weight]
#AFW2$id <- as.factor(AFW2$individual.local.identifier..ID.)
#AFW2$age <- age  ### converted to number
#AFW2$weight <- weight   ### converted to number
#AFW2 <- read.delim(file="c:/RWorkDir/jaguardatapaper/AFW2.txt")
#AFW2$timestamp.posix <- as.character(AFW2$timestamp.posix)
#AFW2$timestamp.posix<- as.POSIXct(AFW2$timestamp.posix, format ="%Y-%m-%d %H:%M:%S", tz = 'GMT')
#AFW2$date <- as.character(AFW2$date)
#AFW2$date <- as.POSIXct(AFW2$date, format ="%Y-%m-%d %H:%M:%S", tz = 'GMT')
head(AFW2);str(AFW2)
#" Checking order again
jaguar_ord <- AFW2[order(AFW2$id,AFW2$date),]
all.equal(AFW2,jaguar_ord)
trk <- mk_track(AFW2,.x=utm_x, .y=utm_y, .t=date, id=id,Event_ID=Event_ID, sex=sex, age=age, weight=weight,
                status=status, period=period,long_x=long_x, lat_y=lat_y, crs = CRS("+proj=utm +zone=22K +south +datum=WGS84 +units=m +no_defs"))  ### WGS84
trk <- trk %>% arrange(id)
trk
#' ## Movement Characteristics
nesttrk<-trk%>%nest(-id)
nesttrk #%>% print(n = 117)
#' We can add a columns to each nested column of data using purrr::map
trk<-trk %>% nest(-id) %>%
  mutate(dir_abs = map(data, direction_abs,full_circle=TRUE, zero="N"),
         dir_rel = map(data, direction_rel),
         sl = map(data, step_lengths),
         nsd_=map(data, nsd))%>%unnest()
trk.class<-class(trk)
trk
#' Calculate month, year, hour, week of each observation and append these to the dataset
#' Unlike the movement charactersitics, these calculations can be done all at once,
#' since they do not utilize successive observations (like step lengths and turn angles do).
trk<-trk%>%
  mutate(
    week=week(t_),
    month = month(t_, label=TRUE),
    year=year(t_),
    hour = hour(t_)
  )
#' Now, we need to again tell R that this is a track (rather than just a data frame)
class(trk)
class(trk)<-trk.class
#' Lets take a look at what we created
trk
AFW2trk <-trk
#################################################################################
### Atlantic Forest West  => AFWtrk  ####
AFWtrk=rbind(AFW1trk,AFW2trk); AFWtrk
################################################################################
|
df7255ec9e075ba97634cfefe52a92fc88d9ced8
|
6c584706e6eab645e11357bde8f393013c69e4c9
|
/Análise Preditiva e Data Mining/Aula 2/Aula 2.R
|
5d5dc5207c1e9b9588dc948a3c4b8b093c7699c5
|
[] |
no_license
|
charlesartbr/fiap-mba-big-data-data-science
|
cce1b64c301187a049cd9929d5fafd7e6985503e
|
de4d8372a7ce26ac8e4556925416e5c9e1932020
|
refs/heads/master
| 2022-09-05T00:33:21.367281
| 2022-08-09T14:01:28
| 2022-08-09T14:01:28
| 185,289,505
| 0
| 1
| null | 2021-01-15T16:47:00
| 2019-05-07T00:10:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,143
|
r
|
Aula 2.R
|
# Linear-regression exercise on the Bike Sharing dataset: fit `cnt` (rental
# count) against temperature and, later, qualitative predictors via dummies.
arquivo <- read.csv(file = 'Bike_Sharing.csv')
View(arquivo)
# NOTE(review): attach() puts the columns on the search path; prefer explicit
# arquivo$col or the data= argument of lm() to avoid masking surprises.
attach(arquivo)
# Simple regression: count ~ temperature
modelo1 <- lm(cnt ~ temp)
summary(modelo1)
plot(cnt~temp)
abline(modelo1)
fitted(modelo1)
residuals(modelo1)
# Two predictors (additive)
modelo2 <- lm(cnt ~ temp + atemp)
summary(modelo2)
# temp * atemp = main effects plus their interaction
modelo3 <- lm(cnt ~ temp * atemp)
summary(modelo3)
# Qualitative predictor variables
# create dummy (dichotomous) variables
# NOTE: ifelse() here yields character "1"/"0"; lm() will treat each dummy as
# a two-level factor, which gives the same fit as numeric 0/1 dummies.
season1 = season
season1 = ifelse(season==1,"1","0")
season2 = season
season2 = ifelse(season==2,"1","0")
season3 = season
season3 = ifelse(season==3,"1","0")
season4 = season
season4 = ifelse(season==4,"1","0")
weathersit1 = weathersit
weathersit1 = ifelse(weathersit==1,"1","0")
weathersit2 = weathersit
weathersit2 = ifelse(weathersit==2,"1","0")
weathersit3 = weathersit
weathersit3 = ifelse(weathersit==3,"1","0")
# Multiple regression with dummies (season1/weathersit3 left out as baselines)
modelo2 <- lm(cnt ~ temp + season2 + season3 + season4 + mnth + weathersit1 + weathersit2)
summary(modelo2)
fitted(modelo2)
residuals(modelo2)
rstandard(modelo2)
# Interaction model plus humidity and wind speed
modelo3 <- lm(cnt ~ temp * atemp +
                season2 + season3 + season4 +
                hum + windspeed +
                weathersit2 + weathersit3)
summary(modelo3)
|
5432922943418608973cd2a11bb980287b33ab51
|
e58cb0a3ce95401501f0f0441a492529632b41f7
|
/processes/trinity/createTrinityJob.R
|
dbc58605bcff316c602fb42cb8d887b529d191e1
|
[] |
no_license
|
larsgr/GRewdPipeline
|
ea451c75b5f4d91d4f92a941e3b2f3461566ee98
|
77a7d5b17373a2139d34327942fcec300b62fb40
|
refs/heads/master
| 2020-12-29T02:19:46.034273
| 2019-01-15T10:24:33
| 2019-01-15T10:24:33
| 30,870,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,004
|
r
|
createTrinityJob.R
|
source("R/fillTemplateFile.R")
source("processes/SLURMscript/createSLURMscript.R")
createTrinityJob <- function( leftReadFiles, rightReadFiles, outDir, trinityOutputName,
jobName = "trinity", seqType="fq", SS_lib_type="RF",
CPU=10, max_memory="calculate"){
# stop if outDir already exists
if(file.exists(outDir)){
stop(paste0("Could not create job because directory ",outDir," already exists!"))
}
dir.create(outDir) # create output directory
if(max_memory=="calculate"){
# require 4x fq file size
memUsageGB <- max(2,4*sum(file.info(leftReadFiles)$size) %/% 1000000000)
max_memory <- paste0(memUsageGB,"G")
}
script <- paste( sep="\n",
"module load trinity",
"module load bowtie",
"module load samtools",
"",
paste(
"Trinity",
"--seqType", seqType,
"--SS_lib_type", SS_lib_type,
"--left", paste(leftReadFiles,collapse=','),
"--right", paste(rightReadFiles,collapse=','),
"--CPU", CPU,
"--max_memory", max_memory,
"--full_cleanup" ),
"",
paste0('
if [ -f trinity_out_dir.Trinity.fasta ];
then
echo "CMD: mv trinity_out_dir.Trinity.fasta ',trinityOutputName,'"
mv trinity_out_dir.Trinity.fasta ',trinityOutputName,'
else
echo "Trinity assembly file does not exists. Check log for errors and try again"
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Job finished with errors" >> trinity.$SLURM_JOB_ID.started
mv ',jobName,'.$SLURM_JOB_ID.started ',jobName,'.$SLURM_JOB_ID.failed
exit 1
fi')
)
createSLURMscript(script = script,workdir = normalizePath(outDir),jobName = jobName,
ntasks = CPU, partition="hugemem", mem=max_memory)
}
|
95b80ac3936b926d0ef7a7af09dfeeb8bedb8b85
|
9284bd4866430a33c9fc5003b76de7ce4858361d
|
/man/geo_fda.Rd
|
d4fc74d9a0351a702dc5441994e69c90e9e72221
|
[
"MIT"
] |
permissive
|
gilberto-sassi/geoFourierFDA
|
dd0f8b27120708acf8689c2f5b00c71f1f378599
|
07cffa1366d82636add7fb7d02a49a5f331147fe
|
refs/heads/main
| 2023-03-02T18:33:23.976846
| 2021-02-10T21:46:41
| 2021-02-10T21:46:41
| 276,725,569
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,149
|
rd
|
geo_fda.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geo_fda.R
\name{geo_fda}
\alias{geo_fda}
\title{Geostatistical estimates for function-valued data.}
\usage{
geo_fda(
m_data,
m_coord,
new_coord,
m,
n_quad = 20,
t = seq(from = -pi, to = pi, length.out = 1000)
)
}
\arguments{
\item{m_data}{a matrix where each column is a time series in a location}
\item{m_coord}{a matrix with coordinates (first column is latitude and
second column longitude)}
\item{new_coord}{a vector with a new coordinate (first column is latitude
and second longitude)}
\item{m}{order of the Fourier polynomial}
\item{n_quad}{a scalar with number of quadrature points. Default value
\code{nquad = 20}.}
\item{t}{a vector with points to evaluate from \eqn{-\pi} to \eqn{\pi}.
Default \code{t = seq(from = -pi,to = pi,length.out = 1e+3)}.}
}
\value{
a list with three components
\describe{
\item{\code{curve}}{estimate curve at \code{t} points}
\item{\code{lambda}}{weights in the linear combination in the functional
kriging}
\item{\code{x}}{points where the curve was evaluated}
}
}
\description{
\code{geo_fda} finds the ordinary kriging estimate for spatial functional
data using the model proposed by Giraldo(2011).
}
\details{
\code{geo_fda} is similar to model proposed by
\cite{giraldo2011ordinary}. The main difference is that we have used
Gauss-Legendre quadrature to estimate the trace-variogram. Using
Gauss-Legendre quadrature gives estimates with smaller mean square error
than the trace-variogram estimates from Giraldo(2011).
So far, we have used Fourier series to smooth the time series.
}
\examples{
data(canada)
y_hat <- geo_fda(canada$m_data, canada$m_coord, canada$ThePas_coord,
n_quad = 2)
}
\references{
Giraldo, R., Delicado, P., & Mateu, J. (2011). Ordinary kriging
for function-valued spatial data. \emph{Environmental and Ecological
Statistics}, 18(3), 411-426.
Giraldo, R., Mateu, J., & Delicado, P. (2012). geofd: an \code{R} package
for function-valued geostatistical prediction.
\emph{Revista Colombiana de Estadística}, 35(3), 385-407.
}
\seealso{
\code{\link{coef_fourier}}, \code{\link{fourier_b}}
}
|
f9c76ff4d620cc789d4b2443fcc989629635a0a8
|
c20ee8390240c178add9f49b497bc3de9a660bba
|
/R/produce.two.dimensional.noise.R
|
51ac484e8b87861ae6b656aa53fbad2eeb08cc05
|
[] |
no_license
|
meteoswiss-mdr/NowPrecip
|
e3c90040f5e5545bfc1808fc2569aee59042c31b
|
2ee0521a7cd8b7961bb7eed399f745aa6845c22b
|
refs/heads/master
| 2021-01-22T02:49:03.171561
| 2017-10-16T09:26:06
| 2017-10-16T09:26:06
| 81,074,161
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,612
|
r
|
produce.two.dimensional.noise.R
|
produce.two.dimensional.noise <-
  function( z
            ,nr.frames = 60
            ,win.type = 'flat.hanning'
            ,do.set.seed = FALSE
            ,on.screen = FALSE
  ) {
    # Produces `nr.frames` fields of 2-dimensional spatially correlated noise.
    #
    # The spatial correlation structure is borrowed from the input field `z`:
    # the amplitude spectrum of (windowed) `z` is used as a Fourier-domain
    # filter applied to white Gaussian noise ("power filtering").
    #
    # Args:
    #   z: 2-d numeric matrix (e.g. a precipitation field).
    #   nr.frames: number of noise fields to generate.
    #   win.type: taper passed to build.two.dimensional.window()
    #     ('flat.hanning', 'hanning' or 'none').
    #   do.set.seed: if TRUE, fixes the RNG seed (42) for reproducibility.
    #   on.screen: if TRUE, opens an x11 device and plots each field.
    # Returns:
    #   array of dim c(nrow(z), ncol(z), nr.frames); each slice is
    #   standardized to mean 0, sd 1.
    #
    #Examples:
    #z=Xln0[,,nr.obs]
    #At = produce.two.dimensional.noise(z=Xln0[,,nr.obs])
    # make sure non-rainy pixels are set to zero (shift so the minimum is 0)
    min.value = min(z)
    z = z - min.value
    # store original field size
    orig.dim.x = dim(z)[1]
    orig.dim.y = dim(z)[2]
    orig.dm.size = c(orig.dim.x,orig.dim.y)
    # buffer the field with zeros to obtain a squared domain
    dim.x = max(orig.dm.size)
    dim.y = dim.x
    dm.size = c(dim.x,dim.y)
    zs = array(0,dim=dm.size)
    # centre the original field inside the square along its short dimension
    if(orig.dm.size[1] == dim.x){
      idx.buffer = round((dim.y - orig.dim.y)/2)
      zs[,idx.buffer:(idx.buffer+orig.dim.y-1)] = z
    }else{
      idx.buffer = round((dim.x - orig.dim.x)/2)
      zs[idx.buffer:(idx.buffer+orig.dim.x-1),] = z
    }
    # Use the (windowed) observation's amplitude spectrum as filter
    if(win.type == 'none'){
      mask = array(1,dim=dm.size)
    }else{
      mask = build.two.dimensional.window(wsize=dm.size,wtype=win.type)
    }
    mfilter = Mod(fft(zs*mask))
    # Produce normal noise array
    if(do.set.seed) set.seed(42)
    Ztf = array(rnorm(dm.size[1]*dm.size[2]*nr.frames) ,dim=c(dm.size[1],dm.size[2],nr.frames))
    # Power-filter images
    result = array(dim=c(orig.dim.x,orig.dim.y,nr.frames))
    for(m in 1:nr.frames) {
      z.ftp = fft(Ztf[,,m])
      # Multiply noise spectrum by the filter to impose the correlation
      z.ftp.fl = z.ftp * mfilter
      z.iftp = fft(z.ftp.fl ,inverse=TRUE)
      # The imaginary parts are discarded; scaling by 1/N is unnecessary
      # because the field is standardized below anyway.
      z.cor = Re(z.iftp)
      # Crop a square equal to the original size.
      a = array(dim=c(orig.dim.x,orig.dim.y))
      if(orig.dim.y > dim(z.cor)[1]) {
        # Field larger than the filtered noise: stick z.cor at the corner.
        # BUG FIX: the original indexed with an undefined variable `size`;
        # use the actual overlapping extent instead.
        n1 = min(orig.dim.x, dim(z.cor)[1])
        n2 = min(orig.dim.y, dim(z.cor)[2])
        a[seq_len(n1), seq_len(n2)] = z.cor[seq_len(n1), seq_len(n2)]
      } else {
        # NOTE(review): this centred crop assumes (dim - orig.dim) is even;
        # with an odd difference the indices are fractional -- confirm inputs.
        difx = dim.x-orig.dim.x
        dify = dim.y-orig.dim.y
        a = z.cor[(difx/2+1):(dim.x-difx/2),(dify/2+1):(dim.y-dify/2)]
      }
      # Bring everything back to a N(0,1)
      a = (a-mean(a))/sd(a)
      if(on.screen) {
        x11()
        image(a,breaks=seq(-5,5,0.1), col = rainbow(100))
      }
      result[,,m] = a
    }
    return(result)
  }
build.two.dimensional.window <-
function( wsize, wtype = 'flat.hanning'
) {
  ## Build a 2-d tapering window of dimensions wsize[1] x wsize[2] as the
  ## square root of the outer product of two 1-d tapers.
  ##
  ## Supported 'wtype' values: 'hanning' (raised cosine) and 'flat.hanning'
  ## (flat centre with cosine roll-off).  If the two per-dimension tapers
  ## cannot be built (e.g. wsize has only one usable element), the error
  ## handler falls back to a symmetric window of side wsize[1].
  two.dimensional.window <- tryCatch(
    {
      ## Asymmetric window: one taper per dimension.
      one.dim.taper <- function(len) {
        switch(wtype,
               hanning={
                 0.5 - 0.5*cos(2*pi*seq(len)/(len - 1))
               },
               flat.hanning={
                 flat.len = len/4
                 half.len = len/2
                 pos = seq(-half.len,half.len,length.out=2*half.len)
                 ramp = abs(pos)-flat.len
                 ramp[ramp<0]=0.
                 taper = 0.5*(1.0 + cos(pi*ramp/flat.len))
                 taper[abs(pos)>(2*flat.len)]=0.0
                 taper
               },
               stop('Unsupported window type.')
        )
      }
      sqrt(outer(one.dim.taper(wsize[1]), one.dim.taper(wsize[2])))
    },
    error=function(cond) {
      ## Symmetric window fallback (square, side wsize[1]).
      switch(wtype,
             hanning={
               w1d = 0.5 - 0.5*cos(2*pi*seq(wsize[1])/(wsize[1] - 1))
             },
             flat.hanning={
               flat.len = wsize[1]/4
               half.len = wsize[1]/2
               pos = seq(-half.len,half.len,length.out=2*half.len)
               ramp = abs(pos)-flat.len
               ramp[ramp<0]=0.
               taper = 0.5*(1.0 + cos(pi*ramp/flat.len))
               taper[abs(pos)>(2*flat.len)]=0.0
               w1d = taper
             },
             stop('Unknown window type.')
      )
      sqrt(outer(w1d,w1d))
    }
  )
  return(two.dimensional.window)
}
|
8e7409b9686194d60c74dff14d9b852f67b41f7d
|
9ae6b5fa9a3e4ff67901b81702ae1cdf3c6c891b
|
/inst/dashboard/ui.R
|
79b1824c80abf30289f4fbc145d95b3205fc080a
|
[] |
no_license
|
dimagor/socraticswirlInstructor
|
ae314942c2882e0d881c44c211d8d000346d228b
|
b9ebb96d6ed26c00b534627d83353b0bfaf91a89
|
refs/heads/master
| 2021-01-18T22:32:54.656665
| 2016-07-14T21:31:13
| 2016-07-14T21:31:13
| 33,081,253
| 6
| 5
| null | 2015-12-16T21:04:36
| 2015-03-29T17:02:54
|
R
|
UTF-8
|
R
| false
| false
| 4,666
|
r
|
ui.R
|
## ui.R -- shinydashboard UI for the SocraticSwirl instructor dashboard.
## Layout: a header with a dynamic progress menu, a sidebar combining the
## navigation menu with course/lesson/precept selectors and a manual data
## refresh box, and a body with one tabItem per sidebar menu entry
## (matched via tabName).  All *Output() placeholders are rendered by the
## corresponding server code.
library(shiny)
library(shinydashboard)
## Top bar: application title plus a server-rendered progress dropdown.
header <- dashboardHeader(title = "SocraticSwirl",
                          dropdownMenuOutput("progressMenu"))
## Left sidebar: session selector, navigation menu and selection controls.
sidebar <- dashboardSidebar(
  uiOutput("usersessions"),
  hr(),
  sidebarMenu(
    menuItem("Exercise Dashboard", tabName = "exercise_tab", icon = icon("dashboard")),
    menuItem("Lesson Overview", tabName = "overview_tab", icon = icon("list")),
    menuItem("Submitted Questions", tabName = "questions_tab", icon = icon("question-circle")),
    menuItem("Student Dashboard",tabName = "success_tab", icon=icon("list")),
    menuItem("Response Details",tabName = "details_tab", icon = icon("list"))
  ),
  p(), # Fix for better separation
  hr(),
  # Course/lesson/precept selectors, populated server-side.
  box(style = "color: black;",
      width = NULL, title = "Selections", collapsible = FALSE,
      uiOutput("selectCourse"),
      uiOutput("selectLesson"),
      uiOutput("selectPrecept")
  ),
  p(), # Fix for better separation
  # Manual refresh button plus "time since last update" indicator.
  box(style = "color: black;",
      width = NULL, title = "Data Update", collapsible = TRUE,
      actionButton("refresh", "Refresh Now"),
      uiOutput("timeSinceLastUpdate")
  )
)
## Main content area: one tabItem per sidebar menu entry.
body <- dashboardBody(
  tabItems(
    # Per-exercise view: selector/progress bars, plots, prompt/answer info.
    tabItem(tabName = "exercise_tab",
            fluidRow(
              # Left Column
              column(width = 6,
                     # Exercise Selector & Progress
                     box(collapsible = FALSE, width = NULL, title = "Select Exercise",
                         uiOutput("selectExercise"),
                         uiOutput("attemptedBar", style = "list-style-type: none;"),
                         uiOutput("completedBar", style = "list-style-type: none;")),
                     # Plots
                     tabBox(width = NULL,
                            tabPanel(title = "Attempt Frequency",
                                     plotOutput("plotFreqAttempts")),
                            tabPanel(title = "Progress Tracking",
                                     plotOutput("plotProgress"))
                     )
              ),
              # Right Column
              column(width = 6,
                     # Exercise Info
                     tabBox(width = NULL,
                            tabPanel(title = "Exercise Prompt",
                                     uiOutput("exerciseQuestion")),
                            tabPanel(title = "Correct Answer",
                                     verbatimTextOutput("exerciseAnswer"), collapsible = TRUE)
                     ),
                     # Answer Table
                     tabBox(width = NULL,
                            tabPanel(title = "Incorrect Answers",
                                     # selectInput("incorrectSort", label = "Sort Column:", width = "50%",
                                     #             choices = c("updatedAt", "command", "isError", "errorMsg"),
                                     #             selected = "updatedAt"),
                                     # checkboxInput("incorrectSortDescending", label = "Descending", value = TRUE),
                                     dataTableOutput("incorrectAnswers")),
                            tabPanel(title = "Common Errors",
                                     dataTableOutput("commonErrors")
                            )
                     )
              )
            )
    ),
    # Whole-lesson summary plot.
    tabItem(tabName = "overview_tab",
            box(collapsible = TRUE, width = NULL,
                plotOutput("overviewGraph"))
    ),
    # Questions submitted by students during the lesson.
    tabItem(tabName = "questions_tab",
            box(width = NULL,
                dataTableOutput("questionsasked")
            )
    ),
    # Raw response tables, filterable by exercise or by student.
    tabItem(tabName = "details_tab",
            tabBox(width = NULL
                   ,tabPanel(title = "Select Exercise"
                             ,uiOutput("selectExercise2")
                             ,dataTableOutput("exerciseanswers") )
                   ,tabPanel(title = "Select Student"
                             ,uiOutput("selectStudent")
                             ,dataTableOutput("studentanswers") )
            )),
    # Per-student aggregate statistics (attempts, successes, ratios, times).
    tabItem(tabName = "success_tab",
            tabBox(width = NULL
                   ,tabPanel(title = "Unique Attempt"
                             ,dataTableOutput("uniqueAttemptTab") )
                   ,tabPanel(title = "Unique Success"
                             ,dataTableOutput("uniqueSuccessTab") )
                   ,tabPanel(title = "Success Ratio"
                             ,dataTableOutput("uniqueRatioTab") )
                   ,tabPanel(title = "Incomplete Attempt"
                             ,dataTableOutput("unfinishedTab") )
                   ,tabPanel(title = "Unique Skip"
                             ,dataTableOutput("uniqueSkipTab") )
                   ,tabPanel(title = "Total Time"
                             ,dataTableOutput("timerTab") )
                   ,tabPanel(title = "Attempt Counts"
                             ,dataTableOutput("attemptTab") )
                   ,tabPanel(title = "Success Counts"
                             ,dataTableOutput("successTab") )
                   ,tabPanel(title = "Overall Success Ratio"
                             ,dataTableOutput("ratioTab") )
            ))
  )
)
## Assemble the page from the three components defined above.
dashboardPage(header, sidebar, body, skin = "blue")
|
9b37b8b8cc9803a22eec6024e84e402d844e7d9e
|
3c5d52b699e6645cbb086b4f45519f898a4b2f64
|
/R/read.fh.R
|
67fda3994fb344ab659e1131ae5701bb708aa221
|
[] |
no_license
|
AndyBunn/dplR
|
249427c0be6c68c80c267ad4392a03a331bb056d
|
8bb524ae6a024affea2c9ec982b0972fc5175280
|
refs/heads/master
| 2023-07-06T04:02:45.073034
| 2023-04-12T21:41:31
| 2023-04-12T21:41:31
| 195,873,071
| 31
| 12
| null | 2023-06-23T14:33:40
| 2019-07-08T19:21:39
|
R
|
UTF-8
|
R
| false
| false
| 13,582
|
r
|
read.fh.R
|
## Read a tree-ring series file in the Heidelberg ("FH") format.
##
## fname: path to the file.  BC_correction: if TRUE, series dated BC are
## shifted by +1 year so they follow the rwl convention (which uses a
## non-existing year 0, while the FH format skips year 0).
##
## Returns a data.frame of class c("rwl", "data.frame") with one column per
## series and one row per year.  If complete tree/core IDs can be derived
## from the headers, they are attached as attribute "ids"; pith offsets
## (if present) are attached as attribute "po".
read.fh <- function(fname, BC_correction = FALSE) {
    inp <- readLines(fname, ok=TRUE, warn=FALSE)
    ## Get start and end positions of headers and data blocks
    header.begin <- grep("^HEADER:$", inp)
    header.end <- grep("^DATA:(Tree|Single)$", inp)
    n <- length(header.end)
    if(n == 0) {
        stop('file has no data in "Tree" or "Single" formats')
    }
    ## For each data block in one of the supported formats, find the
    ## corresponding header block
    header.taken <- logical(length(header.begin))
    for (i in seq_len(n)) {
        n.preceding <- sum(header.begin < header.end[i] - 1)
        if (n.preceding == 0 || header.taken[n.preceding]) {
            stop("invalid file: HEADER and DATA don't match")
        } else {
            header.taken[n.preceding] <- TRUE
        }
    }
    if (!all(header.taken)) {
        warning("more HEADER blocks than DATA blocks in supported formats")
    }
    ## For each data block in one of the supported formats, find the
    ## following header block (or end of file)
    data.end <- numeric(n)
    for (i in seq_len(n-1)) {
        tmp <- header.begin[header.begin > header.end[i]]
        data.end[i] <- tmp[1]
    }
    tmp <- header.begin[header.begin > header.end[n]]
    if (length(tmp) > 0) {
        data.end[n] <- tmp[1]
    } else {
        data.end[n] <- length(inp) + 1
    }
    ## Forget headers that are not used by the data blocks
    header.begin <- header.begin[header.taken]
    ## Get essential metadata from headers.  Per-series vectors; unknown
    ## values stay NA.  Default unit is 1/100 mm (multiplier 1, divisor 100).
    keycodes <- character(n)
    lengths <- numeric(n)
    end.years <- numeric(n)
    start.years <- numeric(n)
    multipliers <- rep(1, n)
    divisors <- rep(100, n)
    site.code <- rep(NA_character_, n)
    tree.vec <- rep(NA_real_, n)
    core.vec <- rep(NA_real_, n)
    radius.vec <- rep(NA_real_, n)
    stemdisk.vec <- rep(NA_real_, n)
    pith.offset <- rep(NA_real_, n)
    for (i in seq_len(n)) {
        this.header <- inp[(header.begin[i]+1):(header.end[i]-1)]
        ## get keycode (= series id)
        this.keycode <- sub("KeyCode=", "", fixed=TRUE,
                            x=grep("^KeyCode=|Keycode=", this.header, value=TRUE))
        if (length(this.keycode) != 1) {
            string2 <- gettext('number of "KeyCode" lines is not 1',
                               domain="R-dplR")
            stop(gettextf("in series %s: ", as.character(i), domain="R-dplR"),
                 string2, domain=NA)
        } else {
            keycodes[i] <- this.keycode
        }
        ## get length
        this.length <- sub("Length=", "", fixed=TRUE,
                           x=grep("^Length=", this.header, value=TRUE))
        if (length(this.length) != 1) {
            string2 <- gettext('number of "Length" lines is not 1',
                               domain="R-dplR")
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 string2, domain=NA)
        } else {
            lengths[i] <- as.numeric(this.length)
        }
        ## get end year
        this.end.year <- sub("DateEnd=", "", fixed=TRUE,
                             x=grep("^DateEnd=", this.header, value=TRUE))
        if (length(this.end.year) != 1) {
            string2 <- gettext('number of "DateEnd" lines is not 1',
                               domain="R-dplR")
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 string2, domain=NA)
        } else {
            end.years[i] <- as.numeric(this.end.year)
        }
        ## get start year; if DateBegin is absent, derive it from
        ## DateEnd and Length instead
        this.start.year <- sub("DateBegin=", "", fixed=TRUE,
                               x=grep("^DateBegin=", this.header, value=TRUE))
        if (length(this.start.year) != 1) {
          if(length(this.end.year) == 1) {
            start.years[i] <- end.years[i]- lengths[i] +1
          } else {
            string2 <- gettext('number of "DateBegin" lines is not 1',
                               domain="R-dplR")
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 string2, domain=NA) }
        } else {
            start.years[i] <- as.numeric(this.start.year)
            # series spanning BC -> AD: insert the (non-existing) year 0
            # required by the rwl convention
            if (start.years[i] <0 && end.years[i]>0 && BC_correction == TRUE) {
              start.years[i] <- start.years[i]+1 }
        }
        ## correct BC dates to +1, because rwl-format uses non-existing year 0 and this is not necessary in the FH-format
        if (start.years[i]<0 && end.years[i]<0 && BC_correction == TRUE) {
          start.years[i] <- start.years[i] + 1
          end.years[i] <- end.years[i] + 1
        }
        ## get unit (by default, divide by 100); a "Unit" value like
        ## "1/100 mm" is parsed into multiplier 1 and divisor 100
        this.unit <- sub("Unit=", "", fixed=TRUE,
                         x=grep("^Unit=", this.header, value=TRUE))
        if (length(this.unit) == 1) {
            this.unit <- sub("mm", "", this.unit, fixed=TRUE)
            div.loc <- regexpr("/", this.unit, fixed=TRUE)
            if (div.loc > 0) {
                multipliers[i] <- as.numeric(substr(this.unit, 1, div.loc-1))
                divisors[i] <- as.numeric(substr(this.unit, div.loc+1,
                                                 nchar(this.unit)))
            } else {
                multipliers[i] <- as.numeric(this.unit)
                divisors[i] <- 1
            }
            if (is.na(multipliers[i]) || is.na(divisors[i])) {
                string2 <- gettext('cannot interpret "Unit" line',
                                   domain="R-dplR")
                stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                     string2, domain=NA)
            }
        } else if (length(this.unit) > 1) {
            string2 <- gettext('number of "Unit" lines is > 1',
                               domain="R-dplR")
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 string2, domain=NA)
        }
        ## get site code
        this.site <- sub("SiteCode=", "", fixed=TRUE,
                         x=grep("^SiteCode=", this.header, value=TRUE))
        if (length(this.site) == 1) {
            site.code[i] <- this.site
        }
        ## get tree number (only accepted if it is a whole number)
        this.tree <- sub("TreeNo=", "", fixed=TRUE,
                         x=grep("^TreeNo=", this.header, value=TRUE))
        if (length(this.tree) == 1) {
            tmp <- suppressWarnings(as.numeric(this.tree))
            if (identical(tmp, round(tmp))) {
                tree.vec[i] <- tmp
            }
        }
        ## get core number
        this.core <- sub("CoreNo=", "", fixed=TRUE,
                         x=grep("^CoreNo=", this.header, value=TRUE))
        if (length(this.core) == 1) {
            tmp <- suppressWarnings(as.numeric(this.core))
            if (identical(tmp, round(tmp))) {
                core.vec[i] <- tmp
            }
        }
        ## get radius number
        this.radius <- sub("RadiusNo=", "", fixed=TRUE,
                           x=grep("^RadiusNo=", this.header, value=TRUE))
        if (length(this.radius) == 1) {
            tmp <- suppressWarnings(as.numeric(this.radius))
            if (identical(tmp, round(tmp))) {
                radius.vec[i] <- tmp
            }
        }
        ## get stem disk number
        this.stemdisk <- sub("StemDiskNo=", "", fixed=TRUE,
                             x=grep("^StemDiskNo=", this.header, value=TRUE))
        if (length(this.stemdisk) == 1) {
            tmp <- suppressWarnings(as.numeric(this.stemdisk))
            if (identical(tmp, round(tmp))) {
                stemdisk.vec[i] <- tmp
            }
        }
        ## get pith offset (missing rings before start of series);
        ## stored as rings-to-pith, hence the + 1
        this.missing <-
            sub("MissingRingsBefore=", "", fixed=TRUE,
                x=grep("^MissingRingsBefore=", this.header, value=TRUE))
        if (length(this.missing) == 1) {
            tmp <- suppressWarnings(as.numeric(this.missing))
            if (identical(tmp, round(tmp)) && tmp >= 0) {
                pith.offset[i] <- tmp + 1
            }
        }
    }
    ## calculate time span for data.frame; r.off maps calendar years to
    ## matrix row indices
    min.year <- min(start.years)
    r.off <- min.year - 1
    max.year <- max(end.years)
    span <- min.year:max.year
    dendro.matrix <- matrix(NA, ncol = n, nrow = length(span))
    colnames(dendro.matrix) <- keycodes
    rownames(dendro.matrix) <- span
    ## get rid of comments (if any); FH comments start with ";"
    strip.comment <- function(x) {
        strsplit(x, ";")[[1]][1]
    }
    for (i in seq_len(n)) { # loop through data blocks
        portion.start <- header.end[i] + 1
        portion.end <- data.end[i] - 1
        n.expected <- end.years[i] - start.years[i] + 1
        if (portion.end < portion.start) {
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 gettextf("too few values (expected %d, got %d)",
                          n.expected, 0, domain="R-dplR"), domain=NA)
        }
        portion <- inp[portion.start:portion.end]
        if (nchar(portion[1]) < 60 ||
            grepl(";", portion[1], fixed=TRUE)) { # data is in column format
            ## "foo" is the vapply template: one character value per line
            data <- as.numeric(vapply(portion, strip.comment, "foo"))
        } else { # data is in block format: 10 fixed-width fields of 6 chars
            data <- numeric(length(portion) * 10)
            for (j in seq_along(portion)) {
                row.fwf <- substring(portion[j],
                                     seq(from=1, by=6, length=10),
                                     seq(from=6, by=6, length=10))
                row.numeric <- as.numeric(row.fwf)
                data[(j * 10 - 9):(j * 10)] <- row.numeric
            }
            ## Remove trailing zeros (padding at the end of the last row)
            zeros <- which(data == 0)
            if (length(zeros) > 0) {
                nonzeros <- setdiff(zeros[1]:length(data), zeros)
                if (length(nonzeros) > 0) {
                    zeros <- zeros[zeros > max(nonzeros)]
                    if (length(zeros) > 0) {
                        data <- data[-zeros]
                    }
                } else {
                    data <- data[-zeros]
                }
            }
        }
        ## convert raw values to mm using the per-series unit
        data <- data * multipliers[i] / divisors[i]
        n.true <- length(data)
        if (n.true == n.expected) {
            ## write data into matrix
            dendro.matrix[(start.years[i]-r.off):(end.years[i]-r.off), i] <-
                data
        } else if (n.true < n.expected) {
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 gettextf("too few values (expected %d, got %d)",
                          n.expected, n.true, domain="R-dplR"), domain=NA)
        } else if (all(is.na(data[(n.expected+1):n.true]))) {
            ## surplus values are all NA: silently drop them
            dendro.matrix[(start.years[i]-r.off):(end.years[i]-r.off), i] <-
                data[seq_len(n.expected)]
        } else {
            stop(gettextf("in series %s: ", keycodes[i], domain="R-dplR"),
                 gettextf("too many values (expected %d, got %d)",
                          n.expected, n.true, domain="R-dplR"), domain=NA)
        }
    }
    ## print a per-series summary (id, span, unit factor) to the console
    cat(sprintf(ngettext(n,
                         "There is %d series\n",
                         "There are %d series\n",
                         domain="R-dplR"),
                n))
    start.years.char <- format(start.years, scientific=FALSE, trim=TRUE)
    end.years.char <- format(end.years, scientific=FALSE, trim=TRUE)
    seq.series.char <- format(seq_len(n), scientific=FALSE, trim=TRUE)
    cat(paste0(format(seq.series.char, width=5), "\t",
               format(keycodes, width=8), "\t",
               format(start.years.char, width=5, justify="right"), "\t",
               format(end.years.char, width=5, justify="right"), "\t",
               format(multipliers/divisors,
                      scientific=FALSE, drop0trailing=TRUE),"\n"), sep="")
    rwl <- as.data.frame(dendro.matrix) # return data.frame
    ## Create data.frame for site, tree, core, radius, stem disk IDs
    all.have.treeID <- !any(is.na(tree.vec))
    na.core <- is.na(core.vec)
    all.have.coreID <- !any(na.core)
    ## Try to find implicit core IDs (tree ID occurs once)
    if (all.have.treeID && !all.have.coreID) {
        foo <- table(tree.vec)
        measured.once <- as.numeric(names(foo)[foo == 1])
        core.vec[na.core & tree.vec %in% measured.once] <- 1
        all.have.coreID <- !any(is.na(core.vec))
    }
    ## Only include "ids" data.frame if all tree and core IDs are known
    if (all.have.treeID && all.have.coreID) {
        unique.sites <- unique(site.code)
        n.unique <- length(unique.sites)
        if (n.unique > 1) {
            ## Multiple sites: make tree numbers globally unique.  Trees
            ## whose number collides with one already assigned are marked
            ## with purely imaginary values (1i * duplicate counter) so
            ## they can be renumbered with unused IDs afterwards.
            site.vec <- match(site.code, unique.sites)
            tree.vec2 <- complex(n, NA_real_, NA_real_)
            total.dupl <- 0
            for (i in seq_len(n.unique)) {
                idx <- which(site.vec == i)
                ut <- unique(tree.vec[idx])
                for (this.tree in ut) {
                    idx2 <- idx[tree.vec[idx] == this.tree]
                    if (this.tree %in% tree.vec2) {
                        tree.vec2[idx2] <- 1i * (total.dupl + 1)
                        total.dupl <- total.dupl + 1
                    } else {
                        tree.vec2[idx2] <- this.tree
                    }
                }
            }
            if (total.dupl > 0) {
                ## Assign duplicated trees the smallest positive integers
                ## not yet used as tree IDs.
                dont.change <- Im(tree.vec2) == 0
                existing <- unique(Re(tree.vec2[dont.change]))
                max.existing <- max(existing)
                if (max.existing < 1) {
                    free.ids <- 1:total.dupl
                } else {
                    free.ids <- which(!(1:max.existing %in% existing))
                    free.ids <-
                        c(free.ids,
                          seq(from=max.existing+1, by=1,
                              length.out=max(0, total.dupl-length(free.ids))))
                }
                tree.vec2[!dont.change] <-
                    free.ids[Im(tree.vec2[!dont.change])]
            }
            tree.vec2 <- Re(tree.vec2)
            adf <- data.frame(tree=tree.vec2, core=core.vec, site=site.vec,
                              row.names=keycodes)
        } else {
            adf <- data.frame(tree=tree.vec, core=core.vec, row.names=keycodes)
        }
        if (any(!is.na(radius.vec))) {
            adf <- cbind(adf, radius=radius.vec)
        }
        if (any(!is.na(stemdisk.vec))) {
            adf <- cbind(adf, stemDisk=stemdisk.vec)
        }
        attr(rwl, "ids") <- adf
        cat(gettext('Tree and core IDs were found. See attribute "ids".\n',
                    domain="R-dplR"))
    }
    ## Include pith offset data.frame if some pith offsets are known
    na.po <- is.na(pith.offset)
    if (any(!na.po)) {
        attr(rwl, "po") <- data.frame(series=keycodes, pith.offset=pith.offset)
        if (any(na.po)) {
            cat(gettext('Pith offsets were found (some missing values). See attribute "po".\n',
                        domain="R-dplR"))
        } else {
            cat(gettext('Pith offsets were found (no missing values). See attribute "po".\n',
                        domain="R-dplR"))
        }
    }
    class(rwl) <- c("rwl", "data.frame")
    rwl
}
|
775636df890424201df004eaef7b7f6b2810ee10
|
4b73241bda37bad4f17fdd53db997c12a59995fd
|
/man/calc_d18Ow.Rd
|
ad6ac373c8ee2e13f7df0cfb21cd07d7e01649d0
|
[] |
no_license
|
cubessil/isoprocessCUBES
|
b6c157376f8bfa14b32fab7c1dd689db1d69444c
|
d9efe3d52b50a2ef96b82c65bc76f2524268a775
|
refs/heads/master
| 2021-06-07T19:58:59.673194
| 2021-05-26T21:21:48
| 2021-05-26T21:21:48
| 133,712,554
| 0
| 1
| null | 2019-08-05T18:29:26
| 2018-05-16T19:15:33
|
R
|
UTF-8
|
R
| false
| true
| 210
|
rd
|
calc_d18Ow.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom.functions.R
\name{calc_d18Ow}
\alias{calc_d18Ow}
\title{Calculate d18Ow}
\usage{
calc_d18Ow(d18Om, Temp)
}
\description{
Calculates the oxygen isotope composition of water (d18Ow) from a mineral
d18O value (\code{d18Om}) and temperature (\code{Temp}).
}
|
4ae0af12d94314af3c0eb05e23a0d039985594c4
|
cbc8bdd64cf3c0eb4bb3b6767d0fb7f2e26280dd
|
/man/Run_JTree.Rd
|
48269157bc0cd730b86651b00f9f1c2818a82373
|
[] |
no_license
|
cran/treelet
|
6ae35b4c910fb995d9bbdfb0c41b7c7ab441fbd8
|
00c8e56c1910ade023267e10a4bf958c6d7d4242
|
refs/heads/master
| 2021-01-18T14:02:41.134700
| 2015-02-10T00:00:00
| 2015-02-10T00:00:00
| 17,700,589
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,655
|
rd
|
Run_JTree.Rd
|
\name{Run_JTree}
\alias{Run_JTree}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Treelet basis/hierarchical tree construction
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Returns information on the simultaneous construction of the treelet orthonormal basis and hierarchical tree, including which nodes were merged at each step and the basis at each specified step of the construction.
}
\usage{
Run_JTree(X, maxlev, whichsave)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
%% ~~Describe \code{X} here~~
the covariance matrix of the data. For example, if using this function on genetics data to improve estimates of heritability, as in the Crossett et al arXiv paper, this argument will be the estimated additive genetic relationship matrix \eqn{\hat{A}}.
}
\item{maxlev}{
%% ~~Describe \code{maxlev} here~~
the maximum height of the tree. This must be an integer between 1 and \code{nrow(X)}-1.
}
\item{whichsave}{
%% ~~Describe \code{whichsave} here~~
a vector containing the levels of the tree, specified as integers between 1 and \code{maxlev}, for which you want to save the basis functions and the covariance matrix.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
This function serves as a wrapper for the functions \code{\link{Build_JTree}} and \code{\link{JTree_Basis}}, which build the hierarchical tree and calculate the basis and covariance matrix at each level, respectively.
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
a list with components
\item{basis}{This is a list with \code{maxlev} elements. Only those elements that are specified in the \code{whichsave} argument will be non-null entries in the list. For the non-null entries, this is the orthonormal treelet basis calculated at that level of the tree.}
\item{Zpos}{A matrix of dimension \code{maxlev} x 2. Each row records which two nodes/clusters of the tree were combined at each step in its construction.}
\item{T}{This is a list with \code{maxlev} elements, where each element is a 2x2 Jacobi rotation matrix for each step of the treelet algorithm.}
\item{PCidx}{A matrix of dimension \code{maxlev} x 2, where each row is a permutation of \eqn{(1,2)} indicating which of the two nodes/clusters merged at that step is the sum variable (value of 1) and which is the difference (value of 2).}
\item{all_nodes}{A matrix of dimension \code{maxlev} x \code{nrow(X)} giving node/cluster labels at each step of the treelet algorithm. A label of zero indicates a node/cluster that was merged with another node/cluster and was the difference variable.}
\item{TreeCovs}{This is a list with \code{maxlev} elements. Only those elements that are specified in the \code{whichsave} argument will be non-null entries in the list. For the non-null entries, this is the covariance matrix calculated at that level of the tree. The covariances in this matrix are those between the weights (orthogonal projections onto local basis vectors) in the basis expansion of the data vector.}
}
\references{
%% ~put references to the literature/web site here ~
\href{http://arxiv.org/abs/1208.2253}{arXiv:1208.2253v1 [stat.AP]}
Lee, AB, Nadler, B, Wasserman, L (2008). Treelets - an adaptive multi-scale basis for sparse unordered data. The Annals of Applied Statistics 2: 435-471. \url{http://www.stat.cmu.edu/~annlee/AOAS137.pdf}
}
\author{
Trent Gaugler \email{gauglert@lafayette.edu}
}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{Build_JTree}}, \code{\link{JTree_Basis}}, \code{\link{TCS}}
}
\examples{
data(Ahat)
out=Run_JTree(Ahat,49,49)
#The information in out$Zpos[1,] and out$all_nodes[1,]
#both show which two individuals were the first merged
#in the tree. The remaining rows give information
#on subsequent merges in the tree.
basis=out$basis[[49]]
cov=out$TreeCovs[[49]]
temp=basis%*%cov%*%t(basis)
#This is how you can use the basis and cov output
#to reconstruct the estimated relationship matrix.
#See how close temp and the original Ahat are:
Ahat1=round(Ahat,14)
temp1=round(temp,14)
sum(Ahat1!=temp1)
#In this example, we do start seeing discrepancies in the 15th digit and beyond.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d0f8ac98d021f5f8fb30ff427cd2f1af9f7ad4c6
|
285c603a0782a4806e14278470640d4dfc085b25
|
/R/vegInd_RGB.R
|
a3afd3b9b3baec808a002e5d2be501b55d3b94a9
|
[] |
no_license
|
SchoenbergA/LEGION
|
6b34ac82132d3e67c5eed274fd688450b0314633
|
b01db2c0fa99602578355e246d7b3bde79a1e187
|
refs/heads/master
| 2023-02-26T15:26:07.459129
| 2021-01-28T15:40:54
| 2021-01-28T15:40:54
| 271,737,635
| 0
| 0
| null | 2020-10-22T09:28:03
| 2020-06-12T07:33:24
|
R
|
UTF-8
|
R
| false
| false
| 5,457
|
r
|
vegInd_RGB.R
|
#' Calculate RGB indices
#' @description computes several indices based on RGB bands
#' @param rgb a RasterStack with RGB bands
#' @param red the band/layer number of band 'red'
#' @param green the band/layer number of band 'green'
#' @param blue the band/layer number of band 'blue'
#' @param indlist comma-separated character combinations of the desired indices. Select from
#' "VVI","VARI","NDTI","RI","CI","BI","SI","HI","TGI","GLI","NGRDI", default=all. See details
#' @return returns a RasterStack with the selected indices
#' @details
#' ## available indices
#' * "VVI" Visible Vegetation Index (1 - abs((red - 30) / (red + 30))) * (1 - abs((green - 50) / (green + 50))) *(1 - abs((blue - 1) / (blue + 1)))
#' * "VARI" Visible Atmospherically Resistant Index (green-red)/(green+red-blue)
#' * "NDTI" Normalized Difference Turbidity Index (red-green)/(red+green)
#' * "RI" Redness Index red^2/(blue*green^3)
#' * "CI" Soil Colour Index (red-green)/(red+green)
#' * "BI" Brightness Index sqrt((red^2+green^2+blue^2)/3)
#' * "SI" Spectra Slope Saturation Index (red-blue)/(red+blue)
#' * "HI" Primary Colours Hue Index (2*red-green-blue)/(green-blue)
#' * "TGI" Triangular Greenness Index -0.5*(190*(red - green)-120*(red - blue))
#' * "GLI" Green Leaf Index (2*green-red-blue)/(2*green+red+blue)
#' * "NGRDI" Normalized Green Red Difference Index (green-red)/(green+red)
#' @author Andreas Schönberg
#' @references
#' The IDB Project (2020): Index Database (https://www.indexdatabase.de/)
#' @examples
#' ### load data
#' require(raster)
#' require(LEGION)
#' mspec <- raster::stack(system.file("extdata","lau_mspec.tif",package = "LEGION"))
#' names(mspec)<- c("blue","green","red","nir")
#' ### compute all vegetation indizes
#' x <-LEGION::vegInd_RGB(mspec,3,2,1)
#' plot(x)
#' ### select specific vegetation indices
#' vi <-c("VVI","SI","GLI")
#' y <-LEGION::vegInd_RGB(mspec,3,2,1,indlist=vi)
#' plot(y)
#' @export vegInd_RGB
#' @aliases vegInd_RGB
vegInd_RGB<- function(rgb,red=NULL,green=NULL,blue=NULL,indlist="all"){
  ### check input
  # names of every supported index; "all" expands to the full set
  supported <- c("VVI","VARI","NDTI","RI","CI","BI","SI","HI","TGI","GLI","NGRDI")
  if(any(indlist=="all")){
    indlist <- supported
  }
  # create notin and check for wrong input
  `%notin%` <- Negate(`%in%`)
  if(any(indlist %notin% supported)) {
    stop("wrong Vegetation Index selected or not supported")
  }
  # all three band positions must be given
  if (is.null(red) || is.null(green) || is.null(blue)) {
    stop("no bands or less bands defined")
  }
  red <- rgb[[red]]
  green <- rgb[[green]]
  blue <- rgb[[blue]]
  # calculate selected indices, one raster layer per selected name
  indices <- lapply(indlist, function(item){
    if (item=="VVI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Visible Vegetation Index (VVI)) ###",sep = "\n")
      VVI <- (1 - abs((red - 30) / (red + 30))) *
        (1 - abs((green - 50) / (green + 50))) *
        (1 - abs((blue - 1) / (blue + 1)))
      names(VVI) <- "VVI"
      return(VVI)
    } else if (item=="VARI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Visible Atmospherically Resistant Index (VARI)) ###",sep = "\n")
      VARI<-(green-red)/(green+red-blue)
      names(VARI) <- "VARI"
      return(VARI)
    } else if (item=="NDTI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Normalized difference turbidity index (NDTI)) ###",sep = "\n")
      NDTI<-(red-green)/(red+green)
      names(NDTI) <- "NDTI"
      return(NDTI)
    } else if (item=="RI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Redness index (RI)) ###",sep = "\n")
      RI<-red^2/(blue*green^3)
      names(RI) <- "RI"
      return(RI)
    } else if (item=="CI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Soil Colour Index (CI)) ###",sep = "\n")
      CI<-(red-green)/(red+green)
      names(CI) <- "CI"
      return(CI)
    } else if (item=="BI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Brightness Index (BI)) ###",sep = "\n")
      # bug fix: was 'blue*2'; the Brightness Index squares all three bands
      BI<-sqrt((red^2+green^2+blue^2)/3)
      names(BI) <- "BI"
      return(BI)
    } else if (item=="SI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Spectra Slope Saturation Index (SI)) ###",sep = "\n")
      SI<-(red-blue)/(red+blue)
      names(SI) <- "SI"
      return(SI)
    } else if (item=="HI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Primary colours Hue Index (HI)) ###",sep = "\n")
      HI<-(2*red-green-blue)/(green-blue)
      names(HI) <- "HI"
      return(HI)
    } else if (item=="TGI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Triangular greenness index (TGI)) ###",sep = "\n")
      TGI <- -0.5*(190*(red - green)- 120*(red - blue))
      names(TGI) <- "TGI"
      return(TGI)
    } else if (item=="GLI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Green leaf index (GLI)) ###",sep = "\n")
      GLI<-(2*green-red-blue)/(2*green+red+blue)
      names(GLI) <- "GLI"
      return(GLI)
    } else if (item=="NGRDI"){
      cat(" ",sep = "\n")
      cat("### LEGION calculating (Normalized green red difference index (NGRDI)) ###",sep = "\n")
      NGRDI<-(green-red)/(green+red)
      names(NGRDI) <- "NGRDI"
      return(NGRDI)
    }
  })
  cat(" ",sep = "\n")
  cat("###########################",sep = "\n")
  cat("### The LEGION is ready ###",sep = "\n")
  return(raster::stack(indices))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.