blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
858ad4990f25304cabff7514144b457649b5f39b
|
d72718d1504349b1f664a32ca3fd0ce848ee19a9
|
/man/determine_winner.Rd
|
f4357938a1cb26e9b49a4e075d6206e4f05aeb48
|
[] |
no_license
|
gzbib/montyhall
|
4052f471268cb544b14c2b5dbe433f85c5bb7017
|
b40661d047b9a0237478cf9e2ffdac607bec8e2a
|
refs/heads/master
| 2022-12-16T17:32:27.839007
| 2020-09-19T14:03:45
| 2020-09-19T14:03:45
| 296,879,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 731
|
rd
|
determine_winner.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-problem (1).R
\name{determine_winner}
\alias{determine_winner}
\title{Determine Winner}
\usage{
determine_winner(final.pick, game)
}
\arguments{
\item{final.pick}{The contestant's final door selection.}

\item{game}{The game setup, i.e. the arrangement of goats and the car behind the doors.}
}
\value{
The function prints whether the contestant has won or lost.
}
\description{
The function determines if the contestant has won or lost based on the
final pick. The contestant could have either stayed on the initial selection
or switched to a new one.
}
\details{
If the final pick by the contestant is a car door then the contestant wins.
However, if the final pick is a goat door, then the contestant loses.
}
\examples{
determine_winner (final.pick,game)
}
|
bc255f38e004c1825f4aad6fa14c7acb894cdef5
|
0a1d7d499dfd3ecb962e02b5615e8373950c4868
|
/4_coevolution/procedure/barfigs.r
|
4e3bc360b0a348475537c9e9f756722e1e3d23d2
|
[] |
no_license
|
xushifen/GCMP_Australia_Coevolution
|
92221e8bffb12a9ff4746fc56150e4c60066e8f8
|
fba13cd74d717727d1990e5848d9c86695454a90
|
refs/heads/master
| 2021-09-21T23:28:30.220838
| 2018-09-02T21:52:34
| 2018-09-02T21:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 314
|
r
|
barfigs.r
|
## Stacked relative-abundance bar chart, one bar per sample, faceted by
## host genus and geographic area. Depends on `assocs` (long-format OTU
## associations with `sample`, `count`, `sample_sum`, `otu` columns) and the
## phyloseq object `pruned` -- both must already exist in the session.
new <- merge(
  assocs,
  sample_data(pruned)[, "host_genus"],
  by.x = "sample",
  by.y = 0,          # join on the sample_data row names
  all.x = TRUE,
  all.y = FALSE
)
# Convert raw counts to within-sample relative abundances.
new$rel <- new$count / new$sample_sum
pl <- ggplot(new, aes(x = sample, y = rel, fill = otu))
pl +
  geom_bar(color = "black", stat = "identity", position = "stack") +
  facet_wrap(~ host_genus + geographic_area, scales = "free_x")
|
281891ef92e113e1d177d578e70a3283769e8db5
|
f29a6df961f150c0fdf554f14ff0bf0cf116acff
|
/ui.R
|
5fd0bb372a85b1be8273f2f6fb4b68fe6b7e6deb
|
[] |
no_license
|
LStepanek/Conway_Game_of_Life
|
d180e397ce7ab47823c182dc675cb25fd1dee7aa
|
8d51f886ac60aa3fe9e34389bb67afc0e82675b3
|
refs/heads/master
| 2021-01-11T20:26:59.476350
| 2017-01-30T14:25:05
| 2017-01-30T14:25:05
| 79,117,638
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,735
|
r
|
ui.R
|
###############################################################################
## ui.R -- Shiny UI for an interactive Conway's Game of Life application.
## Layout: a sidebar whose contents switch with the active tab, plus a
## four-tab main panel (simulation board, live-cell time series, a long
## Czech-language introduction to the Game of Life, and an "about" page).
## All user-facing text is intentionally in Czech.
###############################################################################
## load global variables ------------------------------------------------------
source("global.R")
## ----------------------------------------------------------------------------
###############################################################################
## load the predefined cell patterns ------------------------------------------
source("patterns.R")
## ----------------------------------------------------------------------------
###############################################################################
## load packages --------------------------------------------------------------
library(shiny)
## ----------------------------------------------------------------------------
###############################################################################
## ----------------------------------------------------------------------------
shinyUI(fluidPage(
    ## ------------------------------------------------------------------------
    ###########################################################################
    ## busy indicator: style.css + busy.js toggle the div.busy overlay while
    ## the server is computing -------------------------------------------------
    tagList(
        tags$head(
            tags$link(rel = "stylesheet",
                      type = "text/css",
                      href = "style.css"),
            tags$script(type = "text/javascript",
                        src = "busy.js")
        )
    ),
    div(class = "busy",
        p("Aplikace je zaneprázdněná..."),
        img(src = "busy_indicator.gif")
    ),
    ## nicely styled page header with title, subtitle and author links --------
    div(id = "header",
        div(id = "title", "Conwayova hra života"),
        div(id = "subsubtitle",
            "Implementace",
            tags$a(
                href = "http://cs.wikipedia.org/wiki/Hra_života",
                "známého celulárního automatu",
                target = "_blank"
            ),
            HTML("•"),
            "Vytvořil",
            tags$a(
                href = "http://www.fbmi.cvut.cz/user/stepalu2",
                "Lubomír Štěpánek",
                target = "_blank"
            )
        )
    ),
    ## ------------------------------------------------------------------------
    ###########################################################################
    sidebarLayout(
        sidebarPanel(
            #####################################################################
            ## sidebar for tab 1 (simulation): pattern picker + start/step
            ## controls; `input.conditionedPanels` carries the active tab value
            conditionalPanel(
                condition = "input.conditionedPanels == 1",
                selectInput(inputId = "my_pattern",
                            label = "Vyberte rozestavení buněk:",
                            choices = c("náhodné buňky" = "random_cells",
                                        "blok" = "block",
                                        "včelín" = "beehive",
                                        "bochník" = "loaf",
                                        "loď" = "boat",
                                        "dvojblok" = "biblock",
                                        "blikač" = "blinker",
                                        "ropucha" = "toad",
                                        "maják" = "beacon",
                                        "pulzar" = "pulsar",
                                        "křídlo" = "glider",
                                        "lehká hvězdná loď" = "LWSS",
                                        "Gosperovo křídlové dělo" = paste("Gosper",
                                                                          "glider",
                                                                          "gun",
                                                                          sep = "_"),
                                        "R-pentomino" = "R_pentomino",
                                        "žalud" = "acorn",
                                        "králíci" = "rabbits",
                                        "benátské rolety" = "venetian_blinds",
                                        "zápalná šňůra" = "fuse"),
                            selected = 1),
                uiOutput(outputId = "covering_percentage_of_board"),
                tags$hr(),
                "Kliknutím spustíte nový celulární automat.",
                tags$br(),
                tags$br(),
                actionButton(inputId = "start_button",
                             label = "Start!",
                             width = 150),
                tags$hr(),
                uiOutput("my_text_origination"),
                tags$br(),
                uiOutput(outputId = "my_step_button_origination")
            ),
            ## ------------------------------------------------------------------
            #####################################################################
            ## sidebar for tab 2 (live-cell counts over time) -------------------
            conditionalPanel(
                condition = "input.conditionedPanels == 2",
                HTML("<b>Diagram závislosti počtu živých buněk na čase.</b>"),
                tags$br(),
                tags$br(),
                "Diagram se týká aktuálně simulovaného celulárního automatu",
                "v záložce 'Simulace'."
            ),
            ## ------------------------------------------------------------------
            #####################################################################
            ## sidebar for tab 3 (introduction text) ----------------------------
            conditionalPanel(
                condition = "input.conditionedPanels == 3",
                HTML("<b>Stručný úvod ke Conwayově hře života</b>")
            ),
            ## ------------------------------------------------------------------
            #####################################################################
            ## sidebar for tab 4 (about the application) ------------------------
            conditionalPanel(
                condition = "input.conditionedPanels == 4",
                HTML("<b>Stručně o aplikaci</b>")
            ), width = 3
            ## ------------------------------------------------------------------
            #####################################################################
        ),
        ## ----------------------------------------------------------------------
        #########################################################################
        ## ----------------------------------------------------------------------
        mainPanel(
            tabsetPanel(
                #################################################################
                ## tab 1: the simulated board itself ----------------------------
                tabPanel(
                    title = HTML("<b>Simulace</b>"),
                    plotOutput("my_printable_board"),
                    value = 1
                ),
                ## --------------------------------------------------------------
                #################################################################
                ## tab 2: number of alive cells vs. time ------------------------
                tabPanel(
                    title = HTML("<b>Počet živých buněk v čase</b>"),
                    plotOutput("number_of_alive_cells_vs_time"),
                    value = 2
                ),
                ## --------------------------------------------------------------
                #################################################################
                ## tab 3: long-form introduction to the Game of Life (static
                ## Czech HTML; typos in these strings are runtime content and
                ## are deliberately left untouched) ------------------------------
                tabPanel(
                    title = HTML("<b>O Conwayově hře života</b>"),
                    HTML("<h2>Conwayova hra života jako celulární automat</h2>"),
                    HTML(
                        "Conwayova hra života, běžně zvaná jen <i>Hra života</i> či jen",
                        "<i>Život</i>",
                        "je známý dvourozměrný celulární automat, který má svým chováním",
                        "připomínat vývoj kolonie konečného počtu jednoduchých",
                        "(jednobuněčných) živých organismů (buněk) v čase. Na počátku",
                        "je dána pouze iniciální konfigurace rozestavení (a počtu) buněk",
                        "a systém jednoduchých a neměnných pravidel, které říkají, za",
                        "jakých podmínek může mrtvá buňka oživnou a naopak. Konfigurace",
                        "je v každém časovém okamžiku prezentována maticí, kdy hodnoty",
                        "matice rovné 1 představují buňky, jež jsou v daném okamžiku",
                        "živé, a hodnoty matice rovné 0 představují naopak ty, které",
                        "jsou v daném okamžiku mrtvé. Vývoj konfigurace a počtu",
                        "žijících buněk v dalších časových okamžicích, kdy čas je chápán",
                        "diskrétně, je iterativně aktualizován pro každou buňku matice",
                        "podle daných pravidel, tím pádem je již plně determinován.",
                        "Dopředu je však vývoj pro velkou část vstupních konfigurací",
                        "nevypočitatelný, je tedy nutné jej krok po kroku simulovat."
                    ),
                    tags$br(),
                    tags$br(),
                    HTML(
                        "Myšlenka celulárních automatů se datuje do roku 1940, kdy",
                        "první koncepty navrhl známý maďarský matematik John von Neumann",
                        "ve snaze vytvořit stroj, který by repdoukoval sám sebe.",
                        "Implementace automatů vša tou dobou narážela na omezené možnosti",
                        "výpočetní techniky, proto zájem o další vývoj opadl a byl oživen",
                        "až v 70. letech. Autorem samotné Hry života je britský",
                        "matematik John Horton Conway, emeritní profesor na Princetonské",
                        "univerzitě, který precisoval její pravidla v roce 1970. Díky",
                        "relativně snadné uchopitelnosti konceptu Hry života se získal",
                        "tento typ celulárního automatu oblibu i mimo vědeckou komunitu",
                        " -- vnikaly různé výzvy o tvorbu iniciální konfigurace buněk s",
                        "nějakou <i>danou</i> vlastností, kterých se účastnila i široká",
                        "veřejnost. Díky zájmu o <i>Hru života</i> vznikl i časopis",
                        "věnovaný přímo problematice diskrétního celulárního automatu.",
                        "Zajímavými se tehdy jevily především tyto dvě otázky, obě",
                        "vyslovil sám Conway:"
                    ),
                    tags$br(),
                    tags$br(),
                    HTML("<ol>
<li>Existuje nějaká (vstupní) konečná konfigurace buněk,
která může neomezeně růst (velikostí, ne nutně počtem buněk),
i nad limity dané velikostí mřížky? Pro větší mřížku tedy,
konfigurace dosáhne opět hranic mřížky.</li>
<li>Existuje nějaká konečná konfigurace buněk, která se,
vyskytuje pouze v druhé generaci a poté již ne? Jde též
o problém zvaný <i>The Grandfather Problem</i>.</li>
</ol>"),
                    tags$br(),
                    HTML(
                        "Pro první otázku byly takové konfigurace již nalezeny,",
                        "autorem jedné z nich je William Gosper; jeho konfigurace",
                        "řešící první otázku je nazývána <i>Gosper glider gun</i>,",
                        "česky nejspíše <i>Gosperovo křídlové dělo</i>; to je",
                        "mimochodem implementováno i v naší aplikace (viz dále).",
                        "Na druhou otázku není známá odpověď dodnes."
                    ),
                    tags$br(),
                    tags$br(),
                    HTML(
                        "Zajímavých otázek a výzkumných problémů je celá řada --",
                        "jsou například zkoumány konfigurace, které mají právě <i>k</i>",
                        "různých stavů, jež se periodicky střídají (s periodou <i>k</i>);",
                        "tedy že dva stavy konfigurace v okamžicích <i>i</i> a",
                        "<i>i + k</i> pro všechna <i>i</i> ∈ <i>Z</i> a dané",
                        "<i>k</i> ∈ <i>N</i> jsou zcela shodné."
                    ),
                    tags$br(),
                    tags$br(),
                    HTML(
                        "V průběhu každé konkrétní hry (tedy pro danou iniciální",
                        "konfiguraci buněk) mohou vznikat různě komplexní sestavy buněk.",
                        "I přes jednoduchá pravidla je složitost vznikajících sestav",
                        "buněk a složitost změn mezi jendotlivými sousedními časovými",
                        "kroky značná; v tomto smyslu jsou někdy celulární automaty",
                        "považovány za diskrétní analogie spojitých komplexních",
                        "nelineárních systémů, které studuje nelineární dynamika",
                        "(z této oblasti pochází populárně známé pojmy jako",
                        "<i>chaos</i> či <i>butterfly-wing effect</i>)."
                    ),
                    tags$br(),
                    tags$br(),
                    HTML(
                        "Některé iniciální či vzniklé sestavy buněk mají naopak",
                        "chování (tedy vývoj v čase) dobře predikovatelné, mnohdy",
                        "bylo spočítáno a známo dřív, než byla vůbec technicky možná",
                        "první solidní implementace <i>Hry života</i>. Jindy bylo",
                        "pozorování vypozorováno až empiricky sledováním vývoje dané",
                        "hry. Kategorie některých sestav buněk podle typu chování",
                        "(tzv. <i>tvary</i>) budou probrány dále."
                    ),
                    tags$br(),
                    tags$br(),
                    "Celulární automaty obecně jsou aplikovány jako modely v",
                    "experimentální fyzice či biologii, díky vztahům mezi",
                    "jednoduchými a komplexními konfiguracemi pomocí jednoduchých",
                    "pravidel lze celulární automaty použít i v kompresi dat,",
                    "např. v některých zvukových formátech.",
                    HTML("<h3>Prostor a pravidla hry</h3>"),
                    HTML(
                        "Prostorem hry je dvourozměrná matice, též nazývaná mřížka.",
                        "V reálných simulacích včetně naší musíme obvykle vystačit s",
                        "konečnou mřížkou; hypoteticky lze ale uvažovat i nekonečně",
                        "velkou dvourozměrnou matici. Hodnotami matice jsou obecně",
                        "jedničky, resp. nuly představující živé, resp. mrtvé buňky.",
                        "Rozestavení živých (a mrtvých) buněk na mřížce se nazývá",
                        "konfigurace či vzor nebo tvar. Čas je vnímán diskrétně,",
                        "okamžik 0 odpovidá iniciálnímu (vstupnímu) stavu, pro",
                        "každý přechod do následujícího okamžiku je podle daných",
                        "pravidel (podle tzv. <i>přechodové funkce</i>) pro každou",
                        "buňku spočítáno, jestli bude i v následujícím okamžiku živá,",
                        "či mrtvá. O tom v zásadě rozhoduje to, zda je buňka v daném",
                        "okamžiku živá a jaký je počet živých buněk v jejím těsném",
                        "okolí (tj. v osmici polí matice, které sousedí s danou",
                        "buňkou alespoň rohem). Probíhá-li <i>Hra života</i> na",
                        "matici <i>m x n</i>, pak je počet všech navzájem možných",
                        "konfigurací, do kterých může hra teoreticky dojít, roven",
                        "2<sup><i>mn</i></sup>, neboť každá z <i>mn</i> buněk je buďto",
                        "živá, nebo mrtvá."
                    ),
                    tags$br(),
                    tags$br(),
                    HTML(
                        "Původní pravidla přechodové funkce <i>Hry života</i>",
                        "definoval již v roce 1970 sám profesor Conway. Jedná se o",
                        "čtveřici relativně jednoduchých pravidel:"
                    ),
                    tags$br(),
                    tags$br(),
                    HTML("<ol>
<li>Každá živá buňka s méně než dvěma živými sousedy zemře.</li>
<li>Každá živá buňka se dvěma nebo třemi živými sousedy zůstává
žít.</li>
<li>Každá živá buňka s více než třemi živými sousedy zemře.</li>
<li>Každá mrtvá buňka s právě třemi živými sousedy oživne.</li>
</ol>"),
                    tags$br(),
                    "Postupně vznikla celá řada variací původních pravidel; především",
                    "jsou diskutována taková, která zajišťují, že vývoj konfigurace v",
                    "čase není dopředu předvídatelný, či zajišťují dlouhodobé přežití",
                    "populace. Jejich uvedení je však nad rámec tohoto textu.",
                    tags$br(),
                    tags$br(),
                    "Aplikace pravidel je na jedné z možných konfigurací předvedena",
                    "na následujícím obrázku.",
                    tags$br(),
                    tags$br(),
                    img(src = "progress.jpg", align = "center", width = "500px"),
                    HTML("<figcaption>Vývoj jedné z možných konfigurací</figcaption>"),
                    tags$br(),
                    tags$br(),
                    HTML("<h3>Přehled tvarů</h3>"),
                    ## gallery of pattern categories; image files live in www/
                    HTML(
                        "<ul>
<li><b>Zátiší (Still life)</b>. Jedná se o stabilní konfigurace,
které jsou vždy i svým vlastním rodičem, tj. ve dvou po sobě
následujících okamžicích je taková konfigurace zcela totožná.
Proto jsou někdy nazývány též jako invariantní formy. Patří
sem např. blok (block), včelín (beehive), bochník (loaf),
loď (boat) či dvojblok (bi-block)
<ul><li>
<br>
<figure>
<img src='block.jpg' align = 'center', width = '100px'/>
<figcaption>Blok (block)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='beehive.jpg' align = 'center', width = '100px'/>
<figcaption>Včelín (beehive)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='loaf.jpg' align = 'center', width = '100px'/>
<figcaption>Bochník (loaf)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='boat.jpg' align = 'center', width = '100px'/>
<figcaption>Loď (boat)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='bi_block.jpg' align = 'center', width = '100px'/>
<figcaption>Dvojblok (bi-block)</figcaption>
</figure>
</li>
</ul>
</li>
<br>
<br>
<li><b>Oscilátory (Oscillators).</b> Oscilátor je nestabilní
vzor, který je sám sobě předchůdcem, tj. vyvinul se sám
ze sebe po konečném počtu časových okamžiků. Oscilátory
pravidelně přechází mezi konstantním počtem konfigurací,
po počtu okamžiků rovným periodě oscilátoru se oscilátor
vrací do své původní konfiguraci. Oscilátory s periodou
2 jsou někdy nazývány alternátory -- mezi ně patří blikač
(blinker), ropucha (toad) či maják (beacon). Periodu 3 má
pulzar (pulsar)
<ul><li>
<br>
<figure>
<img src='blinker.jpg' align = 'center', width = '100px'/>
<figcaption>Blikač (blinker)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='toad.jpg' align = 'center', width = '100px'/>
<figcaption>Ropucha (toad)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='beacon.jpg' align = 'center', width = '100px'/>
<figcaption>Maják (beacon)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='pulsar.jpg' align = 'center', width = '200px'/>
<figcaption>Pulsar (pulsar)</figcaption>
</figure>
</li>
</ul>
</li>
<br>
<br>
<li><b>Děla (guns).</b> Jde o stacionární vzor, který
donekonečna produkuje posunující se vzory. Příkladem je
již zmíněné </i>Gosperovo křídlové dělo</i>.</li>
<ul><li>
<br>
<figure>
<img src='Gosper_glider_gun.jpg' align = 'center',
width = '500px'/>
<figcaption>Gosperovo křídlové dělo (Gosper glider gun)
</figcaption>
</figure>
</li></ul>
</li>
<br>
<br>
<li><b>Posunující se vzory (Spaceships).</b> Jedná se o
pohybující se vzor, který se znovu objevuje po konečném
počtu časových okamžiků. Protože je zřejmě maximální možnou
rychlostí posunu vzoru rychlost 1 buňka/1 časový okamžik,
je někdy taková rychlost označovaná za rychlost světla a
míra posunu každého posunujícího se vzoru se uvádí jako
podíl rychlosti světla. Mezi posunující se vzory patří
křídlo (glider) a lehká hvězdná loď (LWSS).</li>
<ul><li>
<br>
<figure>
<img src='glider.jpg' align = 'center', width = '100px'/>
<figcaption>Křídlo (glider)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='LWSS.jpg' align = 'center', width = '100px'/>
<figcaption>Lehká hvězdná loď (LWSS)</figcaption>
</figure>
</li>
</ul>
</li>
<br>
<br>
<li><b>Metuzalémové (Methuselahs).</b> Jde o jakýkoliv malý vzor,
jehož stabilizace trvá dlouhou dobu. Např. R-pentomino se
stabilizuje až po 1103 generacích, žalud (accorn) po 5206
generacích a králíkům (rabbits) přechod do stabilního stavu
trvá 17332 generací</li>
<ul><li>
<br>
<figure>
<img src='R_pentomino.jpg' align = 'center', width = '100px'/>
<figcaption>R-pentomino</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='acorn.jpg' align = 'center', width = '200px'/>
<figcaption>Žalud (acorn)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='rabbits.jpg' align = 'center', width = '200px'/>
<figcaption>Králíci (rabbits)</figcaption>
</figure>
</li>
</ul>
</li>
<br>
<br>
<li><b>Agary (Agars).</b> Jsou vzory, které pokrývají celou
plochu či její velkou část a pediodicky se mění. Příkladem
jsou </i>benátské záclony</i> (venetian blinds)</li>
<ul><li>
<br>
<figure>
<img src='venetian_blinds.jpg' align = 'center',
width = '200px'/>
<figcaption>Benátské záclony (venetian blinds)</figcaption>
</figure>
</li>
<br>
<li>
<figure>
<img src='venetian_blinds_processed.jpg' align = 'center',
width = '200px'/>
<figcaption>Benátské záclony (v 8. časovém okamžiku)
</figcaption>
</figure>
</li>
</ul>
</li>
<br>
<br>
<li><b>Knoty (Wicks).</b> Vzory složené ze zátiší či
oscilátorů,
což ve výsledku vrací efekt uhořívající zápalné šňůry.
Příkladem je <i>zápalná šňůra</i> (fuse)</li>
<ul><li>
<br>
<figure>
<img src='fuse.jpg' align = 'center',
width = '200px'/>
<figcaption>Zápalná šňůra (fuse)</figcaption>
</figure>
</li>
</ul>
</li>
</ul>"
                    ),
                    value = 3
                ),
                ## --------------------------------------------------------------
                #################################################################
                ## tab 4: about the application (was mislabeled "third tab" in
                ## the original comments) ---------------------------------------
                tabPanel(
                    title = HTML("<b>O aplikaci</b>"),
                    HTML("<h3>Poděkování</h3>"),
                    "Veškerý kredit jde autorům celulárních automatů a",
                    "autorům jazyka a prostředí R. Až v poslední řadě",
                    "autorovi aplikace.",
                    tags$hr(),
                    HTML("<h3>Náměty a bug reporting</h3>"),
                    "Svoje náměty, připomínky či upozornění na chyby můžete",
                    "směřovat na",
                    tags$br(),
                    tags$br(),
                    HTML(
                        "<a href='http://www.fbmi.cvut.cz/user/stepalu2'
target='_blank'>
<b>Lubomír Štěpánek, M. D.</b></a>"
                    ),
                    tags$br(),
                    "Katedra biomedicínské informatiky",
                    tags$br(),
                    "Fakulta biomedicínského inženýrství",
                    tags$br(),
                    "České vysoké učení technické v Praze",
                    tags$br(),
                    HTML("<a href='mailto:lubomir.stepanek@fbmi.cvut.cz'>
lubomir.stepanek[AT]fbmi[DOT]cvut[DOT]cz</a>"),
                    tags$br(),
                    value = 4
                ),
                ## --------------------------------------------------------------
                #################################################################
                ## --------------------------------------------------------------
                ## id under which the active tab's `value` is exposed to the
                ## conditionalPanel() conditions above
                id = "conditionedPanels"
                ## --------------------------------------------------------------
            ), width = 9
        )
    )
))
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
|
5fcc5521796bfe6b9d00723ea2cf18605463c0ad
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Biograph/R/locpath.R
|
9fff7e1d5756c77e25481973f18790c156bad20a
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99
|
r
|
locpath.R
|
#' Locate the "path" column in a Biograph data frame
#'
#' @param Bdata A data frame (Biograph-format data) whose column names are
#'   searched for the state-sequence column \code{"path"}.
#' @return Integer index of the column named \code{"path"}; \code{integer(0)}
#'   when no such column exists (callers should check \code{length()}).
locpath <- function(Bdata) {
  # The original call passed `arr.ind = TRUE`, which is a no-op here:
  # colnames() returns a plain character vector, never an array.
  which(colnames(Bdata) == "path")
}
|
975f66c1c91dd620d85a4bf8936880a1de1d7f89
|
9dbdfee7a2737ae5144650a0675cec521e8d41cd
|
/RawtoTPM.R
|
11346a9fccca182982d08f4f21409ec1fe3a678c
|
[] |
no_license
|
dmworstell/GTEx_HML2
|
51bb3e89de92651124d8f75efe7576c7de21532a
|
859d16fd0175f724a022dfafad2b554992bad2cc
|
refs/heads/main
| 2023-04-16T15:32:30.529978
| 2021-05-05T22:37:00
| 2021-05-05T22:37:00
| 364,721,318
| 0
| 0
| null | 2021-05-05T22:27:46
| 2021-05-05T22:27:46
| null |
UTF-8
|
R
| false
| false
| 3,230
|
r
|
RawtoTPM.R
|
## RawtoTPM.R -- convert raw Telescope read counts for one GTEx tissue into
## TPM, filter lowly-expressed proviruses, and write TPM and log2(TPM + 1)
## tables to CSV. Commented "#FENRIR" lines are the alternative code path for
## FENRIR-formatted input and are kept for reference.
library(data.table)
library(Biobase)
library(edgeR)
library(org.Hs.eg.db)
library(DESeq2)
library(GenomicAlignments)
library(GenomicRanges)
library(Rsamtools)
library(refGenome)
library(plyr)
## NOTE(review): hard-coded user-specific working directory -- consider making
## this a parameter or using relative paths.
setwd("/Users/far122/Desktop/GTEx_HML2_Expression")
Tissue <- "Adipose_Subcutaneous"
## Load raw counts (rows = proviruses/genes, first column "X" = feature id,
## remaining columns = samples) and rename rows to reflect provirus name.
counts <- read.csv(paste(Tissue, "Telescope_output.csv", sep = "_"), header = TRUE, sep = ",")
head(counts[, 1:5])
tail(counts[, 1:5])
#counts_mod = counts[,-1] #FENRIR only
#rownames(counts_mod) = counts[,1] #FENRIR only
#head(counts_mod[,1:5]) #FENRIR only
## Convert to TPM and filter low-expressed genes.
## Read hg38.gtf for transcript coordinates.
ens <- ensemblGenome()
read.gtf(ens, "hg38.gtf")
class(ens)
Identifier <- "exon"
hg38_Annotations <- extractFeature(ens, Identifier)
hg38_Annotations
hg38_Annotation_df <- data.frame(start = getGtf(hg38_Annotations)$start, end = getGtf(hg38_Annotations)$end, gene_id = getGtf(hg38_Annotations)$gene_id, transcript_id = getGtf(hg38_Annotations)$transcript_id)
hg38_Annotation_df[1:5, ]
## Per-exon length in bp.
hg38_Annotation_df$difference <- hg38_Annotation_df$end - hg38_Annotation_df$start
hg38_Annotation_df[1:5, ]
## Sum exon lengths per gene_id to get transcript length in kilobases.
#hg38_Annotation_df_lengthSum = ddply(hg38_Annotation_df, .(transcript_id), summarise, difference=(sum(difference)/1000)) #FENRIR
hg38_Annotation_df_lengthSum <- ddply(hg38_Annotation_df, .(gene_id), summarise, difference = (sum(difference) / 1000)) #with Telescope
hg38_Annotation_df_lengthSum[1:5, ]
## Divide read counts by gene length in kb to get reads per kilobase (RPK).
#stuff = merge(counts, hg38_Annotation_df_lengthSum, by.x = "X", by.y = "transcript_id") #FENRIR
stuff <- merge(counts, hg38_Annotation_df_lengthSum, by.x = "X", by.y = "gene_id") #with Telescope
head(stuff)
tail(stuff)
stuff_mod <- stuff[, -1]
rownames(stuff_mod) <- stuff[, 1]
head(stuff_mod)
tail(stuff_mod)
#featured_genes = subset(hg38_Annotation_df_lengthSum, gene_id %in% rownames(counts_mod))
#head(featured_genes)
## Row-wise division: each sample column is divided by the length column.
## NOTE(review): the `difference` column itself is carried along (it becomes
## all 1s), so it also appears in the written TPM tables.
RPK <- stuff_mod / (stuff_mod$difference)
head(RPK)
## Per-sample "per million" scaling factor = column sums of RPK / 1e6.
Per_Million <- (colSums(RPK)) / 1e6
Per_Million
## Divide RPK by the per-million scaling factor (column-wise) to get TPM.
#Counts_HML2_TPM = RPK/Per_Million
Counts_HML2_TPM <- t(t(RPK) / Per_Million)
Counts_HML2_TPM
## Filter lowly-expressed features.
## BUG FIX (review): the original first computed
##   Counts_HML2_TPM[, -which(rowMeans(Counts_HML2_TPM < 1) > 0.5)]
## which (a) applied a ROW-based mask to COLUMNS and (b) would drop every
## column when the mask was empty (`-integer(0)` selects zero columns).
## Its result was immediately overwritten by the `keepgenes` filter below,
## so the dead assignment has been removed; the effective filter (TPM > 0.5
## in more than a quarter of samples) is unchanged.
keepgenes <- rowSums(Counts_HML2_TPM > 0.5) > ncol(Counts_HML2_TPM) / 4
Counts_HML2_TPM_Filtered <- Counts_HML2_TPM[keepgenes, ]
dim(Counts_HML2_TPM_Filtered)
Counts_HML2_TPM_Filtered
write.csv(Counts_HML2_TPM_Filtered, file = paste(Tissue, "TPM_HML2.csv", sep = "_"))
## log2(TPM + 1) transform for downstream analyses.
Counts_HML2_TPM_Filtered_Log2Trans <- log2(Counts_HML2_TPM_Filtered + 1)
head(Counts_HML2_TPM_Filtered_Log2Trans)
write.csv(Counts_HML2_TPM_Filtered_Log2Trans, file = paste(Tissue, "TPM_Log2_HML2.csv", sep = "_"))
|
08ae6fb9253699da6c11850b3f8bae00ddbe0018
|
b5b9e40aa3f9fa46e62c0235e592766995ac8376
|
/R/band.R
|
20745522e1402172506dfe41fc051c4650fe3624
|
[] |
no_license
|
dongyi1996/exploreRGEE
|
50adbb355d03037b51f1e2ded5b391e39542ac78
|
1ce4a689ddb1d30497995d851b1ba93c971dc0c5
|
refs/heads/main
| 2023-04-18T21:48:13.284439
| 2021-05-11T20:42:27
| 2021-05-11T20:42:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,203
|
r
|
band.R
|
#' Extract Regions by Date
#' @description This function allows the user to pass a previously created get_*() object to get
#' a time series from a collection, e.g. banding/toBands. This uses the \link[rgee]{ee_extract} function from rgee.
#' @param data A previously created get_* object
#' @param geeFC A known GEE FeatureCollection or asset, e.g. "USGS/WBD/2017/HUC12"
#' @param scale \code{numeric} value indicating what to reduce the regions by, e.g. 800 (m) default.
#' @param band A \code{character} indicating what bands/type to use when you have more than one.
#' @param temporal A \code{character} indicating what temporal filter to use on the collection, e.g. 'yearly' (default), 'monthly', 'year_month', 'all'.
#' @param stat A \code{character} indicating what to reduce the imageCollection when using temporal filtering, e.g. 'median' (default), 'mean', 'max', 'min', 'sum', 'stdDev', 'first'.
#' @param lazy \code{logical} whether to run a 'sequential' future in the background or not.
#' @param fun A earth engine reducer, e.g. ee$Reducer$median() (default).
#' @param variable \code{character} indicating what to facet ggplot by. Need to know ahead of time.
#' @param ggplot \code{logical} TRUE/FALSE. Whether to print side-effect ggplot. See details.
#' @param save.plot \code{logical} TRUE/FALSE. Whether to save the plot in a list, e.g. data + ggplot.
#' @note Goal is to add more capabilities to this function in the future by using map() server-side, e.g. monthly and annual filters. Faster with points or centroids of polygons.
#' If lazy is TRUE, the function will be run in the background.
#' @return A \code{data.frame} and a side-effect plot (if ggplot = TRUE); unless using \code{save.plot} then a list with \code{data.frame} and ggplot.
#' @importFrom rlang .data
#' @export
#'
#' @examples \dontrun{
#'
#' # Load Libraries
#'
#' library(rgee)
#' rgee::ee_intialize()
#' library(exploreRGEE)
#'
#' # Bring in data
#' huc <- exploreRGEE::huc
#'
#' ld8 <- get_landsat(huc, method = 'ld8', startDate = '2014-01-01',
#' endDate = '2018-12-31', c.low = 6, c.high = 11)
#'
#' # without plotting save to object
#' ld8_ts <- ld8 %>% band(scale = 30, band = 'NDVI')
#'
#' # with plotting as side-effect
#' ld8 %>% band(scale = 30, band = 'NDVI', ggplot = TRUE, variable = 'name')
#'
#' # save both plot and data
#' ld8_ts <- ld8 %>% band(scale = 30, band = 'NDVI',
#' ggplot = TRUE, variable = 'name',
#' save.plot = TRUE)
#'
#' }
band <- function(data, geeFC = NULL, scale, band = NULL,
                 temporal = 'yearly', stat = 'median', lazy = FALSE,
                 fun = ee$Reducer$median(), variable = NULL,
                 ggplot = FALSE, save.plot = FALSE) {

  if (missing(data)) {stop("Need a get_* object to use this function")}
  # inherits() is robust when an object carries more than one class, unlike
  # `class(x) == "..."`, which yields a vector and errors in if() on R >= 4.2.
  if (inherits(data, c('diff_list', 'terrain_list', 'ee.image.Image'))) {
    stop("Can't band with this type of list")
  }
  if (!temporal %in% c('yearly', 'monthly', 'year_month', 'all')) {
    stop("Need correct temporal argument")
  }

  # dissecting the passed get_*() object
  # (the original assigned data$imageCol twice; once is enough)
  aoi <- data$aoi
  imageCol <- data$imageCol
  startDate <- data$startDate
  endDate <- data$endDate
  image <- data$data
  geom <- data$geom
  method <- data$method
  param <- data$param
  c.low <- data$c.low
  c.high <- data$c.high

  if (is.null(param) && is.null(band)) {stop("Need to choose a band name.")}
  if (is.null(param)) {
    # No default parameter on this collection: select the user-supplied band.
    imageCol <- imageCol$select(band)
    param <- band
  }

  # Apply the requested temporal reduction; 'all' leaves the collection as-is.
  imageCol <- switch(temporal,
    yearly     = year_filter(startDate = startDate, endDate = endDate,
                             imageCol = imageCol, stat = stat),
    monthly    = month_filter(c.low = c.low, c.high = c.high,
                              imageCol = imageCol, stat = stat),
    year_month = year_month_filter(startDate = startDate, endDate = endDate,
                                   c.low = c.low, c.high = c.high,
                                   imageCol = imageCol, stat = stat),
    all        = imageCol
  )

  # Region setup: the aoi itself, or a user-supplied FeatureCollection
  # (with a special aoi-clipped setup for the lazy/background path).
  if (is.null(geeFC)) {
    reg <- sf_setup(aoi)
  } else if (isTRUE(lazy)) {
    reg <- geeFC_setup_aoi(aoi, geeFC)
  } else {
    reg <- geeFC_setup(aoi, geeFC)
  }

  if (isTRUE(lazy)) {
    # Run the extraction in a background 'sequential' future and restore the
    # user's future plan on exit; the future object is returned to the caller.
    prev_plan <- future::plan(future::sequential, .skip = TRUE)
    on.exit(future::plan(prev_plan, .skip = TRUE), add = TRUE)
    future::future({
      fut_band_func(imageCol = imageCol, data = data, reg = reg, fun = fun,
                    scale = scale, param = param, method = method,
                    tmp_type = temporal)
    }, lazy = TRUE)
  } else {
    band_func(imageCol = imageCol, reg = reg, fun = fun, scale = scale,
              param = param, method = method, data = data,
              startDate = startDate, endDate = endDate, stat = stat,
              save.plot = save.plot, ggplot = ggplot, variable = variable,
              c.low = c.low, c.high = c.high, tmp_type = temporal)
  }
}
# function for getting the banding (future/lazy path)
# Splits the regions into chunks of ~10 features and extracts each chunk with
# its own ee_extract() call, pausing briefly between requests, so that
# long-running extractions can run inside a background future.
fut_band_func <- function(imageCol, data, reg, fun, scale, param, method, tmp_type){

# Number of ~10-feature chunks; reg$aoi is the sf table built by the caller.
n_lists <- nrow(reg$aoi)/10
# Nest the rows into a list of data frames, one per chunk.
# NOTE(review): dplyr::pull(data) pulls the nested list-column named "data"
# created by tidyr::nest(); the name happens to shadow the `data` argument.
reggy <- reg$aoi %>%
        dplyr::group_by((dplyr::row_number()-1) %/% (dplyr::n()/n_lists))%>%
        tidyr::nest() %>% dplyr::pull(data)

final_proc <- data.frame()

for(i in 1:length(reggy)){

aoi <- reggy[[i]]

# Collapse the collection to one multi-band image, then reduce per region.
tB <- imageCol$toBands()

data_tb <- rgee::ee_extract(tB, aoi, fun = fun, scale = scale)

# Band columns are named "<prefix>_<param>"; pivot them to long format and
# normalise the Date column via getting_proc().
param_name <- paste0("_", param)

proc <- data_tb %>% tidyr::pivot_longer(dplyr::contains(param_name), names_to = "Date", values_to = param)

proc <- getting_proc(data = data, proc = proc, param_name = param_name, method = method, tmp_type = tmp_type)

# Accumulate chunk results; rbind.fill tolerates differing column sets.
final_proc <- plyr::rbind.fill(proc, final_proc)
# Brief pause between chunks to avoid hammering the Earth Engine API.
Sys.sleep(1/100)
}

final_proc
}
# Extract a long-format time series for the regions and optionally plot it.
#
# Bug fixed vs. the original: `proc_ggplot` was only created inside the
# `ggplot == TRUE` branch, so calling with save.plot = TRUE and
# ggplot = FALSE raised "object 'proc_ggplot' not found". The plot is now
# built whenever it is needed (for printing or for saving), and the saved
# plot carries the same labels as the printed one.
band_func <- function(imageCol, reg, fun, scale, param, method, data,
                      startDate, endDate, stat, save.plot, ggplot, variable,
                      c.low, c.high, tmp_type){

  # Collapse the collection to a single multi-band image and reduce per region.
  tB <- imageCol$toBands()
  data_tb <- rgee::ee_extract(x = tB, y = reg$reg, fun = fun, scale = scale)

  # Band columns are named "<prefix>_<param>"; pivot them into long format
  # and normalise the Date column for the chosen temporal filter.
  param_name <- paste0("_", param)
  proc <- data_tb %>%
    tidyr::pivot_longer(dplyr::contains(param_name),
                        names_to = "Date", values_to = param)
  proc <- getting_proc(data = data, proc = proc, param_name = param_name,
                       method = method, tmp_type = tmp_type)

  # Build the plot once, only when it will be printed and/or returned.
  proc_ggplot <- NULL
  if (isTRUE(ggplot) || isTRUE(save.plot)) {
    proc_ggplot <- plot_proc(proc = proc, param_v = param,
                             facet_col_var = variable) +
      ggplot2::labs(
        title = paste0(method, " ", param, ' ', stat, " values for date range: "),
        subtitle = paste0("Years: ", stringr::str_remove(startDate, "(-).*"),
                          " - ", stringr::str_remove(endDate, "(-).*"),
                          "; Months: ", c.low, " - ", c.high),
        y = paste0(param, ' values'), color = "ID")
  }

  if (isTRUE(ggplot)) {
    # Side-effect plot, as documented for band().
    print(proc_ggplot)
  }

  if (isTRUE(save.plot)) {
    return(list(proc = proc, proc_ggplot = proc_ggplot))
  }
  proc
}
# processing function for bands by 'class'
#
# Normalise the pivoted "Date" column of `proc` into a date (or a numeric
# index), depending on which get_*() family produced `data` and which
# temporal filter was applied. Band names produced by toBands() embed the
# acquisition date plus the band name; the exact format varies by product,
# hence the per-method string surgery below.
#
# Fix vs. the original: `class(data) == "..."` comparisons (which yield a
# vector, and an error inside if() on R >= 4.2, when objects carry more than
# one class) were replaced with inherits(); scalar `|` chains on the scalar
# `method`/`tmp_type` were replaced with %in%.
getting_proc <- function(data, proc, param_name, method, tmp_type){

  if (tmp_type == 'all') {

    if (inherits(data, 'met_list')) {
      if (method %in% c("AN81m", "TERRACLIMATE")) {
        # Monthly met products: strip "X" and the band suffix, insert a dash
        # after the year and append "-01" so the string parses as a date.
        proc <- proc %>%
          dplyr::mutate(Date = stringr::str_remove_all(.data$Date, "X"),
                        Date = stringr::str_remove_all(.data$Date, param_name),
                        Date = stringr::str_replace(.data$Date, "(\\d{4})", "\\1-"),
                        Date = paste0(.data$Date, "-01"),
                        Date = lubridate::as_date(.data$Date))
      } else if (method %in% c("AN81d", "GRIDMET", "DAYMET")) {
        # Daily met products: same stripping, but the string already carries
        # month and day, so no "-01" is appended.
        proc <- proc %>%
          dplyr::mutate(Date = stringr::str_remove_all(.data$Date, "X"),
                        Date = stringr::str_remove_all(.data$Date, param_name),
                        Date = stringr::str_replace(.data$Date, "(\\d{4})", "\\1-"),
                        Date = lubridate::as_date(.data$Date))
      } else if (method == 'TRMMh') {
        # 3-hourly TRMM: trim the prefix/suffix, rebuild "yyyy-mm-dd hh:00"
        # and parse it as a date-time.
        proc <- proc %>%
          dplyr::mutate(Date = stringr::str_remove_all(.data$Date, 'X'),
                        Date = stringr::str_remove_all(.data$Date, param_name),
                        Date = stringr::str_sub(.data$Date, start = 6),
                        Date = stringr::str_sub(.data$Date, end = -3),
                        Date = stringr::str_replace_all(.data$Date, "_", " "),
                        Date = stringr::str_replace(.data$Date, "(\\d{6})", "\\1-"),
                        Date = stringr::str_replace(.data$Date, "(\\d{4})", "\\1-"),
                        Date = paste0(.data$Date, "00"),
                        Date = lubridate::parse_date_time2(.data$Date, orders = '%Y/%m/%d %H:%M'))
      } else if (method == 'TRMMm') {
        # Monthly TRMM: trim to the embedded date portion and parse.
        proc <- proc %>%
          dplyr::mutate(Date = stringr::str_remove_all(.data$Date, 'X'),
                        Date = stringr::str_remove_all(.data$Date, param_name),
                        Date = stringr::str_sub(.data$Date, start = 6),
                        Date = stringr::str_sub(.data$Date, end = -3),
                        Date = lubridate::as_date(.data$Date))
      }

    } else if (inherits(data, 'landsat_list')) {
      # Landsat scene ids end in the 8-digit acquisition date.
      proc <- proc %>%
        dplyr::mutate(Date = stringr::str_remove(.data$Date, param_name),
                      Date = stringr::str_sub(.data$Date, start = -8),
                      Date = lubridate::as_date(.data$Date))
    } else if (inherits(data, 'sent2_list')) {
      # Sentinel-2 ids start with "X" followed by the acquisition date.
      proc <- proc %>%
        dplyr::mutate(Date = stringr::str_sub(.data$Date, end = 9),
                      Date = stringr::str_remove(.data$Date, "X"),
                      Date = lubridate::as_date(.data$Date))
    } else if (inherits(data, 'npp_list')) {
      # Annual NPP: the year is the only date information available.
      proc <- proc %>%
        dplyr::mutate(Date = stringr::str_remove(.data$Date, "_annualNPP"),
                      Date = stringr::str_remove(.data$Date, "X"),
                      Date = as.numeric(.data$Date))
    } else if (inherits(data, 'any_list')) {
      # Unknown collections: keep the raw band name, use row order as index.
      proc <- proc %>%
        dplyr::mutate(raw_date = .data$Date,
                      Date = dplyr::row_number())
    }

  } else if (tmp_type == 'year_month') {
    # Names look like "<prefix>_<yyyy>_<mm>...": drop the prefix, turn the
    # first underscore into a dash and parse.
    proc <- proc %>%
      dplyr::mutate(Date = stringr::str_replace(.data$Date, "[^_]*_(.*)", "\\1"),
                    Date = stringr::str_replace(.data$Date, '_', '-'),
                    Date = lubridate::as_date(.data$Date))
  } else if (tmp_type %in% c('monthly', 'yearly')) {
    # After stripping, only a bare year or month number remains.
    proc <- proc %>%
      dplyr::mutate(Date = stringr::str_remove(.data$Date, param_name),
                    Date = stringr::str_remove(.data$Date, "X"),
                    Date = as.numeric(.data$Date))
  }

  return(proc)
}
# ggplot plot for proc data
# Line + smoother over Date for the extracted parameter. When a facetting
# variable is supplied it drives both the colour aesthetic and the facets;
# otherwise a single un-coloured, un-facetted panel is drawn.
plot_proc <- function(proc, param_v, facet_col_var){

  if (is.null(facet_col_var)) {
    aes_map <- ggplot2::aes(.data$Date, .data[[param_v]])
    facet_layer <- NULL
  } else {
    aes_map <- ggplot2::aes(.data$Date, .data[[param_v]],
                            colour = .data[[facet_col_var]])
    facet_layer <- ggplot2::facet_wrap(dplyr::vars(.data[[facet_col_var]]))
  }

  # Adding a NULL layer is a no-op in ggplot2, so the facet can be optional.
  ggplot2::ggplot(proc, aes_map) +
    ggplot2::geom_line() +
    ggplot2::geom_smooth(alpha = 0.3) +
    ggplot2::theme_bw() +
    facet_layer
}
|
33427919ebbeebefaa012aa6751f8fca8e6f6a40
|
9d0faaf1f45e3c4f3fdea6c2d44d662ff0214e9c
|
/man/ral.Rd
|
a370461fc6c7d18b33da17f7eef14ce23c966614
|
[] |
no_license
|
edwindj/ral
|
0026c98e1f4b87d1854a887e720e5e0cf04099ac
|
949de5e9a57a95832277e2a629e674f736017c4c
|
refs/heads/master
| 2021-01-23T08:10:34.438921
| 2017-03-31T07:22:20
| 2017-03-31T07:22:20
| 80,532,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
ral.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ral}
\alias{ral}
\title{RAL colors with code.}
\format{A data frame with 213 rows and 5 variables:
\describe{
\item{RAL}{RAL number, integer}
\item{color}{html color code}
\item{name}{english name}
\item{german_name}{original german name}
\item{description}{Optional description of color usage}
}}
\source{
\url{https://en.wikipedia.org/wiki/List_of_RAL_colors}
}
\usage{
ral
}
\description{
A dataset containing the RAL colors, color codes
}
\keyword{datasets}
|
4b9a50eeb095eb28f8d60ed9fe77b28788612e6f
|
dbcdfd44aefa58ff112e222ed05a6cc81a192532
|
/code/VISUALIZE/FIGUREA10.R
|
cb8b5aac77847cbd4a15a55720cfc83cc2dc4361
|
[] |
no_license
|
MGanslmeier/covid19vaccinationProject
|
b6b14d8a54024dd741336fe10eb25396382ab44f
|
d91614aa76ef1e8d7d4bb3681963f0259af0058c
|
refs/heads/main
| 2023-04-29T06:30:46.369658
| 2021-05-19T15:54:07
| 2021-05-19T15:54:07
| 368,921,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,929
|
r
|
FIGUREA10.R
|
# Parse Stata-style regression output (txt) for FIGURE A10 and plot
# coefficient-by-lag panels per dependent variable.
setwd("XXX")  # NOTE(review): placeholder path -- must be set before running
pacman::p_load(plyr, dplyr, readr, tidyr, stringr, openxlsx, ggplot2, gdata, forcats,
readxl, purrr, tvthemes, ggthemes, usmap, scales, ggthemes, gtools)
# NOTE(review): rm(list = ls()) in a sourced script wipes the caller's workspace.
rm(list = ls())
options(scipen = 999)
#########################
# FILEPATH: all FIGUREA10 result files under res/extended/
files <- list.files('res', full.names = T, recursive = T) %>%
  subset(., grepl('txt', .) & grepl('extended\\/FIGUREA10', .))
df <- data.frame(stringsAsFactors = F)
for(i in 1:length(files)){

  # LOAD one regression-output table
  temp <- read.delim(files[i], header = FALSE)
  # Row positions of the header ("VARIABLES") / footer ("Observations") and of
  # the lagged-vaccination coefficient rows.
  index <- temp %>% subset(., grepl('VARIABLES|Observations', V1)) %>% row.names() %>% as.numeric() %>% sort(.)
  index_vacc <- temp %>% subset(., grepl('L\\.peopleVacc100', V1)) %>% row.names() %>% as.numeric() %>% sort(.)

  # EXTRACT the per-column metadata rows (dependent variable, lag, F-stat,
  # instrument terms), transposed so each model becomes one observation.
  DEP <- temp %>% subset(., V1 %in% c('VARIABLES')) %>% t() %>% as.data.frame() %>% set_names('obs') %>% .[-1,] %>% gsub(',', '', .)
  LAG <- temp %>% subset(., V1 %in% c('LAG')) %>% t() %>% as.data.frame() %>% set_names('obs') %>% .[-1,] %>% gsub(',', '', .)
  FSTAT <- temp %>% subset(., V1 %in% c('FSTAT')) %>% t() %>% as.data.frame() %>% set_names('obs') %>% .[-1,] %>% gsub(',', '', .)
  GLOBALTERM <- temp %>% subset(., V1 %in% c('GLOBALTERM')) %>% t() %>% as.data.frame() %>% set_names('obs') %>% .[-1,] %>% gsub(',', '', .)
  LOCALTERM1 <- temp %>% subset(., V1 %in% c('LOCALTERM1')) %>% t() %>% as.data.frame() %>% set_names('obs') %>% .[-1,] %>% gsub(',', '', .)

  # Reshape the coefficient block into long format; significance is read off
  # the number of stars attached to each coefficient string.
  df <- temp[(index_vacc):(index[2]-2), ] %>%
    subset(., V1 != '') %>% t(.) %>% as.data.frame() %>%
    set_names(gsub(' ', '', .[1,])) %>% .[-1,] %>%
    mutate(DEP = DEP, LAG = LAG, FSTAT = FSTAT, GLOBALTERM = GLOBALTERM,
           LOCALTERM1 = LOCALTERM1) %>%
    gather(., INDEP, coef, -c(DEP, LAG, GLOBALTERM, LOCALTERM1, FSTAT)) %>%
    mutate(INDEP = gsub(".*\\.", '', INDEP)) %>%
    mutate(LAG = as.numeric(LAG), FSTAT = as.numeric(FSTAT)) %>%
    subset(., coef != '') %>% subset(., !INDEP %in% c('Constant')) %>%
    mutate(NSTAR = str_count(coef, "\\*") %>% as.character()) %>%
    mutate(SIGN = as.numeric(grepl('\\*', coef)) %>% as.character()) %>%
    mutate(SIGN = case_when(NSTAR == '3' ~ '01% SI', NSTAR == '2' ~ '05% SI', NSTAR == '1' ~ '10% SI', NSTAR == '0' ~ 'None')) %>%
    mutate(SIGN = case_when(NSTAR %in% c('2', '3') ~ 'significant (p-value <5%)', TRUE ~ 'not significant (p-value >5%)')) %>%
    mutate(coefnum = gsub('\\*|\\,', '', coef) %>% as.numeric()) %>%
    mutate(lab = paste0(round(FSTAT, 0) %>% as.character(.))) %>%
    mutate(INST = paste(GLOBALTERM, LOCALTERM1, sep = ' - ')) %>%
    bind_rows(., df)
}
# Human-readable dependent-variable labels for facetting.
df <- df %>%
  mutate(DEP = case_when(DEP == 'D.aod550_pop' ~ 'AOD (change)',
                         DEP == 'D.case_pop' ~ ' new Covid19 cases',
                         DEP == 'D.mobility' ~ 'Mobility (change)',
                         DEP == 'D.ntl_pop' ~ 'NTL (change)'))

#############
# PLOT: coefficient vs. lag, faceted by dependent variable, for one
# instrument combination.
gterm <- unique(df$GLOBALTERM)
n <- length(unique(df$LOCALTERM1))
inst <- combinations(n = n, r = 1, v = unique(df$LOCALTERM1)) %>%
  as.data.frame() %>% mutate(IV = paste(V1, sep = ' - ')) %>% pull(IV) %>%
  expand.grid(lterm = ., gterm = gterm, stringsAsFactors = F) %>%
  mutate(IV = paste(gterm, lterm, sep = ' - ')) %>% pull(IV)
# NOTE(review): `i` here is the stale index left over from the file loop
# above, so inst[i] silently picks whichever instrument that index lands on.
# This looks like an extracted loop body -- confirm intent.
temp <- df %>% subset(., INST == inst[i]) %>% unique()
ggplot() +
  geom_label(data = temp, aes(x = LAG, y = coefnum, label = lab), vjust = 1.5, size = 1.5, label.size = 0, alpha = 0) +
  geom_point(data = temp, aes(x = LAG, y = coefnum, color = SIGN), size = 1.5) +
  geom_hline(yintercept = 0, color = 'red', size = 0.5) +
  scale_color_manual(values = c('significant (p-value <5%)' = 'darkblue', 'not significant (p-value >5%)' = 'brown3')) +
  facet_wrap(~DEP, scales = 'free', nrow = 2) +
  theme_classic() + labs(x = 'lag', y = 'coefficient') +
  scale_x_continuous(breaks = seq(0, 30, 5)) +
  theme(legend.position = 'bottom',
        legend.direction = 'horizontal',
        legend.title = element_blank())
|
129ede3722199985ca94983c441abd1b29c41d32
|
63bd69b8e6c1b1c86adaa22aa1495783b8e33d72
|
/course_project/ui.R
|
2c9b8d1c4fc280800997d715407fb5beff58cd08
|
[] |
no_license
|
jjsmartin/data_products_coursera
|
dfe880e22ec7e608a4c09fbeca682375db43632a
|
7bf7020ea95ddfb098d64c70f8c77e0c0543f5bc
|
refs/heads/master
| 2021-01-23T06:44:13.430820
| 2014-07-27T18:50:52
| 2014-07-27T18:50:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,396
|
r
|
ui.R
|
## UI for flu time series display
library(shiny)

# Single-page layout: title and help text, the time-series plot, and below it
# a row of controls -- a smoothing slider and age-range checkboxes. The
# server side is expected to render "plot1" from inputs "slider1" and
# "checkGroup".
shinyUI(fluidPage(
    fluidRow(
        titlePanel("Flu Cases in USA, 2009 to 2014"),
        helpText("Use the checkboxes to select which time series you want to see plotted." ),
        helpText("Use the slider to select the order of smoothing - i.e. the number of weeks to average over"),
        plotOutput("plot1"),
        hr(),

        # slider setting the order of smoothing (i.e. the number of weeks to average over )
        column(3,
               wellPanel(
                   sliderInput("slider1",
                               label = h5("Number of weeks to average over:"),
                               min = 1,
                               max = 20,
                               value = 1) ) ),

        # checkboxes for selection of the different time series
        # (values 1-6 index the age-range series on the server side)
        column( 3, offset= 1,
                wellPanel(
                    checkboxGroupInput( "checkGroup",
                                        label = h3("Age ranges"),
                                        choices = list( "All ages" = 1,
                                                        "0 to 4" = 2,
                                                        "5 to 24" = 3,
                                                        "25 to 49" = 4,
                                                        "50 to 64" = 5,
                                                        "65+" = 6),
                                        selected = 1) ) )

    ))
)
|
6bc809c3788acb1492288435cff3fa3c3a9305eb
|
cf5998744c0c76ef67647473da3c1b79d07fbff7
|
/R/analyze.coalescent.R
|
36dab8db5a87935ebb8d02ecf4bf2dbc821987df
|
[] |
no_license
|
fmichonneau/starbeastPPS
|
00a3797e72ce882475669ce556628d08ec61db5f
|
3c07296a35d327ce840bc25d81a7a183259d9c19
|
refs/heads/master
| 2020-12-24T15:23:36.039533
| 2014-10-31T21:07:18
| 2014-10-31T21:07:18
| 18,690,628
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,685
|
r
|
analyze.coalescent.R
|
#' Posterior-predictive coalescent check for a starBEAST analysis.
#'
#' For every posterior sample of the species tree, computes two statistics for
#' each empirical gene tree (its coalescent probability and its number of deep
#' coalescences), then simulates replacement gene trees with ms and computes
#' the same statistics. Returns observed and simulated matrices plus their
#' differences, classed "coalescentteststats".
#'
#' @param data List holding genes, species.trees, gene.trees, associations,
#'   and ploidy (as assembled elsewhere in this package).
#' @param msdir Path to the directory containing the ms executable.
analyze.coalescent <- function(data, msdir = "/directory/containing/ms") {

    ntrees <- length(data$genes)
    nsamples <- length(data$species.trees)

    # Pre-allocated result matrices: one row per posterior sample,
    # one column per gene (columns named by gene).
    blank <- matrix(nrow = nsamples, ncol = ntrees,
                    dimnames = list(NULL, data$genes))
    empirical.gene.coal <- blank
    empirical.gene.probs <- blank
    simulated.gene.coal <- blank
    simulated.gene.probs <- blank

    #### Statistics for the empirical gene trees under each sampled species tree.
    for (g in seq_len(ntrees)) {
        assoc <- data$associations[["empirical"]][[g]]
        for (s in seq_len(nsamples)) {
            sp.tree <- data$species.trees[[s]]
            gn.tree <- data$gene.trees[[g]][[s]]
            empirical.gene.probs[s, g] <- gene.tree.prob(sp.tree, gn.tree,
                assoc, ploidy = data$ploidy[g])
            empirical.gene.coal[s, g] <- deep.coal(sp.tree, gn.tree, assoc)
        }
        cat("empirical tree ", g, " is done!\n")
    }

    #### Simulate replacement gene trees with ms and score them the same way.
    for (g in seq_len(ntrees)) {
        ns <- length(data$gene.trees[[g]][[1]]$tip.label)
        for (s in seq_len(nsamples)) {
            sp.tree <- data$species.trees[[s]]
            simtree <- mstree(sp.tree, msdir, nseq = ns, nreps = 1,
                samplescheme = data$associations[["simulate"]][[g]],
                ploidy = data$ploidy[g])
            simulated.gene.probs[s, g] <- gene.tree.prob(sp.tree, simtree,
                data$associations[["calculate"]][[g]], ploidy = data$ploidy[g])
            simulated.gene.coal[s, g] <- deep.coal(sp.tree, simtree,
                data$associations[["calculate"]][[g]])
        }
        cat("simulated tree ", g, " is done!\n")
    }

    # Package observed vs. simulated statistics plus their differences
    # (the test statistic is simulated minus empirical).
    result.all <- list(
        probs = list(empirical = empirical.gene.probs,
                     simulated = simulated.gene.probs,
                     test.stat = simulated.gene.probs - empirical.gene.probs),
        coal = list(empirical = empirical.gene.coal,
                    simulated = simulated.gene.coal,
                    test.stat = simulated.gene.coal - empirical.gene.coal))
    class(result.all) <- "coalescentteststats"
    result.all
}
|
63ae7e67a41f54d219f46aea0ec6f7a9bf284826
|
00fa720b1d2ca2cfada7f849b3575a7a9c7b315b
|
/sim-info.R
|
d3b299f2fbd98f1facf9e17bb1dd25a5fa81f852
|
[] |
no_license
|
petrelharp/spatial_selection
|
84230958bcf5f669dde2a79b8e48fa45b0d494df
|
2f6fac14e71af729ed8de153685d604cc2e5dd32
|
refs/heads/master
| 2021-01-10T05:33:24.169012
| 2015-10-19T19:58:51
| 2015-10-19T19:58:51
| 44,549,223
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 963
|
r
|
sim-info.R
|
#!/usr/bin/Rscript
# Summarise every "*-pophistory-run.Rdata" simulation output in the current
# directory into a single siminfo.tsv table of run parameters.

# Run id = leading token of the filename; order files oldest-first by mtime.
simfiles <- list.files(pattern="*-pophistory-run.Rdata")
names(simfiles) <- gsub( "([^-]*)-.*","\\1", simfiles)
filetimes <- file.info(simfiles)$mtime
simfiles <- simfiles[ order(filetimes) ]
filetimes[ order(filetimes) ]
# Each .Rdata file defines `pophist` when loaded; collect its parameter list.
# NOTE(review): has.occupation is TRUE when occupation is non-NULL OR when
# max(occupation) == 0 -- the OR with max()==0 looks inverted for a "has"
# flag (cf. the `|` comment-free original); confirm intended semantics.
simparams <- lapply( simfiles, function (x) {
        load(x)
        c( pophist$pop$params, list( has.occupation=( !is.null(pophist$occupation) | max(pophist$occupation)==0 ) ) )
    } )
# One row per run: unlist the scalar parameters of interest.
simparam.df <- data.frame( t( sapply( lapply( simparams, "[", c("mu","r","m","N","range","patchsize","sb","sm","sigma",'has.occupation') ), unlist ) ) )
# nsteps/stepsize may be absent in older runs; fill those with NA.
simparam.df$nsteps <- sapply( sapply( simparams, "[", "nsteps" ), function (x) { if (is.null(x)) { NA } else { x[[1]] } } )
simparam.df$stepsize <- sapply( sapply( simparams, "[", "stepsize" ), function (x) { if (is.null(x)) { NA } else { x[[1]] } } )
# Total generations simulated = recorded steps x step size.
simparam.df$ngens <- with(simparam.df, nsteps*stepsize)
write.table(simparam.df, file='siminfo.tsv', sep='\t', quote=FALSE)
|
ca858429f63f16d31021cbcc6636327c589032d2
|
c1d864235d46a20ecd975bf4a6a29586184c606c
|
/visualizacion/shiny/data.R
|
8bfe7a2c5773e7831bf73ac001980e9c29c5242e
|
[] |
no_license
|
ludwigrubio/datascience_academic
|
fb2fdb659016cc31ee968735e4a4495040b4dd05
|
8291ea1c925b8ecfb12985a653abf1e0f8ce0107
|
refs/heads/master
| 2020-05-29T14:13:05.864835
| 2019-09-05T19:43:47
| 2019-09-05T19:43:47
| 189,186,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,175
|
r
|
data.R
|
# Load variables ----
library(dplyr)
library(tidyr)
# Pre-computed objects used by the Shiny app (saved by an earlier pipeline):
# map points, chart data frames, and a second map layer.
load(file="points_map")
load(file="first_chart")
load(file="second_chart")
load(file="third_chart")
load(file="fourth_chart")
load(file="second_map")

# Preprocessing for chart 3 ----
# Split total population into indigenous (pi) and non-indigenous (pni - pi),
# then stack the two series into long format.
total_pg_pi_idh_g3 <- total_pg_pi_idh
total_pg_pi_idh_g3$pni <- total_pg_pi_idh_g3$pni - total_pg_pi_idh_g3$pi
total_pg_pi_idh_g3 <- total_pg_pi_idh_g3 %>% gather(type_pob, pob,-idh, -year) %>% arrange(desc(type_pob))
total_pg_pi_idh_g3$type_pob <- factor(total_pg_pi_idh_g3$type_pob, label=c("Indígena","No Indígena"))

# Preprocessing for chart 4 ----
# Same split at state level, plus a per-year rank by indigenous population.
pob_edo_gm$pg <- pob_edo_gm$pg - pob_edo_gm$pi
pob_edo_type <- pob_edo_gm %>% group_by(year) %>%
  mutate(rank = rank(desc(pi))) %>%
  arrange(-pi)
pob_edo_type_gm <- pob_edo_type
pob_edo_type<- pob_edo_type %>% arrange(year, rank)
pob_edo_type <- gather(pob_edo_type, type_pob, pob, -ent, -cve_ent, -gm, -year, -rank)
pob_edo_type$type_pob <- factor(pob_edo_type$type_pob, labels = c("No indígena","Indígena"))
pob_edo_type <- pob_edo_type%>% ungroup()

# Preprocessing for chart 5 ----
# Indigenous population by state with marginalisation-grade (gm) colours.
pob_edo_type_gm_indi <- pob_edo_type_gm[,c("year","ent","gm","pi","rank")]
gmColors <- c("#A71E4A","#EE3425","#F15523","#F99B20","#F3EC3A")
names(gmColors) <- levels(pob_edo_type_gm_indi$gm)

# States ----
list_edos <- unique(pob_edo_gm[,"ent"])
radom_ten_edos <- sample(list_edos, 10)

# Languages ----
list_languaje <- as.character(unique(pob_ci$comunity))
list_languaje <- sort(list_languaje)
random_ten_languaje <- sample(list_languaje, 10)

# Initial random selections for the UI ----
radom_ten_edos <- sample(list_edos, 10)
radom_ten_languaje <- sample(list_languaje, 10)

# Indicators (UI label -> column name) ----
list_indicators <- c( "% de hablantes sin acceso a servicios de salud" = "SINDERHAB",
                      "% hogares con acceso a agua potable" = "VCONELE",
                      "% hogares con acceso a electricidad" = "VCONAGUA",
                      "% hogares con acceso a internet" = "VCONINTER")
# NOTE(review): the agua/electricidad labels map to VCONELE/VCONAGUA
# respectively, which looks swapped -- confirm against the data dictionary.

# Colour palettes: low/high gradient endpoints per indicator ----
indi <- c("SINDERHAB", "VCONELE", "VCONAGUA", "VCONINTER")
lowc <- c("#1B5E20","#81D4FA","#FFCC80","#B39DDB")
hightc <- c("#A5D6A7","#01579B","#E65100","#311B92")
indicators_colors <- data.frame(indi, lowc, hightc, stringsAsFactors = F)

# Load the states GeoJSON ----
# NOTE(review): fromJSON is not loaded in this file -- presumably jsonlite is
# attached elsewhere in the app; verify.
geojson <- readLines("estados-de-mexico.geojson", warn = FALSE) %>%
  paste(collapse = "\n") %>%
  fromJSON(simplifyVector = FALSE)

# Auxiliary functions ----
# Format a number with an SI prefix, e.g. 1234 -> "1.234 k".
#
# Args:
#   number:   numeric scalar to format.
#   rounding: if TRUE, round the scaled value (to `digits` significant
#             figures when `digits` is numeric, otherwise to the nearest
#             integer).
#   digits:   significant figures for signif(); defaults to 6 when rounding
#             is off and NA (unused) when rounding is on. The ifelse()
#             default is kept for backward compatibility with existing calls.
#
# Returns: a character string "<scaled value> <prefix>" (space-separated),
#   or the plain number as a string when no prefix applies (values in
#   [1, 1000) or outside the supported range).
#
# Changes vs. the original: TRUE/FALSE literals instead of reassignable T/F,
# and isTRUE(rounding) instead of the fragile `rounding == T` comparison.
f2si <- function (number, rounding = FALSE, digits = ifelse(rounding, NA, 6))
{
    # Powers of 1000 from yocto (1e-24) upward, with their SI prefixes;
    # lut[9] == 1 maps to the empty prefix and is handled as "no prefix".
    lut <- c(1e-24, 1e-21, 1e-18, 1e-15, 1e-12, 1e-09, 1e-06,
        0.001, 1, 1000, 1e+06, 1e+09, 1e+12, 1e+15, 1e+18, 1e+21,
        1e+24, 1e+27)
    pre <- c("y", "z", "a", "f", "p", "n", "u", "m", "", "k",
        "M", "G", "T", "P", "E", "Z", "Y", NA)
    # Index of the largest power of 1000 not exceeding `number`.
    ix <- findInterval(number, lut)
    if (ix > 0 && ix < length(lut) && lut[ix] != 1) {
        scaled <- number / lut[ix]
        if (isTRUE(rounding) && !is.numeric(digits)) {
            # Rounding requested with no digit count: nearest integer.
            sistring <- paste(round(scaled), pre[ix])
        }
        else if (isTRUE(rounding) || is.numeric(digits)) {
            sistring <- paste(signif(scaled, digits), pre[ix])
        }
        else {
            sistring <- paste(scaled, pre[ix])
        }
    }
    else {
        # No prefix applies: return the number unchanged, as text.
        sistring <- as.character(number)
    }
    return(sistring)
}
|
ac2c14225fe9c4b2b80a193730014118408e1086
|
74051a004138cc577fe910e00c133e1d0e2d25c0
|
/Plotting/rna.R
|
815dd8d8171da4468a2cee32f2e8d45cfd4b5150
|
[] |
no_license
|
patrickCNMartin/ChIPanalyserSub
|
0e8e7845937420efcffbf24d39002425fbc9e959
|
4c3afeecf0f12bd465312c921df7cfcee2801f71
|
refs/heads/main
| 2023-01-03T07:12:32.097939
| 2020-10-28T12:45:54
| 2020-10-28T12:45:54
| 301,397,014
| 0
| 0
| null | 2020-10-05T13:40:36
| 2020-10-05T12:13:19
| null |
UTF-8
|
R
| false
| false
| 5,982
|
r
|
rna.R
|
# Multi-panel figure: ChIP profile predictions (training + RNA-rescaled
# validation) per TF/cell-line pair, plus MSE boxplots in a third column.
# Transcription factors and the cell line for each panel; `labs` letters tag
# the panels.
tf<-c("CTCF","BEAF-32","su(Hw)")
cell<-c("Kc167","BG3","BG3","S2","Kc167","S2")
labs<-LETTERS[1:6]
## NOTE(review): this script assumes rescaledExtract, ChIPProfiles,
## predictions, TFsub, AccessOriginal, Access, scale, rescaledChIPsignal,
## `method`, and the helpers (loci, scores, searchSites, profiles,
## .AccessExtract) are already defined in the calling environment.
pdf(paste0("RNA_rescale_ChIPanalyser_","MSE",".pdf"), width=27,height=15)
#par(oma=c(0,0,9,0))
# Layout: columns 1-2 hold two stacked profile panels per TF (training,
# validation); column 3 holds one boxplot panel per TF.
layout(matrix(cbind(c(1,2,5,6,9,10),c(3,4,7,8,11,12),c(13,13,14,14,15,15)),ncol=3), width=c(7,7,5.5),height=c(1,1,1))
par(family="sans")
par(xpd=NA)
cols<-c("#ff5959","#233142","#facf5a","#facf5a")
count<-0
for(i in seq_along(rescaledExtract)){
setTrain <- loci(ChIPProfiles[[i]])
scoresTrain<- scores(ChIPProfiles[[i]])
print(count)
# Optimal (lambda, bound molecules) pair selected by `method` (e.g. MSE).
param <- as.numeric(predictions[[i]][[1]][[1]][[method]])
lambda <- param[1]
bound <-param[2]
count<-count+1
subcount<-1
# Training panels: ChIP signal (filled polygon) vs. model prediction (line).
for(k in TFsub[[i]]){
predictionTrain <- searchSites(predictions[[i]]$ChIPProfiles, lambda, bound,names(scoresTrain)[k])
# x grid every 100 bp, padded with one point on each side so the polygon closes.
x<-seq(start(setTrain)[k],end(setTrain)[k],by=100)
x<-c(x[1]-1,x,x[length(x)]+1)
if(subcount==1){par(mar=c(5,2,4.5,2))}else{par(mar=c(5,2,3.8,2))}
plot(0,type="n", axes=FALSE,xlab="",ylab="",xlim=c(start(setTrain)[k],end(setTrain)[k]),ylim=c(0,1))
title(xlab=paste0("Genomic Position on ",as.character(seqnames(setTrain))[k]),cex.lab=1.5)
if(subcount==1){
title(main=paste0(tf[i]," in ",cell[count]," - lambda = ",lambda," & Bound Molecules = ",bound),cex.main=1.8,line=0.5)
text(x=(x[1]-1000), y=1.2, labels=labs[count],cex=4)
}
subcount<-subcount+1
axis(1,at=round(seq(start(setTrain)[k],end(setTrain)[k],length.out=10)),labels=round(seq(start(setTrain)[k],end(setTrain)[k],length.out=10)),cex.axis=1.5)
# Shade DNA-inaccessible regions with hatching.
noaccess<-.AccessExtract(setTrain[k],AccessOriginal[[i]])[[1]]
for(j in seq_len(nrow(noaccess))){
rect(noaccess[j,"start"],0,noaccess[j,"end"],0.9,col="#facf5a",density=50,angle=45,lwd=1,border=NA)
#rect(noaccess[[k]][j,"start"],0,noaccess[[k]][j,"end"],0.9,col="#facf5a",density=10,angle=135,lwd=1,border=NA)
#rect(noaccess[[k]][j,"start"],0,noaccess[[k]][j,"end"],0.9,col="#facf5a",density=10,angle=90,lwd=1,border=NA)
}
local<-scoresTrain[[k]]
chipInd<-c(0,local[seq(0,length(local),by=100)],0)
predInd<-c(0,predictionTrain[[1]][[1]]$ChIP,0)
polygon(x,chipInd,density=NA,col="#233142",lwd=2)
lines(x,predInd,col="#ff5959",lwd=2.5)
}
count<-count+1
print(count)
subcou<-1
# Validation panels: rescaled ChIP signal vs. the four rescaling strategies.
for(k in TFsub[[i]]){
# Bound molecules rescaled by the ratio stored in `scale` for this TF.
boundres <- as.numeric(round((param[2]/scale[[i]][[1]])*scale[[i]][[2]]))
validationScore <- scores(rescaledChIPsignal[[i]])
validationSet <-loci(rescaledChIPsignal[[i]])
x<-seq(start(validationSet)[k],end(validationSet)[k],by=100)
x<-c(x[1]-1,x,x[length(x)]+1)
if(subcou==1){par(mar=c(5,2,4.5,2))}else{par(mar=c(5,2,3.8,2))}
plot(0,type="n", axes=FALSE,xlab="",ylab="",xlim=c(start(validationSet)[k],end(validationSet)[k]),ylim=c(0,1))
title(xlab=paste0("Genomic Position on ",as.character(seqnames(validationSet))[k]),cex.lab=1.5)
if(subcou==1){
title(main=paste0(tf[i]," in ",cell[count]," - lambda = ",lambda," & Bound Molecules = ",boundres),cex.main=1.8,line=0.5)
text(x=(x[1]-1000), y=1.2, labels=labs[count],cex=4)
}
subcou<-subcou+1
axis(1,at=round(seq(start(validationSet)[k],end(validationSet)[k],length.out=10)),labels=round(seq(start(validationSet)[k],end(validationSet)[k],length.out=10)),cex.axis=1.5)
noaccess<-.AccessExtract(validationSet[k],Access[[i]])[[1]]
for(j in seq_len(nrow(noaccess))){
rect(noaccess[j,"start"],0,noaccess[j,"end"],0.9,col="#facf5a",density=50,angle=45,lwd=1,border=NA)
#rect(noaccess[[k]][j,"start"],0,noaccess[[k]][j,"end"],0.9,col="#facf5a",density=10,angle=135,lwd=1,border=NA)
#rect(noaccess[[k]][j,"start"],0,noaccess[[k]][j,"end"],0.9,col="#facf5a",density=10,angle=90,lwd=1,border=NA)
}
local<-validationScore[[k]]
chipInd<-c(0,local[seq(0,length(local),by=100)],0)
# Four predictions per locus: carry-over, rescaled, 10-fold, 100-fold
# (list positions 1-4 of rescaledExtract[[i]]).
Carry<-profiles(rescaledExtract[[i]][[1]]$ChIPPrediction)
CarryInd<-c(0,Carry[[1]][[k]]$ChIP,0)
rescale <-profiles(rescaledExtract[[i]][[2]]$ChIPPrediction)
rescaleInd<-c(0,rescale[[1]][[k]]$ChIP,0)
fold10 <-profiles(rescaledExtract[[i]][[3]]$ChIPPrediction)
foldInd10<-c(0,fold10[[1]][[k]]$ChIP,0)
fold100 <-profiles(rescaledExtract[[i]][[4]]$ChIPPrediction)
foldInd100<-c(0,fold100[[1]][[k]]$ChIP,0)
polygon(x,chipInd,density=NA,col="#233142",lwd=2)
lines(x,CarryInd,col="#56B4E9",lwd=2.5)
lines(x,rescaleInd,col="#ff5959",lwd=2.5,lty=2)
lines(x,foldInd10,col="#a64f9d",lwd=2.5,lty=1)
lines(x,foldInd100,col="#009E73",lwd=2.5,lty=1)
legend(x=x[length(x)],y=1,col=c("#56B4E9","#ff5959","#a64f9d","#009E73"),
    lty=c(1,2,1),legend=c("Carry-Over","Rescaled","10 Fold","100 Fold"),bty="n",lwd=rep(2.5,3),cex=2)
}
}
## initiating boxplot for loop (column 3: goodness-of-fit distributions)
labs<-LETTERS[7:9]
par(xpd=NA)
for(i in seq_along(rescaledExtract)){
param<-as.numeric(predictions[[i]][[1]][[1]][[method]])
# Pull the per-locus `method` (e.g. MSE) score for each strategy.
ext <- searchSites(predictions[[i]]$goodnessOfFit,param[1],param[2])[[1]]
ext <- sapply(ext, function(x){return(x[method])})
carry <- profiles(rescaledExtract[[i]][["Carry"]][["GoF"]])[[1]]
carry <- sapply(carry, function(x){return(x[method])})
rescale <- profiles(rescaledExtract[[i]][["Rescaled"]][["GoF"]])[[1]]
rescale <- sapply(rescale, function(x){return(x[method])})
fold <- profiles(rescaledExtract[[i]][["100Fold"]][["GoF"]])[[1]]
fold <- sapply(fold, function(x){return(x[method])})
fold10 <- profiles(rescaledExtract[[i]][["10Fold"]][["GoF"]])[[1]]
fold10 <- sapply(fold10, function(x){return(x[method])})
par(xpd=NA)
par(mar=c(4,17,4,2))
dat<-list("Estimated"=ext,"Rescaled"=rescale,"Carry-Over"=carry,"10 Fold"=fold10,"100 Fold"=fold)
boxplot(dat,main="MSE Distribution",col=c("#4f9da6","#ff5959","#facf5a","#a64f9d","#009E73"),frame=F,cex.axis=1.5,cex.main=1.8,ylim=c(0,0.1))
text(x=(-0.25),y=0.11, labels=labs[i],cex=4)
}
dev.off()
|
d9b997f7125a20a66a4670a33877a4279e9c16e8
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/PKgraph/R/global.r
|
f1df1ca442cf97677ef7e481ec22a9baa83b7bec
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,940
|
r
|
global.r
|
#############################################################################################
## Project: PKgraph
## File: global.R
## Author: Xiaoyong Sun
## Date: 08/19/2009
## Goal: PKgraph
## - interface
## Notes:
#############################################################################################
## ---------------------------------------------------------------------------
## Private application-state store for PKgraph.
## local() evaluates this block in its own environment, so every ".xxx"
## variable below is closure-private; the only public surface is the list of
## getter/setter functions returned at the end (used as .pk$getTerm(), ...).
## All setters use `<<-` to update the captured state.
## ---------------------------------------------------------------------------
.pk <- local(
{
    .term <- data.frame()             # term table shown in the GUI
    .datasets <- list()               # every loaded data set, keyed by name
    #.dataSpeialPlot <- list()
    .validateData <- data.frame()     # data kept aside for validation
    ## for save...splom, abs...
    .dataSpecialPlot <- list()        # named data used by special plots (splom, abs, ...)
    ## multiple page saving
    .dataLayoutPlot <- NULL           # plots accumulated for multi-page layout saving
    #.datasetsno <- 0
    .dataType <- list()               # per-data-set type label, keyed by name
    ## model comparison
    .comDataName <- NULL              # data-set name selected for model comparison
    .comMap <- data.frame()           # key mapping for model comparison
    ## for interactive
    .itDataName <- NULL               # data-set name selected for interactive graphics
    .itMap <- NULL                    # key mapping for interactive graphics
    # 600
    .subheight <- 600                 # default sub-window height (pixels, presumably)
    .subwidth <- 600*1.6              # default sub-window width
    .pkcode <- list()                 # history of generated code snippets
    .pkcodenote <- list()             # notes parallel to .pkcode
    .pkggobi <- list()                # default x/y variable pairs for GGobi
    # saving format
    .saveFormat <- list()             # chosen figure-saving format
    .figureConfig <- list(col="royalblue")   # figure options (default plot colour)
    # ggobi data type
    .ggobiPlotType <- list()          # per-data-set GGobi plot type, keyed by name

    ## Public accessor list; every element closes over the state above.
    list(
        ## term table ---------------------------------------------------------
        getTerm = function() return(.term),
        setTerm = function(term.df)
        {
            .term <<- term.df
        },

        ## validation data ----------------------------------------------------
        getValidateData = function() return(.validateData),
        setValidateData = function(vdata) .validateData <<- vdata,

        ## for abs plot, get special data set instead of default data: getCurrentData()
        getNameDataSpecialPlot = function() return(names(.dataSpecialPlot)),
        getDataSpecialPlot = function(i) return(.dataSpecialPlot[[i]]),
        setDataSpecialPlot = function(tdata, tname) .dataSpecialPlot[[tname]] <<- tdata,
        cleanDataSpecialPlot = function() .dataSpecialPlot <<- list(),

        ## for multiple layout
        # cleanDataLayoutPlot always put below $show... syntax set.
        getDataLayoutPlot = function() return(.dataLayoutPlot),
        # Appends (does not replace) -- accumulates plots across calls.
        setDataLayoutPlot = function(tdata) .dataLayoutPlot <<- c(.dataLayoutPlot, tdata),
        cleanDataLayoutPlot = function() .dataLayoutPlot <<- NULL,

        ## data sets ----------------------------------------------------------
        getDatasets = function() return(.datasets),
        setDatasets = function(dataset, dataname)
        {
            .datasets[[dataname]] <<- dataset
            #names(.datasets)[[thisDatano]] <<- thisDatano
            #.datasetsno <<- .datasetsno + 1
        },
        # Returns the data set at position `currentNo`, or the most recently
        # added one when called with no argument; NULL when none are loaded.
        getCurrentData = function(currentNo)
        {
            if (missing(currentNo))
            {
                if(length(.datasets)!= 0)
                {
                    return(.datasets[[length(.datasets)]])
                }
                else return(NULL)
            }
            else
            {
                # NOTE(review): unlike getCurrentDataType(), the bounds check
                # here is commented out -- an out-of-range index raises a
                # subscript error instead of returning NULL. Confirm intent.
                #if(currentNo > 0 && currentNo <= length(.datasets))
                #{
                    return(.datasets[[currentNo]])
                #}
                #else return(NULL)
            }
        },
        getTotalDataLen = function() return(length(.datasets)),
        # setTotalDataLen = function(thisDatano) .datasetsno <<- thisDatano,
        getTotalDataName = function() return(names(.datasets)),

        ## data types ---------------------------------------------------------
        # Same lookup convention as getCurrentData(), but out-of-range indices
        # return NULL instead of erroring.
        getCurrentDataType = function(currentNo)
        {
            if (missing(currentNo))
            {
                if(length(.dataType)!= 0) return(.dataType[[length(.dataType)]])
                else return(NULL)
            }
            else
            {
                if(currentNo > 0 && currentNo <= length(.dataType))
                {
                    return(.dataType[[currentNo]])
                }
                else return(NULL)
            }
        },
        setCurrentDataType = function(thisDataType, dataname) .dataType[[dataname]] <<- thisDataType,

        ## interactive graphics state ------------------------------------------
        getItDataName = function() return(.itDataName),
        setItDataName = function(itname) .itDataName <<- itname,
        getItMap = function() return(.itMap),
        setItMap = function(key) .itMap <<- key,

        ## model-comparison state ----------------------------------------------
        getComDataName = function() return(.comDataName),
        setComDataName = function(comname) .comDataName <<- comname,
        getComMap = function() return(.comMap),
        setComMap = function(key.df) .comMap <<- key.df,

        ## sub-window geometry (read-only) -------------------------------------
        getSubHeight = function() return(.subheight),
        getSubWidth = function() return(.subwidth),

        ## generated-code history ----------------------------------------------
        getPKCode = function(i) return(.pkcode[[i]]),
        getPKCodeLen = function(i) return(length(.pkcode)),   # `i` is unused
        setPKCode = function(newlist)   # appends to the history
        {
            newlen <- length(.pkcode)
            .pkcode[[newlen+1]] <<- newlist
        },
        cleanPKCode = function() .pkcode <<- list(),
        getPKCodeNote = function(i) return(.pkcodenote[[i]]),
        getAllPKCodeNote = function(i) return(.pkcodenote),   # `i` is unused
        setPKCodeNote = function(newlist)   # appends to the notes
        {
            newlen <- length(.pkcodenote)
            .pkcodenote[[newlen+1]] <<- newlist
        },
        cleanPKCodeNote = function() .pkcodenote <<- list(),
        # NOTE(review): the empty `{ }` element below evaluates to NULL and adds
        # an unnamed entry to this accessor list; it looks like leftover editing
        # debris. Harmless for named access, but confirm before removing.
        {
        },

        # set default x, y for ggobi. a list of x,y name
        setPKGGobi = function(newxy)
        {
            newlen <- length(.pkggobi)
            .pkggobi[[newlen+1]] <<- newxy
        },
        getPKGGobi = function(i) return(.pkggobi[[i]]),
        cleanPKGGobi = function() .pkggobi <<- list(),

        ## figure saving --------------------------------------------------------
        setSaveFormat = function(newformat) .saveFormat <<- newformat,
        getSaveFormat = function() return(.saveFormat),
        setFigConfig = function(newconfig) .figureConfig <<- newconfig,
        getFigConfig = function() return(.figureConfig),

        ## ggobi time series plot requirement
        # Lookup convention mirrors getCurrentDataType(); out-of-range -> NULL.
        getGGobiPlotType = function(currentNo)
        {
            if (missing(currentNo))
            {
                if(length(.ggobiPlotType)!= 0) return(.ggobiPlotType[[length(.ggobiPlotType)]])
                else return(NULL)
            }
            else
            {
                if (currentNo > length(.ggobiPlotType)) return(NULL)
                else return(.ggobiPlotType[[currentNo]])
            }
        },
        setGGobiPlotType = function(typelist, dataname)
        {
            .ggobiPlotType[[dataname]] <<- typelist
        },

        # Reset (almost) all state to its initial value.
        # NOTE(review): .saveFormat and .figureConfig are NOT reset here --
        # confirm whether that is intentional.
        cleanAll = function()
        {
            .term <<- data.frame()
            .datasets <<- list()
            .dataType <<- list()
            .ggobiPlotType <<- list()
            .validateData <<- data.frame()
            .dataSpecialPlot <<- list()
            .dataLayoutPlot <<- NULL
            ## model comparison
            .comDataName <<- NULL
            .comMap <<- data.frame()
            ## for interactive
            .itDataName <<- NULL
            .itMap <<- NULL
            .pkcode <<- list()
            .pkcodenote <<- list()
            .pkggobi <<- list()
        }
    )
})
## mainGUI ----
## Top-level handles for the PKgraph GUI; all initialised to NULL here and
## populated later by the interface code (exact widget types not visible in
## this file -- confirm against the GUI construction code).
PKW <- NULL
pmg.dialog.notebook <- NULL
pmg.dialog.notebook2 <- NULL
pmg.statusBar <- NULL
pk.dirname <- NULL
pk.dir <- NULL

## current data ----
global.data <- NULL

## for specific data type ----
## String labels used to tag data sets of a specific kind.
requiredDataType.PKmodel <- "PK data"
modelComType <- "ModelComparison"
|
ee83d7a9234bf7bffe25a1f0c5977f9f784d1d13
|
c8e55ba787d500c25d351bb903b310768fbccf1b
|
/magic.R
|
ca40f5271463678e8d00dd50a25a5017ebabe93d
|
[] |
no_license
|
cem1002/2015-10-05
|
17c39bf33cb869eb46587767176ab53d34c597f6
|
4f6e2873cccb4aeff0086d0cd9988e47a7130795
|
refs/heads/master
| 2021-01-10T13:26:17.284299
| 2015-10-05T13:24:06
| 2015-10-05T13:24:06
| 43,677,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
magic.R
|
# Quietly load dplyr. library() (unlike require()) fails fast with an error
# when the package is missing, instead of returning FALSE and letting the
# script break later with a confusing "could not find function" message.
suppressMessages(library(dplyr))

# Draw a random sample from source 1: Normal(mean = 15, sd = 2).
#
# @param sample_size number of draws (default 1).
# @return a one-column tibble with numeric column `x` holding the draws.
random_source_1 <- function(sample_size = 1) {
  # tibble() replaces the long-deprecated data_frame() alias (same behavior).
  tibble(x = rnorm(n = sample_size, mean = 15, sd = 2))
}

# Draw a random sample from source 2: Exponential(rate = 0.5).
#
# @param sample_size number of draws (default 1).
# @return a one-column tibble with numeric column `x` holding the draws.
random_source_2 <- function(sample_size = 1) {
  tibble(x = rexp(n = sample_size, rate = 0.5))
}
|
04a1a2508a53fcb7d35e9adf262d1b3a5f77bbf2
|
70bf0ea97a15fb6cccf36f693bf162e7dc56bc0c
|
/man/crwMLE.Rd
|
71e0676a75738ec6239cf0c53c9ad735d7f80b77
|
[] |
no_license
|
jmlondon/crawl
|
047389cd73acd7ea093dc9b1be78c10a32496094
|
2a3cb39c97722d2867ca826f6155924517951015
|
refs/heads/master
| 2022-11-11T20:38:38.440220
| 2022-10-26T18:32:12
| 2022-10-26T18:32:12
| 61,844,867
| 1
| 0
| null | 2016-06-24T00:13:50
| 2016-06-24T00:13:50
| null |
UTF-8
|
R
| false
| true
| 9,674
|
rd
|
crwMLE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crwMLE.R
\name{crwMLE}
\alias{crwMLE}
\alias{crwMLE.default}
\alias{crwMLE.SpatialPoints}
\alias{crwMLE.sf}
\title{Fit Continuous-Time Correlated Random Walk Models to Animal Telemetry Data}
\usage{
crwMLE(data, ...)
\method{crwMLE}{default}(
data,
mov.model = ~1,
err.model = NULL,
activity = NULL,
drift = FALSE,
coord = c("x", "y"),
proj = NULL,
Time.name = "time",
time.scale = NULL,
theta = NULL,
fixPar = NULL,
method = "Nelder-Mead",
control = NULL,
constr = list(lower = -Inf, upper = Inf),
prior = NULL,
need.hess = TRUE,
initialSANN = list(maxit = 200),
attempts = 1,
retrySD = 1,
skip_check = FALSE,
...
)
\method{crwMLE}{SpatialPoints}(
data,
mov.model = ~1,
err.model = NULL,
activity = NULL,
drift = FALSE,
Time.name = "time",
time.scale = NULL,
theta = NULL,
fixPar = NULL,
method = "Nelder-Mead",
control = NULL,
constr = list(lower = -Inf, upper = Inf),
prior = NULL,
need.hess = TRUE,
initialSANN = list(maxit = 200),
attempts = 1,
retrySD = 1,
skip_check = FALSE,
coord = NULL,
...
)
\method{crwMLE}{sf}(
data,
mov.model = ~1,
err.model = NULL,
activity = NULL,
drift = FALSE,
Time.name = "time",
time.scale = NULL,
theta = NULL,
fixPar = NULL,
method = "Nelder-Mead",
control = NULL,
constr = list(lower = -Inf, upper = Inf),
prior = NULL,
need.hess = TRUE,
initialSANN = list(maxit = 200),
attempts = 1,
retrySD = 1,
skip_check = FALSE,
...
)
}
\arguments{
\item{data}{a data set of location observations as a data.frame, tibble,
SpatialPointsDataFrame ('sp' package), or a data.frame of class 'sf' that
contains a geometry column of type \code{sfc_POINT}}
\item{...}{further arguments passed to or from other methods}
\item{mov.model}{formula object specifying the time indexed covariates for
movement parameters.}
\item{err.model}{A 2-element list of formula objects specifying the time
indexed covariates for location error parameters.}
\item{activity}{formula object giving the covariate for the activity (i.e., stopped or fully moving)
portion of the model.}
\item{drift}{logical indicating whether or not to include a random
drift component. For most data this is usually not necessary. See \code{\link{northernFurSeal}} for an example
using a drift model.}
\item{coord}{A 2-vector of character values giving the names of the "X" and
"Y" coordinates in \code{data}. Ignored if \code{data} inherits class
'sf' or 'sp'.}
\item{proj}{A valid epsg integer code or proj4string for \code{data} that does not
inherit either 'sf' or 'sp'. A valid 'crs' list is also accepted. Otherwise, ignored.}
\item{Time.name}{character indicating name of the location time column. It is
strongly preferred that this column be of type POSIXct and in UTC.}
\item{time.scale}{character. Scale for conversion of POSIX time to numeric
for modeling. Defaults to "hours" and most users will not need to change this.}
\item{theta}{starting values for parameter optimization.}
\item{fixPar}{Values of parameters which are held fixed to the given value.}
\item{method}{Optimization method that is passed to \code{\link{optim}}.}
\item{control}{Control list which is passed to \code{\link{optim}}.}
\item{constr}{Named list with elements \code{lower} and \code{upper} that
are vectors the same length as theta giving the box constraints for the
parameters}
\item{prior}{A function returning the log-density function of the parameter
prior distribution. THIS MUST BE A FUNCTION OF ONLY THE FREE PARAMETERS. Any
fixed parameters should not be included.}
\item{need.hess}{A logical value which decides whether or not to evaluate
the Hessian for parameter standard errors}
\item{initialSANN}{Control list for \code{\link{optim}} when simulated
annealing is used for obtaining start values. See details}
\item{attempts}{The number of times likelihood optimization will be
attempted in cases where the fit does not converge or is otherwise non-valid}
\item{retrySD}{optional user-provided standard deviation for adjusting
starting values when attempts > 1. Default value is 1.}
\item{skip_check}{Skip the likelihood optimization check and return the fitted values.
Can be useful for debugging problem fits.}
}
\value{
A list with the following elements:
\item{par}{Parameter maximum likelihood estimates (including fixed parameters)}
\item{estPar}{MLE without fixed parameters}
\item{se}{Standard error of MLE}
\item{ci}{95\% confidence intervals for parameters}
\item{Cmat}{Parameter covariance matrix}
\item{loglik}{Maximized log-likelihood value}
\item{aic}{Model AIC value}
\item{coord}{Coordinate names provided for fitting}
\item{fixPar}{Fixed parameter values provided}
\item{convergence}{Indicator of convergence (0 = converged)}
\item{message}{Messages given by \code{optim} during parameter optimization}
\item{activity}{Model provided for stopping variable}
\item{drift}{Logical value indicating random drift model}
\item{mov.model}{Model description for movement component}
\item{err.model}{Model description for location error component}
\item{n.par}{number of parameters}
\item{nms}{parameter names}
\item{n.mov}{number of movement parameters}
\item{n.errX}{number of location error parameters for the ``longitude'' error model}
\item{n.errY}{number of location error parameters for the ``latitude'' error model}
\item{stop.mf}{covariate for stop indication in stopping models}
\item{polar.coord}{Logical indicating coordinates are polar latitude and longitude}
\item{init}{Initial values for parameter optimization}
\item{data}{Original data.frame used to fit the model}
\item{lower}{The lower parameter bounds}
\item{upper}{The upper parameter bounds}
\item{need.hess}{Logical value}
\item{runTime}{Time used to fit model}
}
\description{
The function uses the Kalman filter to estimate movement parameters in a
state-space version of the continuous-time movement model. Separate models
are specified for movement portion and the location error portion. Each
model can depend on time indexed covariates. A \dQuote{haul out} model where
movement is allowed to completely stop, as well as, a random drift model can
be fit with this function.
}
\details{
\itemize{
\item A full model specification involves 4 components: a movement model, an
activity model, 2 location error models, and a drift indication. The
movement model (\code{mov.model}) specifies how the movement parameters
should vary over time. This is a function of specified, time-indexed,
covariates. The movement parameters (sigma for velocity variation and beta
for velocity autocorrelation) are both modeled with a log link as par =
exp(eta), where eta is the linear predictor based on the covariates. The
\code{err.model} specification is a list of 2 such models, one for
\dQuote{X (longitude)} and one for \dQuote{Y (latitude)} (in that order) location
error. If only one location error model is given, it is used for both
coordinates (parameter values as well). If \code{drift.model} is set to
\code{TRUE}, then, 2 additional parameters are estimated for the drift
process, a drift variance and a beta multiplier.
\item \code{theta} and \code{fixPar} are vectors with the appropriate number or
parameters. \code{theta} contains only those parameters which are to be
estimated, while \code{fixPar} contains all parameter values with \code{NA}
for parameters which are to be estimated.
\item The data set specified by \code{data} must contain a numeric or POSIXct column which is
used as the time index for analysis. The column name is specified by the
\code{Time.name} argument and it is strongly suggested that this column be of
POSIXct type and in UTC. If a POSIXct column is used it is internally converted to a
numeric vector with units of \code{time.scale}. \code{time.scale} defaults to
NULL and an appropriate option will be chosen ("seconds","minutes","days","weeks")
based on the median time interval. The user can override this by specifying one
of those time intervals directly. If a numeric time vector is used, then
the \code{time.scale} is ignored and there
is no adjustment to the data. Also, for activity models, the
activity covariate must be between 0 and 1 inclusive, with 0 representing complete stop
of the animal (no true movement, however, location error can still occur) and 1
represent unhindered movement. The coordinate location should have \code{NA} where no
location is recorded, but there is a change in the movement covariates.
\item The CTCRW models can be difficult to provide good initial values for
optimization. If \code{initialSANN} is specified then simulated annealing is
used first to obtain starting values for the specified optimization method.
If simulated annealing is used first, then the returned \code{init} list of
the crwFit object will be a list with the results of the simulated annealing
optimization.
\item The \code{attempts} argument instructs \code{crwMLE} to attempt a fit
multiple times. Each time, the fit is inspected for convergence, whether
the covariance matrix could be calculated, negative values in the diag
of the covariance matrix, or NA values in the standard errors. If, after
n attempts, the fit is still not valid a \code{simpleError} object is
returned. Users should consider increasing the number of attempts OR
adjusting the standard deviation value for each attempt by setting
\code{retrySD}. The default value for \code{retrySD} is 1, but users may
need to increase or decrease to find a valid fit. Adjusting other
model parameters may also be required.
}
}
\author{
Devin S. Johnson, Josh M. London
}
|
bf6178c232134a7e9b7ce106f6bfc920e489b149
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961505-test.R
|
e64439c585195931d33a4efa547c28fe2d827c57
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 522
|
r
|
1609961505-test.R
|
# Auto-generated valgrind/libFuzzer regression input for
# diffrprojects:::dist_mat_absolute. The exact integer payload reproduces a
# previously observed issue, so it must not be edited or re-formatted.
# NOTE(review): y = integer(0) -- presumably this exercises the empty-vector
# code path of dist_mat_absolute; confirm against the C++ implementation.
testlist <- list(x = c(255L, -16776961L, -16777216L, 0L, 15204352L, 65535L, 436267008L, 255L, -15060993L, 637527296L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
# Invoke the target with the list elements as named arguments and print the
# structure of whatever comes back (str() is the harness's only observation).
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
7d8e27d32988068509596440103f8c862439e3bd
|
42e34dd5956efe9c1236a7d283cfa340e50ac3a1
|
/R_Time_Series_Data_Analysis/Ch04_Correlation_Analysis_of_Time_Series_Data/07_Trend_test_spearman_ptest.R
|
0eff5d3d19e84c083a8b44138bb2b44c8e594ed9
|
[] |
no_license
|
Fintecuriosity11/Time_Series_Analysis
|
2e8720fd76c2ed8bb3b0e234fd243f4890636fa4
|
7d3c813ec55c61339c4c4acea0f36ac534e056a9
|
refs/heads/master
| 2022-12-24T20:16:56.180854
| 2020-10-08T06:21:11
| 2020-10-08T06:21:11
| 277,093,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,106
|
r
|
07_Trend_test_spearman_ptest.R
|
##########################################################################################################################################
# (Note) -> it is recommended to run this code sequentially!
# If an error occurs, leave an issue under the github path Time_Series_Analysis/R_Time_Series_Data_Analysis
##########################################################################################################################################

### Trend-test example: Trend Test - Spearman rho test
# Whether a time series has a trend is tested here with the non-parametric
# Spearman rank-correlation (rho) test.
# Spearman's rho test detects a trend from the rank correlation between the
# time index and the observed data.

library(forecast)
library(tsDyn)
library(tseries)

# dd2 = matrix(c(1142, 1242, 1452, 1543, 1125, 1262, 1456,1572, # seasonal time series data (dd2)
# 1143, 1259, 1462, 1553, 1121, 1258, 1472, 1546,
# 1154, 1249, 1477, 1548))
#
#
# dd2.ts = ts(data=dd1, start=c(2006,1), frequency=4) # display with quarterly frequency.
# dd2.ts

# Analysis data: random-variation series (dd1) and seasonal/trend series (dd4).
# NOTE(review): dd1 and dd4 are not defined in this script -- they are assumed
# to exist in the workspace from earlier chapters (the transcript below shows
# "[Workspace loaded from ~/.RData]"); confirm before running standalone.

tt<-seq(1:20) # convert to a time index.
rr1=rank(dd1) # ranks of series dd1
rr2=rank(dd4) # ranks of series dd4
(cbind(tt,rr1,rr2)) # print the input data.
cor.test(tt,rr1, method=c("spearman")) # correlation test for dd1.
cor.test(tt,rr2, method = c("spearman")) # correlation test for dd4.
graphics.off() # clear the graphics device.

# Trend test results:
# The inputs are the time index "tt" and the data ranks "rr1", "rr2".
# Ranks are computed with rank().
# For the random-variation series "dd1", the correlation between time and rank
# gives rho = -0.13 with p = 0.57, so the null hypothesis that time and rank
# are independent cannot be rejected; the series is judged to have no trend
# and to be stationary.
# For the seasonal/trend series "dd4", rho = 0.76 with p = 0.0001 rejects the
# independence hypothesis, so a trend is judged to exist.

############################################################ results (print) #################################################################
# # # > [Workspace loaded from ~/.RData]

# > tt<-seq(1:20)

# > rr1=rank(dd1)
# >
# > rr2=rank(dd4)

# > (cbind(tt,rr1,rr2))
# tt rr1 rr2
# [1,] 1 10 1
# [2,] 2 17 3
# [3,] 3 4 7
# [4,] 4 11 9
# [5,] 5 16 2
# [6,] 6 13 5
# [7,] 7 18 11
# [8,] 8 7 15
# [9,] 9 3 4
# [10,] 10 12 8
# [11,] 11 15 14
# [12,] 12 5 16
# [13,] 13 1 6
# [14,] 14 19 12
# [15,] 15 9 17
# [16,] 16 14 18
# [17,] 17 6 10
# [18,] 18 8 13
# [19,] 19 20 19
# [20,] 20 2 20

# > cor.test(tt,rr1, method=c("spearman"))
#
# Spearman's rank correlation rho
#
# data: tt and rr1
# S = 1508, p-value = 0.5725
# alternative hypothesis: true rho is not equal to 0
# sample estimates:
# rho
# -0.1338346
#
# >
# > cor.test(tt,rr2, method = c("spearman"))
#
# Spearman's rank correlation rho
#
# data: tt and rr2
# S = 306, p-value = 0.000108
# alternative hypothesis: true rho is not equal to 0
# sample estimates:
# rho
# 0.7699248
##########################################################################################################################################
|
62aa7652e5357bd37eba4da15b815c841c2fd9b5
|
abb44776a7aacefd81bb60432ee711c8964d35e9
|
/R/ddi_generation_shiny.R
|
1654ee6d9a1d872154cb283b92869d2d2baf780c
|
[
"MIT"
] |
permissive
|
nyuglobalties/diyddi
|
0a79dc24f5704e53eb101e2d23673fc8c28b36cb
|
3b2124dfad9c1e06a99a11de6cab001029b8a020
|
refs/heads/main
| 2023-04-10T04:54:26.161496
| 2022-12-06T18:46:53
| 2022-12-06T18:46:53
| 492,913,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,825
|
r
|
ddi_generation_shiny.R
|
# UI for the DDI-codebook Shiny module: a tab holding a download button and a
# verbatim preview of the generated XML.
#
# @param id module id used to namespace the input/output IDs.
# @return a shiny tabPanel suitable for inclusion in a tabset.
ddi_generation_ui <- function(id) {
  ns <- NS(id)

  codebook_download <- downloadButton(ns("download_DDI"), "Download codebook")
  codebook_preview <- verbatimTextOutput(ns("XML"))

  tabPanel(
    "DDI Codebook",
    codebook_download,
    tags$br(),
    tags$br(),
    codebook_preview
  )
}
# Server logic for the DDI-codebook Shiny module: renders the generated DDI
# XML as text and serves it for download as codebook.xml.
#
# @param id module id (must match the one passed to ddi_generation_ui).
# @param dat reactive returning the data used by generate_ddi_codebook().
# @param filepth currently unused parameter -- confirm whether it can be dropped.
ddi_generation_server <- function(id, dat, filepth) {
  moduleServer(id, function(input, output, session) {
    # Preview pane: generate the codebook, convert it to XML, then apply a
    # sequence of order-dependent regex rewrites that reflow selected elements
    # (relPubl/relMat/relStdy/othRefs) onto their own lines for readability.
    # NOTE(review): several patterns below (e.g. gsub("<", " <", ...) and
    # gsub(">", ">", ...)) look like HTML entities (&lt;/&gt;) that were
    # mangled at some point -- as written some are no-ops; confirm against
    # the original repository before touching them.
    output$XML <- renderText({
      req(dat())
      ddi <- generate_ddi_codebook(dat())
      ddi <- rddi::as_xml(ddi)
      # Break mixed content out of the citation-like elements.
      ddi <- gsub("(<relPubl[^>]*>)((?!<).+)(<)", "\\1\\2\n <", ddi, perl = TRUE)
      ddi <- gsub("(<relMat)([^>]*>)((?!<).+)(<)", "\\1\\2\\3\n <", ddi, perl = TRUE)
      ddi <- gsub("(<relStdy)([^>]*>)((?!<).+)(<)", "\\1\\2\\3\n <", ddi, perl = TRUE)
      ddi <- gsub("(<othRefs)([^>]*>)((?!<).+)(<)", "\\1\\2\\3\n <", ddi, perl = TRUE)
      # Put the closing tags of those elements on their own lines.
      ddi <- gsub("(>)(</relPubl>)", ">\n \\2", ddi)
      ddi <- gsub("(>)(</relMat>)", ">\n \\2", ddi)
      ddi <- gsub("(>)(</relStdy>)", ">\n \\2", ddi)
      ddi <- gsub("(>)(</othRefs>)", ">\n \\2", ddi)
      # match < where the < is on the same line as the >
      ddi <- gsub("([^ \n])(<)", "\\1<", ddi)
      # double check what happens when it's non-mixed content
      ddi <- gsub("<", " <", ddi)
      ddi <- gsub(">", ">", ddi)
      #ddi <- xml2::read_xml(ddi)
      # renderText() needs a character scalar, not an xml/ddi object.
      ddi <- as.character(ddi)
    })

    # Download handler: regenerate the codebook fresh (not from the preview),
    # round-trip it through xml2 and write it to the temp file Shiny serves.
    output$download_DDI <- downloadHandler(
      filename = "codebook.xml",
      content = function(file) {
        ddi <- rddi::as_xml(generate_ddi_codebook(dat()))
        # NOTE(review): the two gsub calls below are identity substitutions as
        # written -- almost certainly mangled &lt;/&gt; unescaping; confirm.
        ddi <- gsub("<", "<", ddi)
        ddi <- gsub(">", ">", ddi)
        ddi <- xml2::read_xml(ddi)
        xml2::write_xml(ddi, file)
      },
      contentType = "text/xml"
    )
  })
}
|
3d0b8fb7f36da689c2ac0f495871fb84dd26026b
|
a85e536f8cbe2af99fab307509920955bd0fcf0a
|
/tests/testthat/test-corTest.R
|
5e7d99df22df10c028f232d13993b2da60b7986e
|
[] |
no_license
|
ProjectMOSAIC/mosaic
|
87ea45d46fb50ee1fc7088e42bd35263e3bda45f
|
a64f2422667bc5f0a65667693fcf86d921ac7696
|
refs/heads/master
| 2022-12-13T12:19:40.946670
| 2022-12-07T16:52:46
| 2022-12-07T16:52:46
| 3,154,501
| 71
| 27
| null | 2021-02-17T21:52:00
| 2012-01-11T14:58:31
|
HTML
|
UTF-8
|
R
| false
| false
| 1,363
|
r
|
test-corTest.R
|
context("cor.test()")

testthat::test_that("Cor test works", {
  require(graphics)

  # Expected htest object for Pearson's product-moment correlation between the
  # CONT and INTG ratings of USJudgeRatings. The original file kept two
  # byte-identical copies (testcase1/testcase2); both formula interfaces must
  # yield the same result, so one shared fixture is sufficient (DRY).
  expected <- structure(list(statistic = c(t = -0.860504567943113), parameter = c(df = 41L),
      p.value = 0.394515192722409, estimate = c(cor = -0.133190890463189),
      null.value = c(correlation = 0), alternative = "two.sided",
      method = "Pearson's product-moment correlation", data.name = "CONT and INTG",
      conf.int = structure(c(-0.41685910878005, 0.174118233649785
      ), conf.level = 0.95)), class = "htest")

  # Two-sided formula interface: ~ x + y
  expect_equivalent(expected, cor.test(~ CONT + INTG, data = USJudgeRatings))
  # One-sided formula interface: y ~ x
  expect_equivalent(expected, cor.test(CONT ~ INTG, data = USJudgeRatings))
})
|
929d864bfc025625986b9a4b6e53e80d6b6b5e0b
|
a9e051485379fb7e569a7c8458045e9eb56d4cf8
|
/awesome/R/DataScienceR/useful_commands.R
|
d4d0c5e77aa2463ef528f26dbe35e92d9db7b9c3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
liujiamingustc/phd
|
7634056500c481d39fa036bf0ed744c1d13b0035
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
refs/heads/master
| 2020-05-17T07:02:56.000146
| 2019-04-24T15:04:19
| 2019-04-24T15:04:19
| 183,567,207
| 4
| 0
| null | 2019-04-26T06:04:37
| 2019-04-26T06:04:37
| null |
UTF-8
|
R
| false
| false
| 1,758
|
r
|
useful_commands.R
|
# Useful R commands (reference snippets -- placeholder names, not runnable as-is)
# taken from https://github.com/FlorianMuellerklein/R

# Clear the console by printing 50 blank lines.
clc <- function() cat(rep("\n", 50))
clc()

# Read SPSS (requires the 'foreign' package)
read.spss(file, use.value.labels = TRUE, to.data.frame = TRUE)

# Subset based on factor (fixed typo: "subest" -> subset)
newdataframe <- subset(maindataframe, maindataframe$factor == whatever)

# Label factors (categories)
dataframe$whatevercolumn <- factor(dataframe$whatevercolumn, labels = c('blah', 'blah'))

# Change column names (fixed typo: "data.fram" -> dataframe placeholder)
colnames(dataframe)[x] <- "newname"

# Plot with factor colors
qplot(dataframe$whatevercolumn, dataframe$whatevercolumn, color = dataframe$factor)

# Pretty histograms
ggplot(data.frame, aes(x = datacolumm, fill = factorcolumn)) + geom_bar()

# Standard deviation of a column
sapply(dataframe$column, sd)

# T-test using a factor
t.test(dataframe$whatevercolumn ~ dataframe$factor)

# Count values in a factor
table(dataframe$factor)

# Summary stats
summary(dataframe$whatevercolumn)

# Check if data is normal (fixed: shapiro.test takes a single numeric sample;
# test x and y separately)
shapiro.test(x)

# ANOVA 1-way
anova <- aov(data ~ factor)
summary(anova)

# ANOVA 2-way
anova <- aov(data ~ factor * factor)
summary(anova)

# After running ANOVA you can see the pair-wise comparison
TukeyHSD('nameofanova')

# Fit a linear regression
fit <- lm(y ~ x, data = data.frame)
fit <- lm(y ~ x1 + x2 + ... + xn, data = data.frame)

# Predict using fitted regression (variables must match the ones used to fit)
predict(fit, newdata = data.frame(variable(x) = listofnewvalues))

# Plotting and subsetting two time-series data sets on the same graph
ts.plot(ts(a.ts[100:150]), ts(b.ts[100:150]), gpars = list(col = c('black', 'red')))
# or
ggplot(df, aes(x = timevariable, y = value, color = variable, group = variable)) + geom_line()

# Check if a specific value is present in an array
'value' %in% array  # or
is.element('value', array)
|
415f50627a8aa3cd04080003880658df85648e8a
|
580eb05c82c46724defaa72508b2fcf44340a5de
|
/man/cv.lasso.compreg.Rd
|
ae68e168e68ec243cad91f530a97d49ff7d0ea32
|
[] |
no_license
|
cran/Compositional
|
5bf22542a6242cc9c4ff66d5e081332e65f4fc76
|
608fa713f4c50933b580d1b3097bc81051777f37
|
refs/heads/master
| 2023-07-06T09:05:20.638372
| 2023-06-29T18:10:02
| 2023-06-29T18:10:02
| 51,369,033
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,582
|
rd
|
cv.lasso.compreg.Rd
|
\name{Cross-validation for the LASSO log-ratio regression with compositional response}
\alias{cv.lasso.compreg}
\title{
Cross-validation for the LASSO log-ratio regression with compositional response
}
\description{
Cross-validation for the LASSO log-ratio regression with compositional response.
}
\usage{
cv.lasso.compreg(y, x, alpha = 1, nfolds = 10,
folds = NULL, seed = NULL, graph = FALSE)
}
\arguments{
\item{y}{
A numerical matrix with compositional data. Zero values are not allowed as the additive
log-ratio transformation (\code{\link{alr}}) is applied to the compositional response prior to implementing
the LASSO algortihm.
}
\item{x}{
A matrix with the predictor variables.
}
\item{alpha}{
The elastic net mixing parameter, with \eqn{0 \leq \alpha \leq 1}. The penalty is defined as a weighted
combination of the ridge and of the Lasso regression. When \eqn{\alpha=1} LASSO is applied, while
\eqn{\alpha=0} yields the ridge regression.
}
\item{nfolds}{
The number of folds for the K-fold cross validation, set to 10 by default.
}
\item{folds}{
If you have the list with the folds supply it here. You can also leave it NULL and it will create folds.
}
\item{seed}{
You can specify your own seed number here or leave it NULL.
}
\item{graph}{
If graph is TRUE, a plot of the cross-validated object will appear. Note that the
default value is FALSE, as shown in the usage above.
}
}
\details{
The K-fold cross validation is performed in order to select the optimal value for \eqn{\lambda}, the
penalty parameter in LASSO.
}
\value{
The outcome is the same as in the R package glmnet. The extra addition is that if "graph = TRUE", then the
plot of the cross-validated object is returned. The plot contains the logarithm of \eqn{\lambda} and the mean
squared error. The numbers on top of the figure show the number of set of coefficients for each component,
that are not zero.
}
\references{
Aitchison J. (1986). The statistical analysis of compositional data. Chapman & Hall.
Friedman, J., Hastie, T. and Tibshirani, R. (2010) Regularization Paths for Generalized Linear Models via
Coordinate Descent. Journal of Statistical Software, Vol. 33(1), 1-22.
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{lasso.compreg}, \link{lasso.klcompreg}, \link{lassocoef.plot}, \link{cv.lasso.klcompreg},
\link{comp.reg}
}
}
\examples{
library(MASS)
y <- rdiri( 214, runif(4, 1, 3) )
x <- as.matrix( fgl[, 2:9] )
mod <- cv.lasso.compreg(y, x)
}
|
4d967928d38641b5d0ddf19b96760386665ce486
|
efb67b529095add05d77312f981305690655b45a
|
/ggplot2/Scales/expansion/example2.R
|
92caf10c15f1f8036abbd00e660d88184a89b574
|
[] |
no_license
|
plotly/ssim_baselines
|
6d705b8346604004ae16efdf94e425a2989b2401
|
9d7bec64fc286fb69c76d8be5dc0899f6070773b
|
refs/heads/main
| 2023-08-14T23:31:06.802931
| 2021-09-17T07:19:01
| 2021-09-17T07:19:01
| 396,965,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
example2.R
|
# Jittered scatter of cut vs clarity for diamonds heavier than 2 carats.
# expansion(add = 2) pads the discrete x axis by two extra category-widths on
# each side (additive expansion, no multiplicative component).
p <-
  ggplot(subset(diamonds, carat > 2), aes(cut, clarity)) +
  geom_jitter() +
  scale_x_discrete(expand = expansion(add = 2))
|
304d48b463da8eeff172de7a4ae9d2be170297eb
|
39c61416e546d10d30ad024856380f220c7f4794
|
/man/sim_mte.Rd
|
74d5a2ba64b4a9aa84d3314702c3ec56c61b7925
|
[] |
no_license
|
theogab/bite
|
7eaf90720551e4766231e86e085fd5efe9c56ad4
|
eb5f5c2e426997f2aa20a91c73074198b7d8ee7e
|
refs/heads/master
| 2023-08-05T17:33:32.524641
| 2023-07-28T15:26:31
| 2023-07-28T15:26:31
| 149,416,803
| 3
| 1
| null | 2022-04-26T12:43:29
| 2018-09-19T08:21:00
|
R
|
UTF-8
|
R
| false
| true
| 2,379
|
rd
|
sim_mte.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_mte.R
\encoding{UTF-8}
\name{sim_mte}
\alias{sim_mte}
\title{Simulate MTE process}
\usage{
sim_mte(phy, map = NULL, model = "OU", pars = c(root = 2, theta = 1,
sigma_sq = 0.1, alpha = 1), sampling = c(1, 7), bounds = c(-Inf,
Inf))
}
\arguments{
\item{phy}{Phylogenetic tree}
\item{map}{list containing the mapping of regimes over each edge (see details).}
\item{model}{model specification for the simulation of trait mean evolution. Supported models are c("OU", "BM", "WN")}
\item{pars}{parameters used for the simulation of trait mean evolution (see details).}
\item{sampling}{vector of size 2 giving the min and max number of individual per species}
\item{bounds}{vector of size 2 giving the bounds of the mean}
}
\value{
returns a numeric vector giving the simulated mean value of the trait for each species of the tree.
}
\description{
Generate random values of trait mean simulated under a MTE process along a phylogenetic tree
}
\details{
map : the list must be ordered in the same order as phy$edge. Each element represents an edge and contains a vector indicating the time spent under each regime on the branch. The names of the regimes must appear on the map
pars : list containing parameters depending on the chosen model. Elements of that lists must be vectors of size 1 or n, with n = number of regimes in the map.
Each element of pars must be named with the corresponding parameter abbreviation.
Parameters used in the different models:
White Noise model (WN):
\itemize{
\item root: root value
\item sigma_sq: evolutionary rate, n regimes if "sigma" is specified in models
}
Brownian Motion model (BM):
\itemize{
\item root: root value
\item sigma_sq: evolutionary rate, n regimes if "sigma" is specified in models
}
Ornstein Uhlenbeck model (OU):
\itemize{
\item root: root value. Only used if "root" is specified in models
\item sigma_sq: evolutionary rate, n regimes if "sigma" is specified in models
\item theta: optimal value, n regimes if "theta" is specified in models
\item alpha: strength of selection, n regimes if "alpha" is specified in models
}
}
\examples{
library(phytools)
phy <- pbtree(n = 50)
Q <- cbind(c(-.002, .002), c(.002, -.002))
phy <- sim.history(phy, Q = Q)
# MBM and VOU
mte_phy <- sim_mte(phy, phy$maps)
}
\author{
Theo Gaboriau
}
|
13d8837e1a9e27ced29b4a69c8c7c908ad5c9f10
|
425599e93049043bcbd9bdd3b87d68513e314236
|
/01 SQL Crosstabs/9 LR.R
|
4223ba099cb78948df6c2702a6c83dbf951cc8df
|
[] |
no_license
|
godot107/DV_TableauProject2
|
c33bad9c92f214c8aef6999dd4255f046234957f
|
fda4e6d6ff1fda9e4a41d5d62566ff3ccd6a24b7
|
refs/heads/master
| 2016-09-06T02:24:47.135304
| 2015-04-14T21:29:20
| 2015-04-14T21:29:20
| 33,649,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 801
|
r
|
9 LR.R
|
# Pull the whole BABY table from the remote Oracle REST endpoint into a
# data.frame.
# NOTE(review): eval(parse(text = ...)) executes whatever text the server
# returns -- arbitrary code execution if the endpoint is not fully trusted.
# The DB credentials are also hard-coded in the URL headers; they should be
# moved to environment variables or a config file.
baby <- data.frame(eval(parse(text=substring(getURL(URLencode('http://129.152.144.84:5001/rest/native/?query="select * from BABY"'), httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_lar2478', PASS='orcl_lar2478', MODE='native_mode', MODEL='model', returnFor = 'R', returnDimensions = 'False'), verbose = TRUE), 1, 2^31-1))))
# Server-side window query: cumulative distribution of birth weight (BWT)
# within each RACE group via cume_dist(); result parsed from JSON and shown
# as a tibble.
df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query=
"select AGE, RACE, BWT, cume_dist()
OVER (PARTITION BY Race order by BWT) AS cume_dist
from BABY
order by 2,3 desc"
')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_lar2478', PASS='orcl_lar2478', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df)
|
ce7934eb3f76220e2efb6a4fdadd8e0b93cca073
|
f831092f94dfe1e14c95ee92b844ebda17f4b034
|
/package/man/splitLib.Rd
|
173032076b6ad9919277045d6db54d17c8a3c4a2
|
[] |
no_license
|
znoor/iSwathX
|
9c0e35c452899a958302900527b8e1bb349efeb9
|
aef0afa6ebb86ce6083173abffd08bb8637583a8
|
refs/heads/master
| 2021-07-09T04:22:50.812797
| 2020-06-14T12:11:13
| 2020-06-14T12:11:13
| 128,870,640
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,221
|
rd
|
splitLib.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitLib.R
\name{splitLib}
\alias{splitLib}
\title{Split spectra libraries into common and new spectra}
\usage{
splitLib(datBaseLib, datExtLib, alignSpecies = NA, nomod = TRUE)
}
\arguments{
\item{datBaseLib}{a data frame for base library}
\item{datExtLib}{a data frame for external/addon library}
\item{alignSpecies}{a character string or NA (default) representing the
species of the alignment base in the base library}
\item{nomod}{a logical value indicating whether peptide modifications
are ignored when splitting the libraries}
% NOTE(review): \usage declares no ignoreCharge argument; this item was
% renamed to match nomod in the signature -- confirm the description
% against the implementation in R/splitLib.R
}
\value{
a list of data frames composed of BaseCommon, ExtCommon and
ExtNew, corresponding to the common spectra of base library, common
spectra of external library and new spectra of external library
}
\description{
Split spectra libraries into common and new spectra
}
\examples{
libfiles <- paste(system.file("files",package="iSwathX"),
c("Lib2.txt","Lib3.txt"),sep="/")
datBaseLib <- readLibFile(libfiles[1], clean=TRUE, nomod=FALSE, nomc=FALSE)
datExtLib <- readLibFile(libfiles[2], clean=TRUE, nomod=FALSE, nomc=FALSE)
list.datLibs <- splitLib(datBaseLib, datExtLib, nomod=FALSE)
}
|
17b370593a62e11e4f0ce2e9b967562cfebbb4b3
|
5f591428924619dc53f9959fcb3e648c7d0f4758
|
/R/ppls.splines.cv.R
|
54c0fa83b18dd4db4d9b12424267710c55c3d263
|
[] |
no_license
|
cran/ppls
|
86d2c5d142d483b0e6b29306bcd8aa271d0de83f
|
bebd1679d288a93edb14e24e869efee385db3dbe
|
refs/heads/master
| 2021-05-15T02:19:09.482608
| 2018-07-20T13:38:39
| 2018-07-20T13:38:39
| 17,698,694
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,653
|
r
|
ppls.splines.cv.R
|
`ppls.splines.cv` <-
function(X,y,lambda=1,ncomp=NULL,degree=3,order=2,nknot=NULL,k=5,kernel=FALSE,scale=FALSE,reduce.knots=FALSE,select=FALSE){
  # k-fold cross-validation for penalized PLS on spline-transformed data.
  #
  # Args:
  #   X, y: predictor matrix and response vector.
  #   lambda: vector of candidate penalty parameters.
  #   ncomp: maximum number of PLS components (default min(n - 1, p)).
  #   degree, nknot, reduce.knots: passed to X2s() for the spline transform.
  #   order: order of the difference penalty built by Penalty.matrix().
  #   k: number of cross-validation folds.
  #   kernel, scale, select: passed on to penalized.pls().
  #
  # Returns:
  #   list(error.cv, min.ppls, lambda.opt, ncomp.opt) -- the CV error matrix
  #   (one row per lambda, one column per component count), its minimum, and
  #   the optimal lambda / number of components.
  n <- nrow(X)
  p <- ncol(X)
  if (is.null(ncomp)) ncomp <- min(n - 1, p)
  lambda <- as.vector(lambda)
  # Random partition of the observations into k folds.
  all.folds <- split(sample(1:n), rep(1:k, length = n))
  # Ensure that ncomp does not exceed the training-set size on any CV split.
  ntrain <- vector(length = k)
  for (i in seq_len(k)) {
    ntrain[i] <- n - length(all.folds[[i]])
  }
  ncomp <- min(ncomp, min(ntrain) - 1)
  error.cv <- matrix(0, length(lambda), ncomp)
  for (i in seq_len(k)) {
    omit <- all.folds[[i]]
    Xtrain <- X[-omit, , drop = FALSE]
    ytrain <- y[-omit]
    Xtest <- X[omit, , drop = FALSE]
    ytest <- y[omit]
    # Spline-transform train and test data using knots from the training set.
    Z <- X2s(Xtrain, Xtest, degree, nknot, reduce.knots = reduce.knots)
    Ztrain <- Z$Z
    Ztest <- Z$Ztest
    P <- Penalty.matrix(m = Z$sizeZ, order = order)
    # Block index: column j of Ztrain belongs to original variable blocks[j].
    # (Vectorized rep() replaces the original grow-in-a-loop construction.)
    blocks <- rep(seq_along(Z$sizeZ), Z$sizeZ)
    for (j in seq_along(lambda)) {
      penpls <- penalized.pls(Ztrain, ytrain, lambda[j]*P, ncomp, kernel, blocks = blocks, select = select, scale = scale)
      # Accumulate the fold MSE weighted by fold size.
      error.cv[j,] <- error.cv[j,] + length(ytest)*(new.penalized.pls(penpls, Ztest, ytest)$mse)
    }
  }
  error.cv <- error.cv/n
  value1 <- apply(error.cv, 1, min)
  lambda.opt <- lambda[which.min(value1)]
  ncomp.opt <- which.min(error.cv[lambda == lambda.opt,])
  min.ppls <- min(value1)
  return(list(error.cv = error.cv, min.ppls = min.ppls, lambda.opt = lambda.opt, ncomp.opt = ncomp.opt))
}
|
fa15740d7ebf24d0dc96ed18a33c2682639ef416
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BDgraph/examples/plot.bdgraph.Rd.R
|
5a29399a98c0c82f7d146288ebf34804bae03afc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
plot.bdgraph.Rd.R
|
# Auto-extracted example script for the plot.bdgraph S3 method (BDgraph pkg).
# The example body below is guarded by "## Not run:"/"## End(Not run)", so
# sourcing this file attaches BDgraph but executes nothing else; the ##D
# lines are the not-run example code, kept verbatim.
library(BDgraph)
### Name: plot.bdgraph
### Title: Plot function for 'S3' class '"bdgraph"'
### Aliases: plot.bdgraph
### ** Examples
## Not run: 
##D # Generating multivariate normal data from a 'random' graph
##D data.sim <- bdgraph.sim( n = 50, p = 6, size = 7, vis = TRUE )
##D 
##D bdgraph.obj <- bdgraph( data = data.sim )
##D 
##D plot( bdgraph.obj )
##D 
##D bdgraph.obj <- bdgraph( data = data.sim, save.all = TRUE )
##D 
##D plot( bdgraph.obj, number.g = 4 )
##D 
##D plot( bdgraph.obj, cut = 0.4 )
## End(Not run)
|
f88f353851f4051041173682bf08091fb84f4090
|
3124609cb9e1a76260a39fc96f56587058791ebe
|
/SimPopModelsPaper.R
|
31eb9864c7548b2e9508f5611b0807db5aa8f95e
|
[
"BSD-3-Clause"
] |
permissive
|
NorwegianVeterinaryInstitute/Hunting_wildlife_for_disease_detection
|
2c7e6710c0181d9f66b62c8390b54250ce560d88
|
3fdd0dfa557731f30133171533a33a2c276fd3dc
|
refs/heads/master
| 2022-11-24T21:54:11.145360
| 2020-06-26T10:15:56
| 2020-06-26T10:15:56
| 271,781,557
| 0
| 1
|
BSD-3-Clause
| 2020-06-26T10:15:57
| 2020-06-12T11:26:10
| null |
UTF-8
|
R
| false
| false
| 17,141
|
r
|
SimPopModelsPaper.R
|
# Method-of-moments conversion: given the mean (mu) and variance (var) of a
# Beta distribution, return its shape parameters as list(alpha, beta).
estBetaParams <- function(mu, var) {
  shape1 <- ((1 - mu) / var - 1 / mu) * mu ^ 2
  shape2 <- shape1 * (1 / mu - 1)
  list(alpha = shape1, beta = shape2)
}
#Harvest strategy 1 ‘ordinary’ (with threshold of maximum sex ratio):
#Number of adult males is specified by the harvest rate (H1[6]), but constrained so that there is
#a maximum number of adult females to each adult male.
#If mRatio=20, at least 5% of post-harvest adults should be male.
#Harvest strategy starts from first year (2018)
SimPop18Ktot_h <- function(P=11, T_Adf=100,K=Ktot,mRatio=10,phi3_m=phi3.m,phi3_sd=phi3.sd, f_m=f.m, f_sd=f.sd, phi1_m = phi1.m, phi1_sd=phi1.sd,H1=h1,hadf.max=hadf_max,hadf.m=hadf_m,Nmean=N.mean,Nsd=N.sd){
# Stochastic age/sex-structured projection under harvest strategy 1
# ('ordinary'): adult males are harvested at rate H1[6], but capped so that
# at most mRatio adult females remain per adult male after harvest.
# NOTE(review): the default argument values (Ktot, phi3.m, phi1.m, f.m, h1,
# hadf_max, hadf_m, N.mean, N.sd, ...) are global objects that must exist in
# the calling environment -- confirm they are defined before use.
# P: Number of time steps to simulate
# T_Adf: threshold of adult females. Do not hunt additional females if N.adf<= T_Adf
# H (h): scenario of harvest (harvest rates) for P time steps
# mRatio: set the upper limit of number of adult females per adult male
# K: In order to stabilize the population size, harvest rate of adult females (H1[5]) was for each year
# determined by the total population size compared to K (carrying capacity)
# hadf.max and hadf.m: parameters that decide the rate of change of adult female harvest in relation to population size and K
# Demographic rates are given as mean (_m) and standard deviation (_sd)
# PHI3: Adult annual survival probability
# PHI1: Juvenile summer survival probability
# f: fertility rate
# Nmean: mean pre-harvest population size
# Nsd: sd of pre-harvest population size
############################################################
# Define the priors for the parameters
############################################################
## POPULATION VECTORS
## (rows 1-6 = calf f, calf m, yearling f, yearling m, adult f, adult m)
N <- matrix(ncol=P, nrow=6) ## Pre harvest pop. vector. No monitoring
X <- matrix(ncol=P, nrow=6) ## Post harvest pop. vector. No monitoring
H <- matrix(ncol=P, nrow=6) ## Harvest numbers
N_tot <- matrix(ncol=P, nrow=1)
X_tot <- matrix(ncol=P, nrow=1)
H_tot <- matrix(ncol=P, nrow=1)
HU_tot <- matrix(ncol=P, nrow=1)
HAd_tot <- matrix(ncol=P, nrow=1)
# Initial pre-harvest population sizes; a floor of 10 animals guards against
# small or negative normal draws
N[1,1] <- max(round(rnorm(1,Nmean[1], Nsd[1]),0),10) # calves females
N[2,1] <- max(round(rnorm(1,Nmean[2], Nsd[2]),0),10) # calves males
N[3,1] <- max(round(rnorm(1,Nmean[3], Nsd[3]),0),10) # yearling females
N[4,1] <- max(round(rnorm(1,Nmean[4], Nsd[4]),0),10) # yearling males
N[5,1] <- max(round(rnorm(1,Nmean[5], Nsd[5]),0),10) # adult females
N[6,1] <- max(round(rnorm(1,Nmean[6], Nsd[6]),0),10) # adult males
## DEMOGRAPHIC PARAMETERS
## (Beta-distributed; moments converted to shapes via estBetaParams)
# fecundity;
f <- matrix(ncol=P, nrow=1)
# Juvenile summer survival
PHI1 <- matrix(ncol=P, nrow=1)
phi3_var=phi3_sd*phi3_sd
dp3<-estBetaParams(phi3_m,phi3_var)
# Adult survival is drawn once and held fixed for the whole simulation
PHI3<-rbeta(1,dp3$alpha,dp3$beta)
phi1_var=phi1_sd*phi1_sd
f_var=f_sd*f_sd
# Juvenile survival and fecundity are redrawn every year
for(i in 1:P){
dp1<-estBetaParams(phi1_m,phi1_var)
df<-estBetaParams(f_m,f_var)
PHI1[i]<-rbeta(1,dp1$alpha,dp1$beta)
f[i]<-rbeta(1,df$alpha,df$beta)
}
h<-H1
#############################
# SYSTEM PROCESS
#############################
for (t in 1:(P-1)){
###########################################################
# STATE PROCESS;
# PRE-HARVEST POPULATION VECTORS IN T+1
# Adult-female harvest rate scales with population size relative to K
hadf=hadf.max-hadf.m*(K-(N[1,t]+N[2,t]+N[3,t]+N[4,t]+N[5,t]+N[6,t]))/K
H5<-round(N[5,t]*hadf) # NOTE (translated): should variation around hadf be added?
hlow5<-ifelse(N[5,t]-T_Adf>0,N[5,t]-T_Adf,0)
# Never harvest adult females below the threshold T_Adf
H[5,t] <- ifelse(N[5,t]-H5>T_Adf,H5,hlow5)
T_Adm<-round((N[5,t]-H[5,t])/mRatio) # mRatio=20, i.e. at least 5 % of post-harvest adults are male
# NOTE (translated): should a minimum number of adult males be required?
#H[6,t] <- ifelse(N[6,t]-T_Adm>1,N[6,t]-T_Adm,0)
# Male harvest follows rate h[6] unless that would leave fewer than T_Adm males
H[6,t] <- ifelse(N[6,t]*(1-h[6])>T_Adm,round(N[6,t]*h[6]),max((N[6,t]-T_Adm),0))
# Binomial survival to next year after removing this year's harvest
N[3,t+1] <- rbinom(1, round(N[1,t]*(1-h[1])), PHI3)
N[4,t+1] <- rbinom(1, round(N[2,t]*(1-h[2])), PHI3)
N[5,t+1] <- rbinom(1, round(N[3,t]*(1-h[3])+N[5,t]-H[5,t]), PHI3)
N[6,t+1] <- rbinom(1, round(N[4,t]*(1-h[4])+N[6,t]-H[6,t]), PHI3)
# Recruitment: calves from next year's adult females (50:50 sex ratio)
N[1,t+1] <- rbinom(1, N[5, t+1], PHI1[t]*f[t]/2)
N[2,t+1] <- rbinom(1, N[5, t+1], PHI1[t]*f[t]/2)
H[1,t] <- round(N[1,t]*h[1])
H[2,t] <- round(N[2,t]*h[2])
H[3,t] <- round(N[3,t]*h[3])
H[4,t] <- round(N[4,t]*h[4])
}
# Harvest in the final year P (same rules as inside the loop)
hadf=hadf.max-hadf.m*(K-(N[1,P]+N[2,P]+N[3,P]+N[4,P]+N[5,P]+N[6,P]))/K
H5<-round(N[5,P]*hadf)
hlow5<-ifelse(N[5,P]-T_Adf>0,N[5,P]-T_Adf,0)
H[5,P] <- ifelse(N[5,P]-H5>T_Adf,H5,hlow5)
T_Adm<-round((N[5,P]-H[5,P])/mRatio) # mRatio=20, i.e. at least 5 % of post-harvest adults are male
#H[6,P] <- ifelse(N[6,P]-T_Adm>1,N[6,P]-T_Adm,0)
#H[6,P] <- ifelse(N[6,P]-H[6,P]>T_Adm,H[6,P],max((N[6,P]-T_Adm),0))
H[6,P] <- ifelse(N[6,P]*(1-h[6])>T_Adm,round(N[6,P]*h[6]),max((N[6,P]-T_Adm),0))
H[1,P] <- round(N[1,P]*h[1])
H[2,P] <- round(N[2,P]*h[2])
H[3,P] <- round(N[3,P]*h[3])
H[4,P] <- round(N[4,P]*h[4])
for (t in 1:P){
#############################################################
# POST-HARVEST POPULATION VECTORS IN T+1
X[1:4,t] <- (N[1:4,t]-H[1:4,t])
X[5:6,t] <- (N[5:6,t]-H[5:6,t])
#############################################################
# DERIVED HARVEST NUMBERS
#H[,t] <- round(N[,t]*h[,t])
X_tot[t] <- sum(X[,t]) # POST-HARVEST POPULATION size
N_tot[t] <- sum(N[,t]) # summing up population vector to population size
H_tot[t] <- sum(H[,t])
HU_tot[t] <- sum(H[3:6,t]) # total harvest of yearlings + adults
HAd_tot[t] <- sum(H[5:6,t]) # total harvest of adults
}
# Return population vectors, harvest numbers, totals and the sampled rates
out <- list(N, X, H, N_tot, X_tot, H_tot,HU_tot, HAd_tot,
f, PHI1, PHI3)
names(out) <- c("N", "X","H", "N_tot","X_tot","H_tot","HU_tot",
"HAd_tot",
"f", "phi1", "phi3")
out
}
#Harvest strategy 2 ‘proactive’ (with operational sex ratio):
#Number of adult males harvested (Hadm[t]) in year t is set to obtain a specified operational
#sex ratio (SR = m:f = 1:mRatio) after harvest. The aim for number of adult males
#after harvest are then determined by mRatio and the number of post-harvest number of adult females.
#Harvest strategy starts from first year (2018)
SimPop18RKtot_h <- function(P=11, T_Adf=100,K=Ktot,mRatio=10,phi3_m=phi3.m,phi3_sd=phi3.sd, f_m=f.m, f_sd=f.sd, phi1_m = phi1.m, phi1_sd=phi1.sd,H1=h1,hadf.max=hadf_max,hadf.m=hadf_m,Nmean=N.mean,Nsd=N.sd){
  # Harvest strategy 2 'proactive': adult males are harvested down to an
  # operational sex ratio of 1:mRatio (males:females) after harvest.
  #
  # P: number of time steps to simulate
  # T_Adf: adult-female threshold; no additional female harvest if N.adf <= T_Adf
  # K: carrying capacity; the adult-female harvest rate each year is scaled by
  #    total population size relative to K (via hadf.max and hadf.m)
  # mRatio: target number of adult females per adult male after harvest
  # H1: vector of harvest rates per class (calf f/m, yearling f/m, adult f/m)
  # phi3_m/phi3_sd: mean/sd of adult annual survival (PHI3)
  # phi1_m/phi1_sd: mean/sd of juvenile summer survival (PHI1)
  # f_m/f_sd: mean/sd of fertility
  # Nmean/Nsd: mean/sd of the initial pre-harvest population vector
  # NOTE(review): default argument values (Ktot, phi3.m, h1, N.mean, ...) are
  # global objects that must exist in the calling environment.
  #
  # Returns a named list: N (pre-harvest), X (post-harvest), H (harvest
  # numbers), their yearly totals, and the sampled rates f, phi1, phi3.
  ## POPULATION VECTORS
  ## (rows 1-6 = calf f, calf m, yearling f, yearling m, adult f, adult m)
  N <- matrix(ncol=P, nrow=6) ## pre-harvest population vector
  X <- matrix(ncol=P, nrow=6) ## post-harvest population vector
  H <- matrix(ncol=P, nrow=6) ## harvest numbers
  N_tot <- matrix(ncol=P, nrow=1)
  X_tot <- matrix(ncol=P, nrow=1)
  H_tot <- matrix(ncol=P, nrow=1)
  HU_tot <- matrix(ncol=P, nrow=1)
  HAd_tot <- matrix(ncol=P, nrow=1)
  # Initial pre-harvest population sizes; floor of 10 guards against small draws
  N[1,1] <- max(round(rnorm(1,Nmean[1], Nsd[1]),0),10) # calves females
  N[2,1] <- max(round(rnorm(1,Nmean[2], Nsd[2]),0),10) # calves males
  N[3,1] <- max(round(rnorm(1,Nmean[3], Nsd[3]),0),10) # yearling females
  N[4,1] <- max(round(rnorm(1,Nmean[4], Nsd[4]),0),10) # yearling males
  N[5,1] <- max(round(rnorm(1,Nmean[5], Nsd[5]),0),10) # adult females
  N[6,1] <- max(round(rnorm(1,Nmean[6], Nsd[6]),0),10) # adult males
  ## DEMOGRAPHIC PARAMETERS (Beta-distributed; moments -> shapes via estBetaParams)
  f <- matrix(ncol=P, nrow=1)    # fecundity per year
  PHI1 <- matrix(ncol=P, nrow=1) # juvenile summer survival per year
  dp3 <- estBetaParams(phi3_m, phi3_sd*phi3_sd)
  # Adult survival is drawn once and held fixed for the whole simulation
  PHI3 <- rbeta(1, dp3$alpha, dp3$beta)
  phi1_var <- phi1_sd*phi1_sd
  f_var <- f_sd*f_sd
  # Juvenile survival and fecundity are redrawn every year
  for (i in seq_len(P)) {
    dp1 <- estBetaParams(phi1_m, phi1_var)
    df <- estBetaParams(f_m, f_var)
    PHI1[i] <- rbeta(1, dp1$alpha, dp1$beta)
    f[i] <- rbeta(1, df$alpha, df$beta)
  }
  h <- H1
  #############################
  # SYSTEM PROCESS: project pre-harvest vectors one year at a time
  #############################
  for (t in 1:(P-1)) {
    # Adult-female harvest rate scales with population size relative to K
    hadf <- hadf.max-hadf.m*(K-(N[1,t]+N[2,t]+N[3,t]+N[4,t]+N[5,t]+N[6,t]))/K
    H5 <- round(N[5,t]*hadf)
    hlow5 <- ifelse(N[5,t]-T_Adf>0, N[5,t]-T_Adf, 0)
    # Never harvest adult females below the threshold T_Adf
    H[5,t] <- ifelse(N[5,t]-H5>T_Adf, H5, hlow5)
    # Harvest adult males down to the operational sex-ratio target (1:mRatio)
    T_Adm <- round((N[5,t]-H[5,t])/mRatio)
    H[6,t] <- ifelse(N[6,t]-T_Adm>1, N[6,t]-T_Adm, 0)
    # Binomial survival to next year after removing this year's harvest
    N[3,t+1] <- rbinom(1, round(N[1,t]*(1-h[1])), PHI3)
    N[4,t+1] <- rbinom(1, round(N[2,t]*(1-h[2])), PHI3)
    N[5,t+1] <- rbinom(1, round(N[3,t]*(1-h[3])+N[5,t]-H[5,t]), PHI3)
    N[6,t+1] <- rbinom(1, round(N[4,t]*(1-h[4])+N[6,t]-H[6,t]), PHI3)
    # Recruitment: calves from next year's adult females (50:50 sex ratio)
    N[1,t+1] <- rbinom(1, N[5, t+1], PHI1[t]*f[t]/2)
    N[2,t+1] <- rbinom(1, N[5, t+1], PHI1[t]*f[t]/2)
    H[1,t] <- round(N[1,t]*h[1])
    H[2,t] <- round(N[2,t]*h[2])
    H[3,t] <- round(N[3,t]*h[3])
    H[4,t] <- round(N[4,t]*h[4])
  }
  # Harvest in the final year P (same rules as inside the loop).
  # BUGFIX: use year-P abundances here. The original reused the stale loop
  # index t (== P-1) in this expression, unlike SimPop18Ktot_h which
  # correctly uses P for the final year.
  hadf <- hadf.max-hadf.m*(K-(N[1,P]+N[2,P]+N[3,P]+N[4,P]+N[5,P]+N[6,P]))/K
  H5 <- round(N[5,P]*hadf)
  hlow5 <- ifelse(N[5,P]-T_Adf>0, N[5,P]-T_Adf, 0)
  H[5,P] <- ifelse(N[5,P]-H5>T_Adf, H5, hlow5)
  T_Adm <- round((N[5,P]-H[5,P])/mRatio)
  H[6,P] <- ifelse(N[6,P]-T_Adm>1, N[6,P]-T_Adm, 0)
  H[1,P] <- round(N[1,P]*h[1])
  H[2,P] <- round(N[2,P]*h[2])
  H[3,P] <- round(N[3,P]*h[3])
  H[4,P] <- round(N[4,P]*h[4])
  # Post-harvest vectors and derived yearly totals
  for (t in 1:P) {
    X[1:4,t] <- (N[1:4,t]-H[1:4,t])
    X[5:6,t] <- (N[5:6,t]-H[5:6,t])
    X_tot[t] <- sum(X[,t])      # post-harvest population size
    N_tot[t] <- sum(N[,t])      # pre-harvest population size
    H_tot[t] <- sum(H[,t])      # total harvest
    HU_tot[t] <- sum(H[3:6,t])  # yearling + adult harvest
    HAd_tot[t] <- sum(H[5:6,t]) # adult harvest
  }
  out <- list(N, X, H, N_tot, X_tot, H_tot, HU_tot, HAd_tot, f, PHI1, PHI3)
  names(out) <- c("N", "X", "H", "N_tot", "X_tot", "H_tot", "HU_tot",
                  "HAd_tot", "f", "phi1", "phi3")
  out
}
#Harvest strategy 2 ‘proactive’ (with operational sex ratio).
#Same as above ("SimPop18RKtot_h"), but number of harvested calves is determined as a proportion of harvested adult females
SimPop18RKtot_hcalf <- function(P=11, T_Adf=100,K=Ktot,mRatio=10,phi3_m=phi3.m,phi3_sd=phi3.sd, f_m=f.m, f_sd=f.sd, phi1_m = phi1.m, phi1_sd=phi1.sd,H1=h1,hc=hcalf,hadf.max=hadf_max,hadf.m=hadf_m,Nmean=N.mean,Nsd=N.sd){
  # Harvest strategy 2 'proactive' (operational sex ratio 1:mRatio), as in
  # SimPop18RKtot_h, but the number of harvested calves is a proportion (hc)
  # of the number of harvested adult females.
  #
  # P: number of time steps to simulate
  # T_Adf: adult-female threshold; no additional female harvest if N.adf <= T_Adf
  # K: carrying capacity; the adult-female harvest rate each year is scaled by
  #    total population size relative to K (via hadf.max and hadf.m)
  # mRatio: target number of adult females per adult male after harvest
  # H1: vector of harvest rates per class (yearling rates h[3], h[4] are used)
  # hc: proportion of harvested adult females for which a calf is also taken
  # phi3_m/phi3_sd, phi1_m/phi1_sd, f_m/f_sd: mean/sd of adult survival,
  #    juvenile summer survival and fertility (Beta-distributed)
  # Nmean/Nsd: mean/sd of the initial pre-harvest population vector
  # NOTE(review): default argument values (Ktot, phi3.m, h1, hcalf, ...) are
  # global objects that must exist in the calling environment.
  #
  # Returns a named list: N (pre-harvest), X (post-harvest), H (harvest
  # numbers), their yearly totals, and the sampled rates f, phi1, phi3.
  ## POPULATION VECTORS
  ## (rows 1-6 = calf f, calf m, yearling f, yearling m, adult f, adult m)
  N <- matrix(ncol=P, nrow=6) ## pre-harvest population vector
  X <- matrix(ncol=P, nrow=6) ## post-harvest population vector
  H <- matrix(ncol=P, nrow=6) ## harvest numbers
  N_tot <- matrix(ncol=P, nrow=1)
  X_tot <- matrix(ncol=P, nrow=1)
  H_tot <- matrix(ncol=P, nrow=1)
  HU_tot <- matrix(ncol=P, nrow=1)
  HAd_tot <- matrix(ncol=P, nrow=1)
  # Initial pre-harvest population sizes; floor of 10 guards against small draws
  N[1,1] <- max(round(rnorm(1,Nmean[1], Nsd[1]),0),10) # calves females
  N[2,1] <- max(round(rnorm(1,Nmean[2], Nsd[2]),0),10) # calves males
  N[3,1] <- max(round(rnorm(1,Nmean[3], Nsd[3]),0),10) # yearling females
  N[4,1] <- max(round(rnorm(1,Nmean[4], Nsd[4]),0),10) # yearling males
  N[5,1] <- max(round(rnorm(1,Nmean[5], Nsd[5]),0),10) # adult females
  N[6,1] <- max(round(rnorm(1,Nmean[6], Nsd[6]),0),10) # adult males
  ## DEMOGRAPHIC PARAMETERS (Beta-distributed; moments -> shapes via estBetaParams)
  f <- matrix(ncol=P, nrow=1)    # fecundity per year
  PHI1 <- matrix(ncol=P, nrow=1) # juvenile summer survival per year
  dp3 <- estBetaParams(phi3_m, phi3_sd*phi3_sd)
  # Adult survival is drawn once and held fixed for the whole simulation
  PHI3 <- rbeta(1, dp3$alpha, dp3$beta)
  phi1_var <- phi1_sd*phi1_sd
  f_var <- f_sd*f_sd
  # Juvenile survival and fecundity are redrawn every year
  for (i in seq_len(P)) {
    dp1 <- estBetaParams(phi1_m, phi1_var)
    df <- estBetaParams(f_m, f_var)
    PHI1[i] <- rbeta(1, dp1$alpha, dp1$beta)
    f[i] <- rbeta(1, df$alpha, df$beta)
  }
  h <- H1
  #############################
  # SYSTEM PROCESS: project pre-harvest vectors one year at a time
  #############################
  for (t in 1:(P-1)) {
    # Adult-female harvest rate scales with population size relative to K
    hadf <- hadf.max-hadf.m*(K-(N[1,t]+N[2,t]+N[3,t]+N[4,t]+N[5,t]+N[6,t]))/K
    H5 <- round(N[5,t]*hadf)
    hlow5 <- ifelse(N[5,t]-T_Adf>0, N[5,t]-T_Adf, 0)
    # Never harvest adult females below the threshold T_Adf
    H[5,t] <- ifelse(N[5,t]-H5>T_Adf, H5, hlow5)
    # Harvest adult males down to the operational sex-ratio target (1:mRatio)
    T_Adm <- round((N[5,t]-H[5,t])/mRatio)
    H[6,t] <- ifelse(N[6,t]-T_Adm>1, N[6,t]-T_Adm, 0)
    # Calf harvest: one calf for a proportion hc of harvested adult females,
    # split 50:50 by sex; zero if the desired take exceeds the calf pool
    H[1,t] <- ifelse(round(H[5,t]*hc/2)<N[1,t], round(H[5,t]*hc/2), 0)
    H[2,t] <- ifelse(round(H[5,t]*hc/2)<N[2,t], round(H[5,t]*hc/2), 0)
    # Binomial survival to next year after removing this year's harvest
    N[3,t+1] <- rbinom(1, N[1,t]-H[1,t], PHI3)
    N[4,t+1] <- rbinom(1, N[2,t]-H[2,t], PHI3)
    N[5,t+1] <- rbinom(1, round(N[3,t]*(1-h[3])+N[5,t]-H[5,t]), PHI3)
    N[6,t+1] <- rbinom(1, round(N[4,t]*(1-h[4])+N[6,t]-H[6,t]), PHI3)
    # Recruitment: calves from next year's adult females (50:50 sex ratio)
    N[1,t+1] <- rbinom(1, N[5, t+1], PHI1[t]*f[t]/2)
    N[2,t+1] <- rbinom(1, N[5, t+1], PHI1[t]*f[t]/2)
    H[3,t] <- round(N[3,t]*h[3])
    H[4,t] <- round(N[4,t]*h[4])
  }
  # Harvest in the final year P (same rules as inside the loop).
  # BUGFIX: use year-P abundances here. The original reused the stale loop
  # index t (== P-1) in this expression, unlike SimPop18Ktot_h which
  # correctly uses P for the final year.
  hadf <- hadf.max-hadf.m*(K-(N[1,P]+N[2,P]+N[3,P]+N[4,P]+N[5,P]+N[6,P]))/K
  H5 <- round(N[5,P]*hadf)
  hlow5 <- ifelse(N[5,P]-T_Adf>0, N[5,P]-T_Adf, 0)
  H[5,P] <- ifelse(N[5,P]-H5>T_Adf, H5, hlow5)
  T_Adm <- round((N[5,P]-H[5,P])/mRatio)
  H[6,P] <- ifelse(N[6,P]-T_Adm>1, N[6,P]-T_Adm, 0)
  H[1,P] <- ifelse(round(H[5,P]*hc/2)<N[1,P], round(H[5,P]*hc/2), 0)
  H[2,P] <- ifelse(round(H[5,P]*hc/2)<N[2,P], round(H[5,P]*hc/2), 0)
  H[3,P] <- round(N[3,P]*h[3])
  H[4,P] <- round(N[4,P]*h[4])
  # Post-harvest vectors and derived yearly totals
  for (t in 1:P) {
    X[1:4,t] <- (N[1:4,t]-H[1:4,t])
    X[5:6,t] <- (N[5:6,t]-H[5:6,t])
    X_tot[t] <- sum(X[,t])      # post-harvest population size
    N_tot[t] <- sum(N[,t])      # pre-harvest population size
    H_tot[t] <- sum(H[,t])      # total harvest
    HU_tot[t] <- sum(H[3:6,t])  # yearling + adult harvest
    HAd_tot[t] <- sum(H[5:6,t]) # adult harvest
  }
  out <- list(N, X, H, N_tot, X_tot, H_tot, HU_tot, HAd_tot, f, PHI1, PHI3)
  names(out) <- c("N", "X", "H", "N_tot", "X_tot", "H_tot", "HU_tot",
                  "HAd_tot", "f", "phi1", "phi3")
  out
}
|
0201ae0692f9c3e4c82e5dfecccd60109258efee
|
a72a6d84757be8d52b6aa688d9105bdb94d2a709
|
/scripts/get-data/ipedsIdentifiers.R
|
c4bbaa88d475bed51383077c78e2d8c4143bda3e
|
[] |
no_license
|
UrbanInstitute/ed-data
|
ff192c45d9b1b6abf134a8efc958197f30947322
|
6c4f6361d7be9742f41fc6f046799b9b74dde299
|
refs/heads/master
| 2020-04-06T19:07:44.973267
| 2017-03-24T20:13:46
| 2017-03-24T20:13:46
| 47,789,463
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,865
|
r
|
ipedsIdentifiers.R
|
# Create IPEDS institutions identifiers dataset
# Get IPEDS data using scraper https://github.com/UrbanInstitute/ipeds-scraper
library("dplyr")
library("tidyr")
# Functions to return ipeds data
# NOTE(review): the sourced file is assumed to provide returnData(), the
# `ipeds` metadata table (columns name/path/year/columns/title) and
# `ipedspath` used below -- confirm against scripts/ipedsFunctions.R
source("scripts/ipedsFunctions.R")
########################################################################################################
# Get main institutional characteristics
#######################################################################################################
# Institutional characteristics vars
instvars <- c("fips", "stabbr", "instnm", "sector", "pset4flg", "instcat", "ccbasic", "control", "deggrant", "opeflag", "opeind", "opeid", "carnegie", "hloffer")
institutions <- returnData(instvars)
########################################################################################################
# For years < 2004, we need % of degrees granted that are bachelor's degrees or higher
# Data from completions CDFs
########################################################################################################
#CIPCODE is used in many places - get where it's 6 digits, and keep all the variables besides flags
cipdt <- ipeds %>% filter(grepl("cipcode", columns, ignore.case = T)) %>%
filter(grepl("6-digit CIP", title, ignore.case = T)) %>%
filter(year <= 2003)
# One list element per completions file (name/path/year metadata)
dl <- split(cipdt, cipdt$name)
# NOTE(review): allvars is not referenced anywhere in this visible section
allvars <- tolower(c("cipcode", "unitid", "year"))
# Read each completions CSV, tag it with its year, lowercase the column
# names, and bind it into the global environment under its dataset name.
# NOTE(review): assign()/get() through the global environment is fragile;
# building the list directly with lapply() would be safer.
for (i in seq_along(dl)) {
csvpath <- dl[[i]]$path
fullpath <- paste(ipedspath, csvpath, sep="")
name <- dl[[i]]$name
d <- read.csv(fullpath, header=T, stringsAsFactors = F)
# Give it a year variable
d$year <- dl[[i]]$year
# All lowercase colnames
colnames(d) <- tolower(colnames(d))
assign(name, d)
}
# Collect the per-file data frames created above and stack them into one
ipeds_list <- lapply(cipdt$name, get)
completions <- bind_rows(ipeds_list)
# Drop imputation-flag columns (names starting with "x")
completions <- completions %>% arrange(year, unitid) %>%
select(-starts_with("x"))
# cipcode==99: all degrees
cip99 <- completions %>% filter(cipcode==99)
# Create a variable that sums all the "crace"i variables - all the subgroups
cip99$degrees <- rowSums(cip99[,grep("crace", names(cip99)),], na.rm=T)
# Later years use majornum column - is this the student's first or second major
# Restrict to majornum = na or 1
cip99 <- cip99 %>% filter(is.na(majornum) | majornum==1)
# Reshape to by unitid, year, and then # of degrees by award level
# NOTE(review): spread() is superseded by tidyr::pivot_wider()
degrees <- cip99 %>% select(unitid, year, degrees, awlevel) %>%
spread(awlevel, degrees)
# Then total degrees = sum of award levels >= 1
# Bachelor's+ degrees = sum of award levels >= 5
degrees[is.na(degrees)] <- 0
degrees <- degrees %>% mutate(degrees_total = `1` + `2` + `3` + `4` + `5` + `6` + `7` + `8` + `9` + `10` + `11`,
degrees_bachplus = `5` + `6` + `7` + `8` + `9` + `10` + `11`) %>%
select(unitid, year, degrees_total, degrees_bachplus) %>%
mutate(degrees_pctbachplus = degrees_bachplus/degrees_total)
# Add to institutions dataset
institutions <- left_join(institutions, degrees, by = c("unitid", "year"))
########################################################################################################
# Level variables
# 2014 definitions
# LEVEL1 N 2 Disc Less than one year certificate
# LEVEL2 N 2 Disc One but less than two years certificate
# LEVEL3 N 2 Disc Associate's degree
# LEVEL4 N 2 Disc Two but less than 4 years certificate
# LEVEL5 N 2 Disc Bachelor's degree
# LEVEL6 N 2 Disc Postbaccalaureate certificate
# LEVEL7 N 2 Disc Master's degree
# LEVEL8 N 2 Disc Post-master's certificate
# LEVEL12 N 2 Disc Other degree
# LEVEL17 N 2 Disc Doctor's degree - research/scholarship
# LEVEL18 N 2 Disc Doctor's degree - professional practice
# LEVEL19 N 2 Disc Doctor's degree - other
########################################################################################################
# Additional institutional characteristics: which award levels each institution offers
levelsdt <- returnData(c("level3", "level5", "level7", "level9", "level10"))
# Add to institutions dataset
institutions <- left_join(institutions, levelsdt, by = c("unitid", "year"))
########################################################################################################
# finaid9 for <1996
# replace zeros with 1 if institution is NOT 'not eligible for any of the above' federal financial aid programs
########################################################################################################
finaid9 <- returnData("finaid9")
# Add to institutions dataset
institutions <- left_join(institutions, finaid9, by = c("unitid", "year"))
# Checkpoint the raw merged identifiers before any recoding
write.csv(institutions, "data/ipeds/institutions_raw.csv", row.names=F, na="")
# NOTE(review): clearing the workspace like this is fragile (it deletes any
# other objects a caller may have); kept as-is since the script is written as
# a standalone pipeline step.
rm(list=setdiff(ls(), c("institutions", "ipeds", "ipedspath")))
########################################################################################################
# Format institutions dataset
# Fun: in 1986, unitids are all screwy
# As of 06/13/16 - using 1994+
########################################################################################################
institutions <- read.csv("data/ipeds/institutions_raw.csv", stringsAsFactors = F)
# Frequency of carnegie codes by year (kept around for inspection only)
carnegievar <- as.data.frame(table(institutions$year, institutions$carnegie))
# Count how many survey years each institution appears in
institutions <- institutions %>% group_by(unitid) %>%
  mutate(yearsin = n())
table(institutions$yearsin)
# Keep 50 states + DC
institutions <- institutions %>% filter(fips <= 56)
institutions <- institutions %>% select(year, unitid, everything()) %>%
  arrange(year, unitid)
########################################################################################################
# Not all identifiers are available for early years - define as laid out by Martha Johnson
# deggrant
########################################################################################################
# 2014 value labels
# NOTE(review): readWorkbook() comes from the openxlsx package, which is not
# loaded in this file -- presumably attached by scripts/ipedsFunctions.R; confirm.
labels2014 <- readWorkbook(paste(ipedspath, "dict/2014/hd2014.xlsx", sep=""), sheet="Frequencies")
# Define deggrant for <2000
table(institutions$deggrant)
# DEGGRANT 1 Degree-granting
# DEGGRANT 2 Nondegree-granting, primarily postsecondary
# DEGGRANT -3 {Not available}
# 3 and 4 used rarely in early 2000s for technical/vocational schools
# 1: highest level offering is at least an associate's degree
# NAs are being treated as logical, not numeric - need to replace with something other than NA for variable definitions
# (-99 is a sentinel meaning "level offering unknown"; it never equals 1 in the tests below)
institutions$level3[is.na(institutions$level3)] <- -99
institutions$level5[is.na(institutions$level5)] <- -99
institutions$level7[is.na(institutions$level7)] <- -99
institutions$level9[is.na(institutions$level9)] <- -99
institutions$level10[is.na(institutions$level10)] <- -99
# Drop the grouping added above (group_by was never ungrouped)
institutions <- as.data.frame(institutions)
# deggrant2: backfill degree-granting status for years < 2000 from level offerings
institutions <- institutions %>% mutate(deggrant2 = deggrant) %>%
  # degree-granting: offers associate's or higher
  mutate(deggrant2 = replace(deggrant2,
                             (year < 2000 & (level3==1 | level5==1 | level7==1 | level9==1 | level10==1)),
                             1)) %>%
  # nondegree-granting: offers none of those levels
  mutate(deggrant2 = replace(deggrant2,
                             (year < 2000 & !(level3==1 | level5==1 | level7==1 | level9==1 | level10==1)),
                             2))
########################################################################################################
# pset4flg - Postsecondary and Title IV institution indicator
# This is near impossible to make <1994
########################################################################################################
# PSET4FLG 1 Title IV postsecondary institution
# PSET4FLG 2 Non-Title IV postsecondary institution
# PSET4FLG 3 Title IV NOT primarily postsecondary institution
# PSET4FLG 4 Non-Title IV NOT primarily postsecondary institution
# PSET4FLG 6 Non-Title IV postsecondary institution that is NOT open to the public
# PSET4FLG 9 Institution is not active in current universe
table(institutions$pset4flg)
# pset4flg2: backfill Title IV status for pre-2000 years using whatever proxy
# each era reported (finaid9 before 1997, OPE flags 1996-1999, OPE id in 1995).
# Order matters: later replace() calls can overwrite earlier ones.
institutions <- institutions %>% mutate(pset4flg2 = pset4flg) %>%
  # institution is NOT 'not eligible for any of the above' federal financial aid programs
  mutate(pset4flg2 = replace(pset4flg2,
                             (year < 1997 & finaid9 != 1) | (year < 1996 & is.na(finaid9)),
                             1)) %>%
  # participates in Title IV federal financial aid programs
  mutate(pset4flg2 = replace(pset4flg2,
                             (year %in% c(1996, 1997, 1998, 1999) & (opeind == 1 | opeflag == 1)),
                             1)) %>%
  # 1995: if opeid exists
  mutate(pset4flg2 = replace(pset4flg2,
                             (year == 1995 & !is.na(opeid)),
                             1))
########################################################################################################
# instcat - institutional category
########################################################################################################
# INSTCAT 1 Degree-granting, graduate with no undergraduate degrees
# INSTCAT 2 Degree-granting, primarily baccalaureate or above
# INSTCAT 3 Degree-granting, not primarily baccalaureate or above
# INSTCAT 4 Degree-granting, associate's and certificates
# INSTCAT 5 Nondegree-granting, above the baccalaureate
# INSTCAT 6 Nondegree-granting, sub-baccalaureate
# INSTCAT -1 Not reported
# INSTCAT -2 Not applicable
table(institutions$instcat)
# instcat2: reconstruct the 2014-style institutional category for years < 2004
# from level offerings (level3..level10), deggrant2, and the share of awards
# that are bachelor's or above (degrees_pctbachplus, built earlier).
institutions <- institutions %>% mutate(instcat2 = instcat) %>%
  # Master's, Doctor's, or first-professional, no bachelor's, no associate's
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & deggrant2==1 & (level7==1 | level9==1 | level10==1) & level5!=1 & level3!=1),
                            1)) %>%
  # Bachelor's or higher, primarily bachelor's or higher
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & deggrant2==1 & (level7==1 | level9==1 | level10==1) & degrees_pctbachplus > 0.5),
                            2)) %>%
  # Bachelor's or higher, primarily below bachelor's
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & deggrant2==1 & (level7==1 | level9==1 | level10==1) & degrees_pctbachplus <= 0.5),
                            3)) %>%
  # Highest degree is associate's (may have post-bacc certificates)
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & deggrant2==1 & level3==1 & level5!=1 & level7!=1 & level9!=1 & level10!=1),
                            4)) %>%
  # post-bacc certificates, no degrees
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & deggrant2==2 & hloffer >= 6),
                            5)) %>%
  # no post-bacc certificates, no degrees
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & deggrant2==2 & hloffer < 6),
                            6)) %>%
  # Sector 0 = NA instcat (-2)
  mutate(instcat2 = replace(instcat2,
                            (year < 2004 & sector==0),
                            -2))
table(institutions$instcat2)
########################################################################################################
# ccbasic for 1994 - 2004
# can't make but don't need earlier years so not an issue
# We'll generally be using carnegie for the latest year but sector over time
########################################################################################################
# ccbasic2: map older carnegie codes onto the later ccbasic coding scheme
institutions <- institutions %>% mutate(ccbasic2 = ccbasic) %>%
  # doctoral and research level
  mutate(ccbasic2 = replace(ccbasic2, (year < 2000 & year >= 1994 & carnegie %in% c(11, 12, 13, 14)), 15)) %>%
  mutate(ccbasic2 = replace(ccbasic2, (year < 2005 & year >= 2000 & carnegie %in% c(15, 16)), 15)) %>%
  # masters level
  mutate(ccbasic2 = replace(ccbasic2, (year < 2005 & year >= 1994 & carnegie %in% c(21, 22)), 20)) %>%
  # special focus
  mutate(ccbasic2 = replace(ccbasic2, (year < 2005 & year >= 1994 & carnegie > 50), 25))
table(institutions$ccbasic2)
########################################################################################################
# Redefined carnegie for this project - carnegie_urban
# 1 "public research" 2 "public masters" 3 "public associates" 4 "private nonprofit research" 5 "private nonprofit masters"
# 6 "private nonprofit bachelors" 7 "for profit" 8 "small groups" 9 "special focus"
########################################################################################################
# Special institutions - graduate-students only or other special focus
institutions <- institutions %>% mutate(specialty = 0) %>%
  # graduate-only (instcat2==1) or special-focus ccbasic2 codes (> 23)
  mutate(specialty = replace(specialty, (instcat2==1 | ccbasic2 > 23), 1))
# carnegie_urban: project-specific 9-category classification.
# Later replace() calls overwrite earlier ones, so ordering is deliberate
# (e.g. for-profit and special-focus override the earlier level assignments).
institutions <- institutions %>% mutate(carnegie_urban = 0) %>%
  ### PUBLIC
  # public research
  mutate(carnegie_urban = replace(carnegie_urban, ccbasic2 %in% c(15, 16, 17) & control==1, 1)) %>%
  # public masters
  mutate(carnegie_urban = replace(carnegie_urban, ccbasic2 %in% c(18, 19, 20) & control==1, 2)) %>%
  # public associates
  mutate(carnegie_urban = replace(carnegie_urban, ccbasic2 %in% c(3, 4) & control==1, 3)) %>%
  ### PRIVATE
  # private nonprofit research
  mutate(carnegie_urban = replace(carnegie_urban, ccbasic2 %in% c(15, 16, 17) & control==2, 4)) %>%
  # private nonprofit masters
  mutate(carnegie_urban = replace(carnegie_urban, ccbasic2 %in% c(18, 19, 20) & control==2, 5)) %>%
  # private nonprofit bachelors
  mutate(carnegie_urban = replace(carnegie_urban, instcat2==2 & control==2 & carnegie_urban==0, 6)) %>%
  # for profit
  mutate(carnegie_urban = replace(carnegie_urban, control==3, 7)) %>%
  #### SMALL GROUPS
  # public bachelors
  mutate(carnegie_urban = replace(carnegie_urban, instcat2==2 & control==1 & carnegie_urban==0, 8)) %>%
  # private nonprofit associates
  mutate(carnegie_urban = replace(carnegie_urban, instcat2 %in% c(3,4) & control==2, 8)) %>%
  # Special focus
  mutate(carnegie_urban = replace(carnegie_urban, specialty==1, 9)) %>%
  # non degree granting excluded
  mutate(carnegie_urban = replace(carnegie_urban, deggrant2==2, NA))
# String variables (NA carnegie_urban propagates to NA label via ifelse)
institutions <- institutions%>% mutate(carnegie_label = ifelse(carnegie_urban == 1, "Public research",
                                       ifelse(carnegie_urban == 2, "Public master's",
                                       ifelse(carnegie_urban == 3, "Public associate's",
                                       ifelse(carnegie_urban == 4, "Private nonprofit research",
                                       ifelse(carnegie_urban == 5, "Private nonprofit master's",
                                       ifelse(carnegie_urban == 6, "Private nonprofit bachelor's",
                                       ifelse(carnegie_urban == 7, "For profit",
                                       ifelse(carnegie_urban == 8, "Small groups",
                                       ifelse(carnegie_urban == 9, "Special focus",
                                              ""))))))))))
########################################################################################################
# Basic sector - sector_urban (named sectorv2 in Stata draft do files)
# 1 "public two-year" 2 "public four-year" 3 "private nonprofit four-year" 4 "for profit" 5 "other" 6 "non-degree-granting"
########################################################################################################
# IPEDS version
# SECTOR 0 Administrative Unit
# SECTOR 1 Public, 4-year or above
# SECTOR 2 Private not-for-profit, 4-year or above
# SECTOR 3 Private for-profit, 4-year or above
# SECTOR 4 Public, 2-year
# SECTOR 5 Private not-for-profit, 2-year
# SECTOR 6 Private for-profit, 2-year
# SECTOR 7 Public, less-than 2-year
# SECTOR 8 Private not-for-profit, less-than 2-year
# SECTOR 9 Private for-profit, less-than 2-year
# sector_urban: 6-category sector built from instcat2/control/deggrant2.
# Later replace() calls overwrite earlier ones, so ordering is deliberate.
institutions <- institutions %>% mutate(sector_urban = 0) %>%
  # public and fewer than 50% of degrees/certificates are bachelor's or higher
  mutate(sector_urban = replace(sector_urban, instcat2 %in% c(3,4) & control==1, 1)) %>%
  # public and more than 50% of degrees/certificates are bachelor's or higher
  mutate(sector_urban = replace(sector_urban, instcat2==2 & control==1, 2)) %>%
  # private and more than 50% of degrees/certificates are bachelor's or higher
  mutate(sector_urban = replace(sector_urban, instcat2==2 & control==2, 3)) %>%
  # for profit, any level
  mutate(sector_urban = replace(sector_urban, control==3, 4)) %>%
  # other degree-granting (small groups and special focus)
  # NOTE: We cannot include public bachelor's and special focus institutions in 'other' prior to 1994 because
  # defining those categories requires ccbasic. We can only include private nonprofit associate's institutions.
  mutate(sector_urban = replace(sector_urban,
                                (carnegie_urban %in% c(8,9) & year>=1994) | (instcat2 %in% c(3, 4) & control==2 & year < 1994),
                                5)) %>%
  # non-degree-granting
  mutate(sector_urban = replace(sector_urban, deggrant2==2, 6))
# String variables
institutions <- institutions%>% mutate(sector_label = ifelse(sector_urban == 1, "Public two-year",
                                       ifelse(sector_urban == 2, "Public four-year",
                                       ifelse(sector_urban == 3, "Private nonprofit four-year",
                                       ifelse(sector_urban == 4, "For-profit",
                                       ifelse(sector_urban == 5, "Other",
                                       ifelse(sector_urban == 6, "Non-degree-granting",
                                              "")))))))
# Check: cross-tabs for eyeballing the classification
table(institutions$carnegie_label, institutions$sector_label)
table(institutions$sector_label, institutions$year)
# Remove some of the original variables (all saved in institutions_raw if needed again)
colnames(institutions)
institutions <- institutions %>% select(-hloffer, -opeid, -opeind, -opeflag, -sector, -control, -carnegie, -deggrant, -pset4flg, -instcat, -ccbasic, -starts_with("level"), -finaid9,
                                        -instcat2, -ccbasic2, -yearsin)
# Final identifiers file used downstream
write.csv(institutions, "data/ipeds/institutions.csv", row.names=F, na="")
# Analysis universe: active degree-granting, Title IV institutions
institutionskeep <- institutions %>% filter(sector_urban > 0 & pset4flg2==1)
table(institutionskeep$year)
table(institutionskeep$sector_urban, institutionskeep$year)
|
0a75704c3fe1135d6f507656ddc1cca00c05eb2e
|
497153f9a15f53b5b2b4ce0d375ea7f9848d75bb
|
/munge/04-Interaction_Data.R
|
1da5f653a181fd0fdca70694d6e7a583f47e62b1
|
[] |
no_license
|
joshbiology/pan-meyers-et-al
|
b43b4299e56ff979fe4bce751574e7bbca8244d8
|
2d72ea626e2c8f4422cc5413bbd835f462a20a62
|
refs/heads/master
| 2022-01-07T00:16:14.916776
| 2018-05-18T19:26:57
| 2018-05-18T19:26:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
04-Interaction_Data.R
|
# Each cache() block (ProjectTemplate) builds a protein-protein interaction
# edge list with columns Gene.x / Gene.y; alphabetize_genes() and
# de_excelify_genes() are project helpers defined elsewhere -- pairs are
# canonically ordered, deduplicated, and self-interactions dropped.
# BioPlex AP-MS interactions (Huttlin et al. 2017, Nature supplementary table)
cache("huttlin2017_bioplex_ppi", {
  read_tsv("./data/raw/nature22366-s2.tsv",
           col_types = "----cc--n") %>%   # keep two gene columns + confidence
    set_colnames(c("Gene.x", "Gene.y", "Confidence")) %>%
    alphabetize_genes(Gene.x, Gene.y) %>%
    distinct(Gene.x, Gene.y) %>%
    filter(Gene.x != Gene.y)
})
# Yeast two-hybrid interactions with localization (Thul et al. 2017, table S17)
cache("thul2017_subcellular_y2h", {
  read_tsv("./data/raw/aal3321_Thul_SM_table_S17.tsv",
           col_types = "cc--c") %>%
    set_colnames(c("Gene.x", "Gene.y", "Localization")) %>%
    de_excelify_genes(Gene.x) %>%   # repair Excel-mangled gene symbols
    de_excelify_genes(Gene.y) %>%
    alphabetize_genes(Gene.x, Gene.y) %>%
    distinct(Gene.x, Gene.y) %>%
    filter(Gene.x != Gene.y)
})
# Human interactome Y2H screen (Rolland et al. 2014, table mmc3)
cache("rolland2014_interactome_y2h", {
  read_tsv("./data/raw/rolland2014_mmc3.tsv",
           col_types = "--cc-") %>%
    set_colnames(c("Gene.x", "Gene.y")) %>%
    de_excelify_genes(Gene.x) %>%
    de_excelify_genes(Gene.y) %>%
    alphabetize_genes(Gene.x, Gene.y) %>%
    distinct(Gene.x, Gene.y) %>%
    filter(Gene.x != Gene.y)
})
# Co-fractionation complexes (Wan et al. 2015, table S4): expand each complex's
# semicolon-separated member list, then self-join on ComplexID to turn complex
# co-membership into pairwise edges.
cache("wan2015_complexes_cofrac", {
  raw <- read_tsv("./data/raw/wan2015_table_S4.tsv",
                  col_types = 'cicc') %>%
    mutate(Gene = str_split(GeneName, ";")) %>%
    select(ComplexID, Gene) %>%
    unnest()
  inner_join(raw, raw, by="ComplexID") %>%
    alphabetize_genes() %>%
    distinct(Gene.x, Gene.y) %>%
    filter(Gene.x != Gene.y)
})
# QUBIC interactome (Hein et al. 2015, table mmc3): bait-prey pairs.
# (The `mann <-` assignment still yields the data frame as the block's value.)
cache("hein2015_interactome_qubic", {
  mann <- read_tsv("./data/raw/hein2015_mmc3.tsv") %>%
    select(Gene.x = bait.Gene.name, Gene.y = prey.Gene.name) %>%
    alphabetize_genes() %>%
    distinct(Gene.x, Gene.y) %>%
    filter(Gene.x != Gene.y)
})
# Structurally resolved interaction surfaces mapped to PDB entries
cache("pdb_surfaces", {
  pdb_surfaces <- read_tsv("./data/raw/human_ixns_pdb.txt",
                           col_names = c("Gene.x",
                                         "Gene.y",
                                         "Interaction",
                                         "PDB1",
                                         "Surface1",
                                         "PDB2",
                                         "Surface2"))
})
|
aaf27c70d842c3e435dfde210ef6ce5f6671faca
|
80a3908146756e6386bd14b8a7d9df54dc759e15
|
/plot4.R
|
789f0d1fcbc9f7ca1448b9d0663e9e4641e38dec
|
[] |
no_license
|
matt21511/ExData_Plotting1
|
42fbe50aa1febb3c13546ad02c6b4da44bb18233
|
2f6d3126de3c2510937a988c40afa4e9d4d769ee
|
refs/heads/master
| 2021-01-16T21:31:04.098764
| 2015-02-08T04:03:10
| 2015-02-08T04:03:10
| 30,469,794
| 0
| 0
| null | 2015-02-07T20:51:40
| 2015-02-07T20:51:40
| null |
UTF-8
|
R
| false
| false
| 999
|
r
|
plot4.R
|
# Import data: dates/times as character, measurements as numeric, "?" -> NA
mytable <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                      na.strings = "?", colClasses = c(rep("character", 2), rep("numeric", 7)))
# Keep only 1 Feb 2007 and 2 Feb 2007 (Date is formatted d/m/Y)
ftable <- mytable[grepl("^[12]/2/2007", mytable$Date), ]
# Create plot
png(filename = "plot4.png")
# Combine date and time into one datetime column.
# BUG FIX: the format string previously read "%d/%m/%Y $H:$M:$S" -- strptime
# conversion specifications use "%", so the time of day was never parsed and
# the conversion returned NA. Also use POSIXct instead of POSIXlt: POSIXlt is
# a list and misbehaves when stored as a data.frame column.
ftable$FullTime <- paste(ftable$Date, ftable$Time)
ftable$FullTime <- as.POSIXct(ftable$FullTime, format = "%d/%m/%Y %H:%M:%S")
par(mfcol = c(2, 2))  # 2x2 grid, filled column-first
plot(ftable$FullTime, ftable$Global_active_power, ylab = "Global Active Power", xlab = "", type = "l")
plot(ftable$FullTime, ftable$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l")
lines(ftable$FullTime, ftable$Sub_metering_2, col = "Red")
lines(ftable$FullTime, ftable$Sub_metering_3, col = "Blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("Black", "Red", "Blue"))
plot(ftable$FullTime, ftable$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(ftable$FullTime, ftable$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
e3353530cf0e7ea03ce73d2191f692c78b72c82d
|
4cec958a92d800c1391b8b50a43041c6d91806a5
|
/man/stm_wrapper.Rd
|
ba36eb7b54c4d17cecd9de30e921dc95c0da7031
|
[] |
no_license
|
lrq3000/themetagenomics
|
84f91c3eda5478cd732af926106daa0b9f5b421a
|
99b73ffd5430f8e080635e28bebbe57a8aae6a28
|
refs/heads/master
| 2020-11-30T08:37:01.009289
| 2017-06-06T17:06:26
| 2017-06-06T17:06:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 494
|
rd
|
stm_wrapper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_topics.R
\name{stm_wrapper}
\alias{stm_wrapper}
\title{Wrapper for stm}
\usage{
stm_wrapper(K, docs, vocab, formula = NULL, metadata = NULL,
sigma_prior = 0, model = NULL, iters = 500, tol = 1e-05,
batches = 1, seed = sample.int(.Machine$integer.max, 1),
verbose = FALSE, verbose_n = 5, init_type = c("Spectral", "LDA",
"Random"), control = control)
}
\description{
Wrapper for stm
}
\keyword{internal}
|
661ed8bb3d04236154e18b4bc988f7dfa755b6c7
|
5aab0c7c5a35775f6278c1e307886df103c74e91
|
/man/get_edges_from_structure.Rd
|
d88b95bf36046d9d6f64d5b1e10c250667e857ef
|
[] |
no_license
|
ims-fhs/badhacker
|
070bdf5c8d7e8fc50d2c05812a85683abeb411aa
|
54cc42204aadcb239e6b39e265a9cc2a48ab722d
|
refs/heads/master
| 2022-03-13T14:12:53.800997
| 2022-03-03T08:30:00
| 2022-03-03T08:30:00
| 117,236,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
get_edges_from_structure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visNetwork.R
\name{get_edges_from_structure}
\alias{get_edges_from_structure}
\title{Calculate edges}
\usage{
get_edges_from_structure(my_structure)
}
\arguments{
\item{my_structure}{A list, the functional structure calculated in
create_list_of_functional_structure().}
}
\value{
A data.frame, the edges
}
\description{
Calculate edges
}
|
0817d1cd38c4ae4b56b655c0902334ef4188d9e8
|
6ca0f032a12bdf84dea9acb3df348d695c314788
|
/R/useful_functions/make_dictionary.R
|
8e88d6aafc74521a6be408821f1a399787fb3782
|
[] |
no_license
|
schliebs/ZU_teaching_applied_statistics
|
d51134a0f0db9e29014e11a7619e1af32c9a3a96
|
ae9be8f79e5424ef35f237548c2d5c78e725fdf5
|
refs/heads/master
| 2021-08-23T06:24:38.965760
| 2017-12-03T21:57:01
| 2017-12-03T21:57:01
| 112,834,667
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,160
|
r
|
make_dictionary.R
|
#' Build a data dictionary from a haven-imported SPSS dataset
#'
#' Extracts the variable labels and value labels that haven stores as column
#' attributes, returning them as a plain data.frame that can be browsed with
#' RStudio's viewer.
#'
#' @param dataset SPSS dataframe in tbl_df format imported with haven.
#' @param format specify whether you want a wide (each variable one row) or
#'   long format (each value label one row). Defaults to "wide".
#'
#' @return a data.frame with meta information which can be queried with
#'   R-Studio functionalities (columns: variables, variable_label, value_labels).
#' @export
#'
#' @examples
make_dictionary <- function(dataset,format = c("wide","long")){
  #Function extracts metainformation from haven imported datasets
  #dataset: spss dataset imported with the library haven
  #format: specifies if wide or long format should be created
  #require("magrittr")   # %<>% / %>% must be attached by the caller
  #require("plyr")       # llply/laply must be attached by the caller
  # Check input
  if(!inherits(dataset, "tbl_df")) stop("function needs a dataset in 'tbl_df' format imported with 'haven'.")
  # BUG FIX: the original tested `format %in% c("long","wide")` against the
  # whole default vector, so calling make_dictionary() without `format`
  # produced a length-2 condition in `if` (a warning historically, an error in
  # R >= 4.2). match.arg() validates the argument and selects the documented
  # default ("wide").
  format <- match.arg(format)
  # Helper functions
  # helper1: "value:label" strings for one variable's labels vector
  helper1 <- function(x) paste(x,names(x),sep = ":")
  # helper2: collapse all of a variable's "value:label" pairs into one string
  helper2 <- function(x) helper1(x) %>% paste(collapse = " | ")
  # helper3: guarantee every column carries (possibly NA) label/labels attributes
  helper3 <- function(x){
    if(length(attr(x,"label")) == 0) attr(x,"label") <- NA
    if(length(attr(x,"labels")) == 0) attr(x,"labels") <- NA
    return(x)
  }
  # Assign NA to empty Labels
  dataset %<>% llply(helper3)
  # Basic information
  variables <- names(dataset)
  variable_label <- laply(dataset,attr,"label")
  value_labels <- llply(dataset,attr,"labels")
  repeat_vars <- laply(value_labels,length)   # rows each variable needs in long format
  # Combine information
  if(format == "wide"){
    meta_info <- data.frame("variables" = variables,
                            "variable_label" = variable_label,
                            "value_labels" = value_labels %>% laply(helper2)
                            )
  }else if(format == "long"){
    meta_info <- data.frame("variables" = rep(variables,repeat_vars),
                            "variable_label" = rep(variable_label,repeat_vars),
                            "value_labels" = value_labels %>%
                              llply(helper1) %>%
                              unlist()
                            )
  }
  # "NA:" is what helper1 produces for variables without value labels -> real NA
  meta_info$value_labels[meta_info$value_labels == "NA:"] <- NA
  return(meta_info)
}
#----------------------------------------------------------------------------#
|
ed8183c19f815bc8dfb1737ed43a95610c15fbd7
|
e3453f8c610ceb4b5022be45b1dc145d690c1358
|
/man/mostFrequent.Rd
|
9e47520356679563ad3e1314abee7b08a8a325f9
|
[] |
no_license
|
kongra/koR
|
2592b69b7525099d2ceeef2686fb4e0f0005f933
|
48f06fd6832ac4ccaa7c66438f9a5668072e50a4
|
refs/heads/master
| 2018-10-08T03:05:41.749217
| 2018-08-30T08:18:29
| 2018-08-30T08:18:29
| 38,744,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 358
|
rd
|
mostFrequent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core.R
\name{mostFrequent}
\alias{mostFrequent}
\title{Returns a vector xs sorted by the (decreasing) frequency of appearance of
its elements.}
\usage{
mostFrequent(xs, ...)
}
\description{
Returns a vector xs sorted by the (decreasing) frequency of appearance of
its elements.
}
|
b8291569217264ea7f87033da89bb1cce51a05b6
|
868f618f2593e4ed2df51dce8b1d78e1b16948f6
|
/Modelling/Model Building.R
|
b15b11aa0ae17f32728387dfac4b7164cdf3d579
|
[] |
no_license
|
HaripriyaTV/Sales-cure-all...-or-will-they-
|
b980f033cfb6ae6f73711910ca60e561013609a1
|
9618234b0cc586f5198b7dcfc39b0c54e6e0b0af
|
refs/heads/master
| 2020-03-19T06:44:39.522158
| 2018-06-05T18:42:53
| 2018-06-05T18:42:53
| 136,050,965
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,727
|
r
|
Model Building.R
|
#model building
#splitting train data into train and validation set (75/25 random assignment)
# NOTE(review): BM_train / BM_test must already exist (built by earlier
# preprocessing scripts); the df[, -c("col")] column-dropping syntax used
# below is data.table-style, so they are presumably data.tables -- confirm.
train_data_split <- sample(2, nrow(BM_train), replace =TRUE, prob = c(0.75, 0.25))
new_BM_train <- BM_train[train_data_split == 1,]
BM_valid <- BM_train[train_data_split == 2,]
#building linear model (all columns except the row identifier as predictors)
lin_reg <- lm(Item_Outlet_Sales ~ ., data = new_BM_train[, -c("Item_Identifier")])
#making prediction on test data
lin_reg_pred <- predict(lin_reg, BM_valid[, -c("Item_Identifier")])
lin_reg_pred
#evaluating the model using rmse (caret::RMSE)
rmse_lin_reg <- RMSE(lin_reg_pred, BM_valid$Item_Outlet_Sales, na.rm = FALSE)
rmse_lin_reg
#performing lasso regression (glmnet via caret; alpha = 1 => lasso)
set.seed(1235)
# NOTE(review): caret method names are usually lowercase ("cv", as used for
# the random forest below) -- confirm "CV" is accepted by this caret version.
my_control <- trainControl(method = "CV", number = 5)
Grid = expand.grid(alpha = 1, lambda = seq(0.001,0.1,by = 0.0002))
lasso_reg <- train(x = new_BM_train[, -c("Item_Identifier", "Item_Outlet_Sales")], y = new_BM_train$Item_Outlet_Sales,
                   method='glmnet', trControl= my_control, tuneGrid = Grid)
pred_lasso <- predict(lasso_reg, BM_valid[, -c("Item_Identifier")])
#evaluating lasso regression
rmse_lasso <- RMSE(pred_lasso, BM_valid$Item_Outlet_Sales, na.rm = FALSE)
rmse_lasso
#performing ridge regression (alpha = 0 => ridge)
set.seed(1235)
my_control <- trainControl(method = "CV", number = 5)
Grid = expand.grid(alpha = 0, lambda = seq(0.001,0.1,by = 0.0002))
ridge_reg <- train(x = new_BM_train[, -c("Item_Identifier", "Item_Outlet_Sales")], y = new_BM_train$Item_Outlet_Sales,
                   method='glmnet', trControl= my_control, tuneGrid = Grid)
pred_ridge <- predict(ridge_reg, BM_valid[, -c("Item_Identifier")])
#evaluating ridge regression
rmse_ridge <- RMSE(pred_ridge, BM_valid$Item_Outlet_Sales, na.rm = FALSE)
rmse_ridge
#building random forest (ranger via caret, tuning min.node.size)
set.seed(1237)
my_control = trainControl(method="cv", number=5) # 5-fold CV
tgrid = expand.grid(
  .mtry = sqrt(ncol(new_BM_train)),
  .splitrule = "variance",
  .min.node.size = c(10,15,20)
)
rf_mod = train(x = new_BM_train[, -c("Item_Identifier", "Item_Outlet_Sales")],
               y = new_BM_train$Item_Outlet_Sales,
               method='ranger',
               trControl= my_control,
               tuneGrid = tgrid,
               num.trees = 400,
               importance = "permutation")
pred_rf <- predict(rf_mod, BM_valid[, -c("Item_Identifier")])
#evaluating the random forest model
rmse_rf <- RMSE(pred_rf, BM_valid$Item_Outlet_Sales, na.rm = FALSE)
rmse_rf
#modelling xgboost (regression objective)
param_list = list(
  objective = "reg:linear",
  eta=0.01,
  gamma = 1,
  max_depth=6,
  subsample=0.8,
  colsample_bytree=0.5
)
# xgboost requires numeric matrices; as.matrix assumes remaining columns are numeric
dtrain = xgb.DMatrix(data = as.matrix(new_BM_train[,-c("Item_Identifier", "Item_Outlet_Sales")]), label= new_BM_train$Item_Outlet_Sales)
dtest = xgb.DMatrix(data = as.matrix(BM_valid[,-c("Item_Identifier")]))
#xgboost crossvalidation (to pick nrounds; early stopping after 30 flat rounds)
set.seed(112)
xgbcv = xgb.cv(params = param_list,
               data = dtrain,
               nrounds = 1000,
               nfold = 5,
               print_every_n = 10,
               early_stopping_rounds = 30,
               maximize = F)
#model training (nrounds = 430 was presumably chosen from the CV run above -- confirm)
xgb_model = xgb.train(data = dtrain, params = param_list, nrounds = 430)
pred_xgb_model <- predict(xgb_model, dtest)
#evaluating the model
rmse_xgb <- RMSE(pred_xgb_model, BM_valid$Item_Outlet_Sales, na.rm = FALSE)
rmse_xgb
#model selection and prediction on the test data
#random forest outperformed the other models
test_pred <- predict(rf_mod, newdata = BM_test)
test_pred
#creating the submission file to analytics vidhya
BM_submission <- fread("BMSample Submission.csv")
BM_submission$Item_Outlet_Sales <- test_pred
write.csv(BM_submission, "BM_Result.csv", row.names = FALSE)
|
827868564f427df7b098cc3dacc5a03ebca728a7
|
be0b318fe7a20f45c9b9d6b52e8b353fa34dc7a8
|
/Graphing Scripts.R
|
fa84eecf7ccf8cd0ea8f1dbb2165e64f21e03128
|
[
"MIT"
] |
permissive
|
alistairwalsh/3hr_Rcruise
|
24632b3b845aae1d76f34de1a04c227732ec4c02
|
82a592326f9330a167d786692e5d65f86565feba
|
refs/heads/master
| 2021-01-10T14:26:52.009036
| 2015-11-11T06:05:07
| 2015-11-11T06:05:07
| 45,884,511
| 0
| 1
| null | 2015-11-16T01:33:02
| 2015-11-10T03:19:16
|
R
|
UTF-8
|
R
| false
| false
| 6,369
|
r
|
Graphing Scripts.R
|
# www.cyclismo.org tutorial -----------------------------------------------
# http://www.cyclismo.org/tutorial/R/intermediatePlotting.html#print-to-a-file
# Simulated linear relationship used throughout this section
x <- rnorm(10,sd=5,mean=20)
y <- 2.5*x - 1.0 + rnorm(10,sd=9,mean=0)
cor(x,y)
# [1] 0.7400576
# FIX: the line above was pasted console output; left bare (without the
# leading "#") it is a syntax error when this script is sourced.
plot(x,y,xlab="Independent",ylab="Dependent",main="Random Stuff")
# Overlay two more simulated samples with different colors/symbols
x1 <- runif(8,15,25)
y1 <- 2.5*x1 - 1.0 + runif(8,-6,6)
points(x1,y1,col=2)
x2 <- runif(8,15,25)
y2 <- 2.5*x2 - 1.0 + runif(8,-6,6)
points(x2,y2,col=3,pch=2)
# Add legend --------------------------------------------------------------
plot(x,y,xlab="Independent",ylab="Dependent",main="Random Stuff")
points(x1,y1,col=2,pch=3)
points(x2,y2,col=4,pch=5)
legend(14,70,c("Original","one","two"),col=c(1,2,4),pch=c(1,3,5))
# Set Axis Limits --------------------------------------------------------------
plot(x,y,xlab="Independent",ylab="Dependent",main="Random Stuff",xlim=c(0,30),ylim=c(0,100))
points(x1,y1,col=2,pch=3)
points(x2,y2,col=4,pch=5)
legend(0,100,c("Original","one","two"),col=c(1,2,4),pch=c(1,3,5))
# Add Error Bars ----------------------------------------------------------
plot(x,y,xlab="Independent",ylab="Dependent",main="Random Stuff")
xHigh <- x
yHigh <- y + abs(rnorm(10,sd=3.5))
xLow <- x
yLow <- y - abs(rnorm(10,sd=3.1))
# arrows() with angle=90 and code=3 draws flat caps at both ends = error bars
arrows(xHigh,yHigh,xLow,yLow,col=2,angle=90,length=0.1,code=3)
numberWhite <- rhyper(400,4,5,3)
numberChipped <- rhyper(400,2,7,3)
# Multiple Graphs on One Plot ---------------------------------------------
par(mfrow=c(2,3))
# mfrow=c(number of rows,number of columns)
boxplot(numberWhite,main="first plot")
boxplot(numberChipped,main="second plot")
# jitter() separates overlapping integer-valued points
plot(jitter(numberWhite),jitter(numberChipped),xlab="Number White Marbles Drawn",
     ylab="Number Chipped Marbles Drawn",main="Pulling Marbles With Jitter")
hist(numberWhite,main="fourth plot")
hist(numberChipped,main="fifth plot")
mosaicplot(table(numberWhite,numberChipped),main="sixth plot")
# Pairwise Plots ----------------------------------------------------------
# Five correlated variables, then a scatterplot matrix of every pair
uData <- rnorm(20)
vData <- rnorm(20,mean=5)
wData <- uData + 2*vData + rnorm(20,sd=0.5)
xData <- -2*uData+rnorm(20,sd=0.1)
yData <- 3*vData+rnorm(20,sd=2.5)
d <- data.frame(u=uData,v=vData,w=wData,x=xData,y=yData)
pairs(d, pch = 21, bg = c("red","blue"))
example(pairs)
# Shaded Areas ------------------------------------------------------------
par(mfrow=c(1,1))
x = c(-1,1,1,-1,-1)
y = c(-1,-1,1,1,-1)
plot(x,y)
polygon(x,y,col='blue')
# Shade the upper 5% rejection region of a normal density
stdDev <- 0.75;
x <- seq(-5,5,by=0.01)
y <- dnorm(x,sd=stdDev)
right <- qnorm(0.95,sd=stdDev)   # critical value: P(X > right) = 0.05
plot(x,y,type="l",xaxt="n",ylab="p",
     xlab=expression(paste('Assumed Distribution of ',bar(x))),
     axes=FALSE,ylim=c(0,max(y)*1.05),xlim=c(min(x),max(x)),
     frame.plot=FALSE)
axis(1,at=c(-5,right,0,5),
     pos = c(0,0),
     labels=c(expression(' '),expression(bar(x)[cr]),expression(mu[0]),expression(' ')))
axis(2)
xReject <- seq(right,5,by=0.01)
yReject <- dnorm(xReject,sd=stdDev)
# close the polygon down to the x axis at both ends
polygon(c(xReject,xReject[length(xReject)],xReject[1]),
        c(yReject,0, 0), col='red')
# Plotting a Surface ------------------------------------------------------
x <- seq(0,2*pi,by=pi/100)
y <- x
# Build the coordinate grids via outer products
xg <- (x*0+1) %*% t(y)
yg <- (x) %*% t(y*0+1)
f <- sin(xg+yg)
persp(x,y,f,theta=-10,phi=40,col="purple")
example(persp)
# Dataset using R’s hypergeometric random number generator ----------------
numberWhite <- rhyper(30,4,5,3)
# FIX: this call was truncated to "rhyper(30,2,7," in the original, a syntax
# error when sourcing; completed with k=3 to match rhyper(400,2,7,3) above.
numberChipped <- rhyper(30,2,7,3)
# Barplot -----------------------------------------------------------------
numberWhite <- rhyper(30,4,5,3)
numberWhite <- as.factor(numberWhite)
plot(numberWhite)   # plot() on a factor draws a barplot of counts
# barplot command requires a vector of heights, you cannot simply give it raw data
# frequencies for the barplot command can be easily calculated using the table command
numberWhite <- rhyper(30,4,5,3)
totals <- table(numberWhite)
totals
#numberWhite
#0 1 2 3
#4 13 11 2
# you can change the axis labels by setting the row names of the table
# (NOTE: this assumes exactly four distinct values 0..3 were drawn; with a
# different random draw the label count may not match)
totals <- table(numberWhite)
rownames(totals) <- c("none","one","two","three")
totals
#numberWhite
#none one two three
#4 13 11 2
barplot(totals,main="Number Draws",ylab="Frequency",xlab="Draws")
# use the sort command with the decreasing option set to TRUE if you want to sort the frequencies along the axis
barplot(sort(totals,decreasing=TRUE),main="Number Draws",ylab="Frequency",xlab="Draws")
# you can change the order of the frequencies manually
totals
#numberWhite
#none one two three
#4 13 11 2
sort(totals,decreasing=TRUE)
#numberWhite
#one two none three
#13 11 4 2
totals[c(3,1,4,2)]
#numberWhite
#two none three one
#11 4 2 13
barplot(totals[c(3,1,4,2)])
# Multiple Representations on One PLot ------------------------------------
# Overlay a boxplot, rug, and density curve on top of a histogram.
x = rexp(20,rate=4)
hist(x,ylim=c(0,18),main="Multi-plot",xlab="X")
boxplot(x,at=16,horizontal=TRUE,add=TRUE)   # add=TRUE draws into the existing plot
rug(x,side=1)
d = density(x)
points(d,type='l',col=3)
# Print to File -----------------------------------------------------------
# to find out which devices are available on your computer
help(device)
# Annotation and Formatting -----------------------------------------------
x <- rnorm(10,mean=0,sd=4)
y <- 3*x-1+rnorm(10,mean=0,sd=2)
summary(x)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#-6.1550 -1.9280 1.2000 -0.1425 2.4780 3.1630
summary(y)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#-17.9800 -9.0060 0.7057 -1.2060 8.2600 10.9200
# Suppress the default axes, then draw custom ones through the origin.
plot(x,y,axes=FALSE,col=2)
axis(1,pos=c(0,0),at=seq(-7,5,by=1))
axis(2,pos=c(0,0),at=seq(-18,11,by=2))
# at is used to specifiy the tick marks
# Drawing a Box around the Plot
x <- rnorm(10,mean=0,sd=4)
y <- 3*x-1+rnorm(10,mean=0,sd=2)
plot(x,y,bty="7")
# bty= "o", "l", "7", "c", "u", "]", or "n" where n is no box
plot(x,y,bty="o")
plot(x,y,bty="n")
# box can also be drawn later using the box command (lty = 1,2,3 can be used to specify line type)
box(lty=3)
# use par command to adjust various parameters
# NOTE(review): these par() changes persist for subsequent plots on the device.
par(bty="l")
par(bg="gray")
par(mex=2)
x <- rnorm(10,mean=0,sd=4)
y <- 3*x-1+rnorm(10,mean=0,sd=2)
plot(x,y)
par(bg="white")
plot(x,y)
# add text to your plots
x <- rnorm(10,mean=0,sd=4)
y <- 3*x-1+rnorm(10,mean=0,sd=2)
plot(x,y)
text(-1,-2,"numbers!")
# give the co-ordinates of where you want to place the text
# default text will cut off any characters outside the plot area, override this using the xpd option
text(-7,-2,"outside the area",xpd=TRUE)
|
d066f7a1ae11a889b57b32cc6056f5686d56ad17
|
f4e497ca42f695675e9459d970b97ae931f93318
|
/a_rules_mining.R
|
ad409c047d7339c5d956e92a499874810f9a60ad
|
[] |
no_license
|
vikaasa/yelp-challenge-identifying-trends
|
46291a32242d4a80ecefb0d6efcaa9789f8b466e
|
1cded345a26e5c34b9f10165eac04de1aa1c026c
|
refs/heads/master
| 2016-09-14T05:28:52.508128
| 2016-05-09T14:10:33
| 2016-05-09T14:10:33
| 57,690,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
a_rules_mining.R
|
# Association-rule mining over clustered top-rated Yelp businesses
# (one basket per CSV row; column 1 is the transaction id).
library(arules)
# NOTE(review): setwd() in a script is fragile; prefer paths relative to the
# project root or a configurable data directory.
setwd("C:/Users/Vikaasa/IdeaProjects/Spark-Workshop")
input_trans = read.transactions("top_rated_businesses_clustered.csv", format = "basket", sep = ",", rm.duplicates=TRUE, cols = 1)
str(input_trans)
# Mine rules at minimum support 1.5% and minimum confidence 75%.
rules=apriori(input_trans, parameter = list(supp=0.015, conf=0.75))
inspect(rules)
rules <- sort(rules, by="support", decreasing=TRUE)
# FIX: inspect() is a display function (its value is returned invisibly and
# was NULL in older arules releases), so capturing it for write.csv is
# unreliable. The documented way to get a data.frame of rules is the
# as(..., "data.frame") coercion -- the same idiom already used below.
df <- as(rules, "data.frame")
write.csv(df, file = "arules_op.csv")
as(head(sort(rules, by = c("support")), n=100), "data.frame")
## print a subset of rules whose left-hand side mentions "Pizza"
## (%pin% = partial "in" matching on item labels):
rules2<- subset(rules, subset = (lhs %pin% "Pizza"))
inspect(rules2)
rules2<-sort(rules2, by="support", decreasing=TRUE)
inspect(rules2)
|
eb47c951d9cf6551b4435a4da33f0190a1e379d8
|
20b1b50f86dd29003c560c2e375086946e23f19a
|
/clinal_seasonal/sign_test_universal_threshold_dropmissing_bottomquantiles.R
|
4c60b7a1dde3d4763d3324528f58de57d52a7c90
|
[] |
no_license
|
ericksonp/diapause-scripts-clean
|
cfc3ed0433114ee756019105b364f40e03b2419d
|
c93a052e9d63b9f7c60f7c18c1ad004385b59351
|
refs/heads/master
| 2021-07-09T07:16:03.326322
| 2020-07-31T17:00:37
| 2020-07-31T17:00:37
| 170,798,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,148
|
r
|
sign_test_universal_threshold_dropmissing_bottomquantiles.R
|
# Sign test of GWAS effect directions against clinal and seasonal allele-
# frequency model coefficients (Bergland et al. 2014 dataset), computed for
# many GWAS permutations/draws in parallel and saved as one data.table.
library(data.table)
library(ggplot2)
library(cowplot)
theme_set(theme_cowplot())
library(foreach)
library(doMC)
registerDoMC(16)
library(viridis)
library(ggbeeswarm)
library(Rmisc)
print("bergland 2014")
# Loads object `p` (per-SNP clinal/seasonal regression results -- presumably;
# confirm against 6d_data.Rdata).
load("/scratch/pae3g/oldscratch_recovered/evolution/6d_data.Rdata")
b<-as.data.table(p)
# Minor-allele frequency filter and removal of extreme betas (|beta| >= 3).
b[,maf:=pmin(f.hat, 1-f.hat)]
b<-b[maf>=0.05]
b<-b[clinal.beta<3&clinal.beta>(-3)]
b<-b[sfsfsfX.beta<3&sfsfsfX.beta>(-3)]
# q columns: log10 of the empirical p-value rank quantile (0 = worst rank).
b[,clinal.q:=log10(frank(clinal.p)/(length(clinal.p)+1))]
b[,sfsfsfX.q:=log10(frank(sfsfsfX.p)/(length(sfsfsfX.p)+1))]
files<-fread("/scratch/pae3g/genome-reconstruction/universal_input.txt")
#read in files
# One iteration per GWAS run (population x phenotype x draw x permutation x model).
y<-foreach(pop=files$V1[1:3000], phenotype=files$V2[1:3000],draw=files$V3[1:3000], perm=files$V4[1:3000], model=files$V6[1:3000])%dopar%{
  print(paste(draw, perm, sep=","))
  #read gwas adn do some fixes
  # Loads `assoc.results` for this run.
  load(paste("/scratch/pae3g/revisions/genesis_", phenotype, "_draw", draw, "_perm", perm, "_pop", pop, "_" , model, "_allsnpgrm_wolbachia_dropmissing.Rdat", sep=""))
  gwas<-assoc.results
  gwas[,maf:=pmin(freq, 1-freq)]
  gwas<-gwas[maf>=0.05]
  gwas[,q:=log10(frank(Score.pval)/(length(Score.pval)+1))]
  gwas<-merge(gwas, b[,.(chr, pos, clinal.beta, clinal.p,clinal.q, maf, sfsfsfX.p, sfsfsfX.beta, sfsfsfX.q)], by=c("chr", "pos"))
  # 2x2 sign-concordance cells (GWAS score sign x model beta sign).
  # NOTE(review): T/F are used for TRUE/FALSE throughout; they are
  # reassignable identifiers in R -- spell out TRUE/FALSE when editing.
  gwas[,TT.cline:=ifelse(sign(Score)==1 & sign(clinal.beta)==(-1), T, F)]
  gwas[,TF.cline:=ifelse(sign(Score)==1 & sign(clinal.beta)==1, T, F)]
  gwas[,FT.cline:=ifelse(sign(Score)==(-1) & sign(clinal.beta)==(-1), T, F)]
  gwas[,FF.cline:=ifelse(sign(Score)==(-1) & sign(clinal.beta)==1, T, F)]
  gwas[,TT.seas:=ifelse(sign(Score)==1 & sign(sfsfsfX.beta)==1, T, F)]
  gwas[,TF.seas:=ifelse(sign(Score)==1 & sign(sfsfsfX.beta)==(-1), T, F)]
  gwas[,FT.seas:=ifelse(sign(Score)==(-1) & sign(sfsfsfX.beta)==1, T, F)]
  gwas[,FF.seas:=ifelse(sign(Score)==(-1) & sign(sfsfsfX.beta)==(-1), T, F)]
  #bergland cline is based on alternate alleles and needs to be flipped.
  gwas[,ps.cline:=-1*Score.Stat*clinal.beta]
  #bergland positive clinal beta means alternate allele is higher in the fall (same direction as gwas)
  gwas[,ps.seas:=1*Score.Stat*sfsfsfX.beta]
  # Per quantile cutoff `top`, tabulate concordance counts and the polygenic
  # score over the BOTTOM quantiles (q > top keeps the least significant SNPs,
  # matching the "bottomquantiles" file name).
  cline.top<-foreach (top = seq(from=-5, to=0, by=1)) %do% {
    a<-gwas[q>top,.(
      n=.N,
      TT=sum(TT.cline, na.rm=T),
      TF=sum(TF.cline, na.rm=T),
      FT=sum(FT.cline, na.rm=T),
      FF=sum(FF.cline, na.rm=T),
      poly=sum(ps.cline)
    )]
    a[,top:=top]
  }
  cline.top<-rbindlist(cline.top)
  # Odds ratio of sign concordance (TT*FF)/(FT*TF).
  cline.top[,or:=as.numeric(TT)*as.numeric(FF)/(as.numeric(FT)*as.numeric(TF))]
  cline.top[, perm:=perm]
  cline.top[,draw:=draw]
  cline.top[,test:="clinal"]
  cline.top[,pheno:=phenotype]
  cline.top[,model:=model]
  cline.top[,pop:=pop]
  seas.top<-foreach (top = seq(from=-5, to=0, by=1)) %do% {
    a<-gwas[q>top,.(
      n=.N,
      TT=sum(TT.seas, na.rm=T),
      TF=sum(TF.seas, na.rm=T),
      FT=sum(FT.seas, na.rm=T),
      FF=sum(FF.seas, na.rm=T),
      poly=sum(ps.seas)
    )]
    a[,top:=top]
  }
  seas.top<-rbindlist(seas.top)
  seas.top[,or:=as.numeric(TT)*as.numeric(FF)/(as.numeric(FT)*as.numeric(TF))]
  seas.top[, perm:=perm]
  seas.top[,draw:=draw]
  seas.top[,test:="seasonal"]
  seas.top[,pheno:=phenotype]
  seas.top[,model:=model]
  seas.top[,pop:=pop]
  return(rbind(cline.top, seas.top))
}
y<-rbindlist(y)
save(y, file="/scratch/pae3g/revisions/evolution/bergland2014_sign_universal_threshold_dropmissing_bottom_quantiles.Rdata")
# print("machado 2019")
#
# cline<-fread("/scratch/pae3g/revisions/evolution/east_coast_cline_V2_clean.txt")
# seas<-fread("/scratch/pae3g/revisions/evolution/seas_glm_NOTswitch_clean.txt")
# freqs<-fread("/scratch/pae3g/oldscratch_recovered/evolution/east_coast_cline_V2_allele_freqs.txt")
#
# x<-merge(cline, seas, by=c("chr", "pos"))
# x<-merge(x, freqs, by=c("chr", "pos"))
#
# x<-x[f.hat>=0.05&f.hat<=0.95]
# x<-x[clinal.beta<3&clinal.beta>(-3)]
# x<-x[seas.beta<3&seas.beta>(-3)]
#
#
#
#
#
# y<-foreach(pop=files$V1[1:3000], phenotype=files$V2[1:3000],draw=files$V3[1:3000], perm=files$V4[1:3000], model=files$V6[1:3000], .errorhandling="remove")%dopar%{
# print(paste(draw, perm, sep=","))
# #read gwas adn do some fixes
# load(paste("/scratch/pae3g/revisions/genesis_", phenotype, "_draw", draw, "_perm", perm, "_pop", pop, "_" , model, "_allsnpgrm_wolbachia_dropmissing.Rdat", sep=""))
# gwas<-assoc.results
# gwas[,maf:=pmin(freq, 1-freq)]
# gwas<-gwas[maf>=0.05]
# gwas[,q:=log10(frank(Score.pval)/(length(Score.pval)+1))]
# gwas<-merge(gwas, x[,.(chr, pos, clinal.beta, clinal.p, seas.beta)], by=c("chr", "pos"))
# gwas[,TT.cline:=ifelse(sign(Score)==1 & sign(clinal.beta)==(-1), T, F)]
# gwas[,TF.cline:=ifelse(sign(Score)==1 & sign(clinal.beta)==1, T, F)]
# gwas[,FT.cline:=ifelse(sign(Score)==(-1) & sign(clinal.beta)==(-1), T, F)]
# gwas[,FF.cline:=ifelse(sign(Score)==(-1) & sign(clinal.beta)==1, T, F)]
#
# gwas[,TT.seas:=ifelse(sign(Score)==1 & sign(seas.beta)==(-1), T, F)]
# gwas[,TF.seas:=ifelse(sign(Score)==1 & sign(seas.beta)==(1), T, F)]
# gwas[,FT.seas:=ifelse(sign(Score)==(-1) & sign(seas.beta)==(-1), T, F)]
# gwas[,FF.seas:=ifelse(sign(Score)==(-1) & sign(seas.beta)==(1), T, F)]
#
# gwas[,ps.cline:=-1*Score.Stat*clinal.beta]
# #note that the seasonal polygenic score should have had a *-1 in it and needs to be flipped for plotting
# gwas[,ps.seas:=1*Score.Stat*seas.beta]
#
#
# cline.top<-foreach (top = seq(from=-5, to=0, by=1)) %do% {
# a<-gwas[q<top,.(
# n=.N,
# TT=sum(TT.cline, na.rm=T),
# TF=sum(TF.cline, na.rm=T),
# FT=sum(FT.cline, na.rm=T),
# FF=sum(FF.cline, na.rm=T),
# poly=sum(ps.cline)
# )]
# a[,top:=top]
# }
#
# cline.top<-rbindlist(cline.top)
# cline.top[,or:=as.numeric(TT)*as.numeric(FF)/(as.numeric(FT)*as.numeric(TF))]
# cline.top[, perm:=perm]
# cline.top[,draw:=draw]
# cline.top[,test:="clinal"]
# cline.top[,pheno:=phenotype]
# cline.top[,model:=model]
# cline.top[,pop:=pop]
#
# seas.top<-foreach (top = seq(from=-5, to=0, by=1)) %do% {
# a<-gwas[q<top,.(
# n=.N,
# TT=sum(TT.seas, na.rm=T),
# TF=sum(TF.seas, na.rm=T),
# FT=sum(FT.seas, na.rm=T),
# FF=sum(FF.seas, na.rm=T),
# poly=sum(ps.seas)
# )]
# a[,top:=top]
# }
#
# seas.top<-rbindlist(seas.top)
# seas.top[,or:=as.numeric(TT)*as.numeric(FF)/(as.numeric(FT)*as.numeric(TF))]
# seas.top[, perm:=perm]
# seas.top[,draw:=draw]
# seas.top[,test:="seasonal"]
# seas.top[,pheno:=phenotype]
# seas.top[,model:=model]
# seas.top[,pop:=pop]
#
#
# return(rbind(cline.top, seas.top))
# }
#
# y<-rbindlist(y)
#
# save(y, file="/scratch/pae3g/revisions/evolution/bergland2019_sign_universal_threshold_dropmissing.Rdata")
#
#
# print("individual populations")
# load("/scratch/pae3g/oldscratch_recovered/evolution/core20delta.rdat")
# pops<-names(deltas)
#
#
#
#
# y<-foreach(pop=files$V1[1:3000], phenotype=files$V2[1:3000],draw=files$V3[1:3000], perm=files$V4[1:3000], model=files$V6[1:3000], .errorhandling="remove")%dopar%{
# print(paste(draw, perm, sep=","))
# #read gwas adn do some fixes
# load(paste("/scratch/pae3g/revisions/genesis_", phenotype, "_draw", draw, "_perm", perm, "_pop", pop, "_" , model, "_allsnpgrm_wolbachia_dropmissing.Rdat", sep=""))
# gwas<-assoc.results
# gwas[,maf:=pmin(freq, 1-freq)]
# gwas<-gwas[maf>=0.05]
# gwas[,q:=log10(frank(Score.pval)/(length(Score.pval)+1))]
#
# pop.test<-foreach(p=pops)%do%{
# print(p)
# seas.top<-foreach (top = c(-5:0)) %do% {
#
# l<-merge(gwas[q<=top], deltas[[p]][is.finite(diff.logit)], by=c("chr", "pos"))
# l[,TT:=ifelse(sign(Score.Stat)==1 & sign(diff.logit)==(1), T, F)]
# l[,TF:=ifelse(sign(Score.Stat)==(-1) & sign(diff.logit)==1, T, F)]
# l[,FT:=ifelse(sign(Score.Stat)==(1) & sign(diff.logit)==(-1), T, F)]
# l[,FF:=ifelse(sign(Score.Stat)==(-1) & sign(diff.logit)==(-1), T, F)]
# l[,ps:=Score.Stat*diff.logit]
# l[,top:=top]
# return(l[,.(or=sum(as.numeric(TT), na.rm=T)*sum(as.numeric(FF), na.rm=T)/(sum(as.numeric(TF), na.rm=T)*sum(as.numeric(FT), na.rm=T)),
# poly=sum(ps)), .(population, top)])
#
# }
# return(rbindlist(seas.top))
# }
#
#
#
# pop.test<-rbindlist(pop.test)
# pop.test[,pheno:=phenotype]
# pop.test[,pop:=pop]
# pop.test[, perm:=perm]
# pop.test[,draw:=draw]
# pop.test[,model:=model]
# return(pop.test)
# }
#
# y<-rbindlist(y)
#
# save(y, file="/scratch/pae3g/revisions/evolution/single_population_sign_universal_threshold_dropmissing.Rdata")
#
#
#
#
# Summarize the saved sign-test results and plot observed vs permuted
# polygenic scores per quantile threshold and population.
load("/scratch/pae3g/revisions/evolution/bergland2014_sign_universal_threshold_dropmissing_bottom_quantiles.Rdata")
b2014<-copy(y)
# NOTE(review): ifelse(perm==0, F, T) is simply (perm != 0); also uses the
# reassignable T/F shorthands -- tidy when next edited.
b2014[,permuted:=ifelse(perm==0, F, T)]
# Group label = population x observed/permuted status.
b2014[perm!=0&pop=="both", group:="both-permuted"]
b2014[perm==0&pop=="both", group:="both-observed"]
b2014[perm==0&pop=="A", group:="A-observed"]
b2014[perm==0&pop=="B", group:="B-observed"]
b2014[perm!=0&pop=="A", group:="A-permuted"]
b2014[perm!=0&pop=="B", group:="B-permuted"]
b2014[,pheno2:=ifelse(pheno=="diapause.bin", "st. 8", "st. 10")]
b2014[,pheno2:=factor(b2014$pheno2, levels=c("st. 8", "st. 10"))]
# Human-readable threshold labels for each log10-quantile cutoff.
b2014[top==-5, th:="Bottom 99.999%"]
b2014[top==-4, th:="Bottom 99.99%"]
b2014[top==-3, th:="Bottom 99.9%"]
b2014[top==-2, th:="Bottom 99%"]
b2014[top==-1, th:="Bottom 90%"]
b2014[top==0, th:="all SNPs"]
# NOTE(review): `top` is numeric upstream, so top=="lasso" relies on implicit
# coercion and likely never matches here -- confirm whether LASSO rows exist.
b2014[top=="lasso", th:="LASSO"]
b2014[,th:=factor(th, levels=c("all SNPs", "Bottom 90%", "Bottom 99%", "Bottom 99.9%", "Bottom 99.99%", "Bottom 99.999%"))]
# Median and 2.5/97.5 percentiles of the polygenic score per cell.
b2014.sum<-b2014[,.(med=median(poly), q.025=quantile(poly, 0.025), q.975=quantile(poly, .975)), .(th,group, pheno, pheno2, pop, test,top, permuted)]
b2014.sum[,pheno2:=factor(b2014.sum$pheno2, levels=c("st. 8", "st. 10"))]
# Empirical test: does each observed score fall outside the permuted 95% band?
stats<-merge(b2014[permuted==F], b2014.sum[permuted==T], by=c("th", "pheno", "pheno2", "pop", "test", "top"))
stats[,over:=poly>q.975]
stats[,under:=poly<q.025]
stats.sum<-stats[,.(n=.N, prop.over=sum(over)/.N*100, prop.under=sum(under)/.N*100, max=max(poly)), .(th, pheno, pheno2, pop, test, top)]
# Clinal panel: medians with 95% intervals, faceted by threshold x phenotype;
# percent-over labels are only drawn where >50% of observed runs exceed the
# permuted upper bound. The top==-5 threshold is excluded from this figure.
a.plot<-ggplot(b2014.sum[test=="clinal" &top!=(-5)])+
  geom_point(data=b2014.sum[test=="clinal"&top!=(-5)], aes(x=pop, y=med, color=group), position=position_dodge(width=0.5))+
  geom_errorbar(data=b2014.sum[test=="clinal"&top!=(-5)], aes(x=pop, ymax=q.975, ymin=q.025, color=group), width=0.2, position=position_dodge(width=0.5))+
  labs(x="", y="sum(GWAS coefficient*model coefficient)", color="", title="clinal")+
  scale_color_manual(values=c("dodgerblue2", "grey80", "darkorchid", "grey80", "lightseagreen", "grey80"))+
  facet_grid(th~pheno2, scales ="free_y")+
  theme(legend.position = "none")+
  geom_text(data=stats.sum[test=="clinal" & prop.over>50&top!=(-5)], aes(x=pop, y=max+0.1*max, label=prop.over))
# Write the clinal panel to PDF.
pdf("/scratch/pae3g/revisions/figures/bergland2014_main_dropmissing_bottomquantiles.pdf", height=8, width=5)
a.plot
dev.off()
|
f54d660daf34238ab657cf5b43a5d673b6ee7e87
|
5a00f6a5ae90db27d77862c1caf9ac8c2ab261b7
|
/data.R
|
746e4603e84353531c16ac0b7b1d1970be559e05
|
[] |
no_license
|
fxchalet/Diabetes-Analysis
|
0af61f6cbdd49b825c9b09679c02e03ad4320237
|
48a293a2b77ef8726708afb665efd1d1e967c1df
|
refs/heads/master
| 2021-02-23T23:31:31.117903
| 2020-03-06T12:30:59
| 2020-03-06T12:30:59
| 245,413,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,099
|
r
|
data.R
|
####--SERVER DATA--------------------------------------------------------------------------------------------
#
# DATA
#
#### Datasets ####
### Function to filter data with the input widgets.
# Shiny reactive: re-reads the raw dataset, applies every slider/select
# filter, renames columns to display names, and returns the filtered frame.
my_data_filtered<-reactive({
  my_data<-read_my_data()
  # Capture current widget values into locals (each slider_* range widget
  # yields a c(min, max) pair).
  samplesize<-input$slider_sample_size
  minage<-input$slider_age[1]
  maxage<-input$slider_age[2]
  minBMI<-input$slider_BMI[1]
  maxBMI<-input$slider_BMI[2]
  minHbA1c<-input$slider_hba1c[1]
  maxHbA1c<-input$slider_hba1c[2]
  minstabilizedglucose<-input$slider_stabilizedglucose[1]
  maxstabilizedglucose<-input$slider_stabilizedglucose[2]
  mincholesterol<-input$slider_cholesterol[1]
  maxcholesterol<-input$slider_cholesterol[2]
  minHDL<-input$slider_HDL[1]
  maxHDL<-input$slider_HDL[2]
  minbloodpressuresystolic<-input$slider_BPS[1]
  maxbloodpressuresystolic<-input$slider_BPS[2]
  minbloodpressurediastolic<-input$slider_BPD[1]
  maxbloodpressurediastolic<-input$slider_BPD[2]
  # Apply numeric range filters: sample size, age, BMI, HbA1c, stabilized
  # glucose, cholesterol, HDL, systolic and diastolic blood pressure.
  # NOTE(review): rows with NA in any filtered column are dropped by filter().
  my_data<- my_data %>%
    filter(
      n<=samplesize,
      age>=minage,
      age<=maxage,
      BMI>=minBMI,
      BMI<=maxBMI,
      glyhb>=minHbA1c,
      glyhb<=maxHbA1c,
      stab.glu>=minstabilizedglucose,
      stab.glu<=maxstabilizedglucose,
      chol>=mincholesterol,
      chol<=maxcholesterol,
      hdl>=minHDL,
      hdl<=maxHDL,
      bp.1s>=minbloodpressuresystolic,
      bp.1s<=maxbloodpressuresystolic,
      bp.1d>=minbloodpressurediastolic,
      bp.1d<=maxbloodpressurediastolic
    )
  # filter by gender ("All" disables the filter; %like% does SQL-style matching)
  if (input$gender != "All") {
    my_gender <- paste0("%", input$gender, "%")
    my_data <- my_data %>% filter(gender %like% my_gender)
  }
  # filter by obesity status
  if (input$obesity != "All") {
    obesity <- paste0("%", input$obesity, "%")
    my_data <- my_data %>% filter(obcat %like% obesity)
  }
  # filter by diabetic status
  if (input$diabetic != "All") {
    diabetic <- paste0("%", input$diabetic, "%")
    my_data <- my_data %>% filter(glyhbcat %like% diabetic)
  }
  # Rename variables to display labels, then drop `frame` and `Subject id`
  # and put the key columns first.
  my_data<-rename(my_data,'Obesity status'=obcat, 'Diabetic status'=glyhbcat,'Subject id'=id,'Cholesterol'=chol,'Stabilized Glucose'=stab.glu,'HDL'=hdl,'Cholesterol/HDL Ratio'=ratio,HbA1c=glyhb,Location=location,Age=age,Gender=gender,'Height (inches)'=height,'Weight (pounds)'=weight,frame=frame,'First Systolic Blood Pressure'=bp.1s,'First Diastolic Blood Pressure'=bp.1d,'Second Systolic Blood Pressure'=bp.2s,'Second Diastolic Blood Pressure'=bp.2d,'Waist (inches)'=waist,'Hip (inches)'=hip,'Postprandial time (minutes)'=time.ppn)
  df<-select(my_data,-frame,-'Subject id')
  df<-select(df,n,Gender, Age,Location,BMI,'Obesity status','Diabetic status',HbA1c,everything())
  df
})
|
3e10214a1f22da443a1b9617df6b396bf3bbca0c
|
0e37285e7a020b09509151a2be35c0a65ca30840
|
/interakcija.R
|
e9d6b91d731201a0b2247b37c560ae911bd75e65
|
[] |
no_license
|
Biometrika/Data
|
4203a9c583f73446076bb12348e2d0c87d5dd6e9
|
b5fad24ea18afa4a8c6e45c2ef1b6a84681d19d2
|
refs/heads/master
| 2022-03-17T06:57:57.370637
| 2022-02-16T14:31:00
| 2022-02-16T14:31:00
| 155,161,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,543
|
r
|
interakcija.R
|
# Data source: Allard and Bradshaw (1964)
library(ggplot2)

# Factorial layout: 6 "types" x 2 locations x 2 genotypes = 24 observations.
# Built with rep() instead of spelling out every value.
tip <- rep(paste("Tip", 1:6), each = 4)
lok <- rep(c("Y", "Y", "X", "X"), times = 6)
gen <- rep(c("A", "B"), times = 12)
y   <- c(3,1,4,2,2,1,4,3,3,2,4,1,2,3,4,1,1,3,4,2,1,2,4,3)

# Put location Y before X on the x-axis (ordered factor).
lok <- factor(lok, levels = c("Y", "X"), ordered = TRUE)

df <- data.frame(tip, lok, gen, y)
df

# Interaction plot: grain yield by location, one line per genotype,
# one panel per type.
interaction_plot <- ggplot(data = df,
                           aes(x = factor(lok),
                               y = y)) +
  geom_point(shape = 16,
             size = 3.5,
             color = "tomato") +
  geom_line(aes(group = factor(gen)),
            color = "tomato") +
  facet_wrap(~ tip, ncol = 2) +
  labs(x = "Lokalitet",
       y = "Prinos zrna",
       title = "Allard and Bradshaw (1964)") +
  theme_light() +
  theme(axis.title.x = element_text(size = 15, face = "bold"),
        axis.text.x = element_text(size = 12),
        axis.title.y = element_text(size = 15, face = "bold"),
        axis.text.y = element_text(size = 12),
        plot.title = element_text(hjust = 0.5, size = 15, face = "bold"),
        strip.background = element_rect(fill = "tomato", size = 0.2),
        strip.text = element_text(size = 12, face = "bold", colour = "white")
  )
interaction_plot
|
45326900df7a3cf23efcbe26ce76d59a4c842030
|
556166f8262cb8341af908281191a877f8c406a1
|
/Rscript/days.R
|
d6cb1d4ccb06106e69a903cf4b8bf0bf936073f1
|
[] |
no_license
|
rsharp-lang/COVID-19
|
36eb3a8a9980f5f123b247a953c0fe8a4e5de0eb
|
2a1639551bb37413c1206599e0d86750b35ac40b
|
refs/heads/master
| 2023-04-16T14:32:22.862176
| 2021-06-16T09:58:56
| 2021-06-16T09:58:56
| 241,013,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 805
|
r
|
days.R
|
imports "GIS_render.R";

# Render per-day COVID-19 maps of China, one pair of SVGs for every date.
# NOTE(review): `dates.all`, `raw`, COVID_19.map_render.china and
# save.graphics are expected to come from GIS_render.R -- confirm there.
print(dates.all);

# Custom visualization metric: cured count / confirmed count per region
# (zero when no one in the region has been cured yet).
let custom = function(day) {
lapply(raw[[day]], function(region) {
if (region$cured == 0.0) {
0;
} else {
region$cured / region$confirmed
}
});
};

for(day in dates.all) {
# Map 1: confirmed cases, white -> yellow -> red color ramp.
day
:> COVID_19.map_render.china(
levels = 30,
source = "confirmed",
color.schema = ["white", "yellow", "red"]
)
:> save.graphics(file = `./viz/days/${day}/confirmed.svg`)
;
# Map 2: cured/confirmed ratio on a linear (non-log) scale,
# red (low recovery) -> green (high recovery).
day
:> COVID_19.map_render.china(
levels = 30,
source = custom,
color.schema = ["red", "yellow", "green"],
log.scale = FALSE
)
:> save.graphics(file = `./viz/days/${day}/cured_vs_confirmed.svg`)
;
}
|
eeb094971e9b90b515ed5b51dfbb10dbc88ac5cd
|
91c35aca2930e60581b57b694e85b7040246b96f
|
/public/mosquito/rainAndTemp/d_modelRefinement.R
|
d73c81b9297ed3f189208da64e041b44f19d2d7c
|
[] |
no_license
|
waughsh/fdoh
|
b5abe8d2a30d4a6d60427f055cae5f3c71ece1a4
|
30ecab89f5916ba8baa962aafb71cf819cbb61a4
|
refs/heads/master
| 2020-04-05T14:58:12.156605
| 2015-09-09T03:32:54
| 2015-09-09T03:32:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,140
|
r
|
d_modelRefinement.R
|
# Mosquito-count model refinement: pick the best rain/minTemp window
# combination (precomputed r-squared table) and attach its predictions.
library(pingr)
#########################################
# READ IN DATA FOR TEMP AND RAINFALL FOR LAST 6 YEARS
#########################################
ts <- read.csv("E:/workingdirectory/mosquito/rainAndTemp/b_rainAndTemp.csv")
#########################################
# LOAD THE MODEL VARIATIONS WITH THEIR R-SQUARED VALUES
#########################################
pred <- read.csv("E:/workingdirectory/mosquito/rainAndTemp/c_rainAndTempDone.csv")
#########################################
# Select best prediction model (highest r-squared among the candidates)
#########################################
best <- as.character(pred$posibs[which(pred$r.squared == max(pred$r.squared))])
#########################################
# GET MODEL DETAILS
#########################################
# Columns correspond to the winning "rain16.36 AND minTemp16.31" combination.
bestModel <- lm(ts$total ~ ts$rain16.36 + ts$minTemp16.31)
summary(bestModel)
#########################################
# USING BEST MODEL, MAKE A PREDICTED COLUMN IN TS
#########################################
# FIX: use the model's own fitted values instead of hand-transcribed,
# rounded coefficients (-234.105, 53.74, 3.496); manual transcription loses
# precision and silently diverges if the model is ever refit.
# NOTE(review): assumes no missing rows in these columns, otherwise lm's
# na.action shortens fitted() -- confirm against the input data.
ts$predicted <- fitted(bestModel)
#########################################
# PLAY AROUND WITH A FEW ALTERNATIVE MODELS
# (response transformations: sqrt, log, and a power sweep)
#########################################
#SQRT
sqrtModel <- lm(sqrt(ts$total) ~ ts$rain16.36 + ts$minTemp16.31)
summary(sqrtModel) #R.squared == 0.5094
#LOG
logModel <- lm(log(ts$total) ~ ts$rain16.36 + ts$minTemp16.31)
summary(logModel) #R.squared == 0.5325
#ALTMODEL
# Sweep the exponent 1/i over i in [6.3, 6.5] and plot r-squared vs i
# (empty plot first, then one point per fit).
plot(seq(6.3,6.5,0.01), seq(6.3,6.5,0.01)/6.5, type="n", ylim=c(0,1))
for (i in seq(6.3,6.5,0.01)){
altModel <- summary(lm((ts$total)^(1/i) ~ ts$rain16.36 + ts$minTemp16.31))
#summary(altModel) #R.squared == 0.5094
bla <- cbind(i, altModel$r.squared)
points(i, altModel$r.squared, pch=".")
print(unlist(bla))
} #THIS FINDS BEST R-SQUARED USING ts$total^(1/6.4)
altModel <- lm((ts$total)^(1/6.4) ~ ts$rain16.36 + ts$minTemp16.31)
summary(altModel) #R.squared == 0.5408
# Though the 6.4 model is slightly better, I'm sticking with log
# But if taking the log of total works best, maybe other predictors would have done a better job?
# Time to re-run the 65536 simulations, this time taking the log of total
#########################################
#### CREATE A VECTOR OF ALL THE POSSIBLE COMBINATIONS OF MIN TEMP RANGES AND RAINFALL RANGES
#########################################
rainPosibs <- rep(colnames(ts)[grepl("rain", colnames(ts))][-1],
length(colnames(ts)[grepl("minTemp", colnames(ts))][-1]))
minTempPosibs <- sort(rep(colnames(ts)[grepl("minTemp", colnames(ts))][-1],
length(colnames(ts)[grepl("rain", colnames(ts))][-1])))
posibs <- paste0(rainPosibs, "AND", minTempPosibs)
#########################################
#CREATE A DATA FRAME FROM MY POSIBS VECTOR
# THIS IS WHERE I'LL PUT MY MODEL QUALITY INDICATORS
#########################################
predLog <- as.data.frame(posibs)
#########################################
#Test the r-squared for each column (RAIN ONLY)
#########################################
predLog$r.squared <- NA
for (i in 1:length(predLog$posibs)){
mylm <- summary(lm(log(ts[,"total"]) ~
ts[,unlist(strsplit(posibs, "AND")[i])[1]] +
ts[,unlist(strsplit(posibs, "AND")[i])[2]]
))
predLog$r.squared[i] <- mylm$r.squared
}
#########################################
#Select best predicition model
#########################################
predLog <- predLog[rev(order(predLog$r.squared)),]
bestLog <- as.character(predLog$posibs[which(predLog$r.squared == max(predLog$r.squared))])
bestModelLog <- lm(log(ts$total) ~ ts[, unlist(strsplit(bestLog, "AND"))[1]] +
ts[,unlist(strsplit(bestLog, "AND"))[2]])
summary(bestModelLog)
summary(lm(log(ts$total)~
ts$rain17.37 +
ts$minTemp14.32))
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#########################################
# USING BEST LOG MODEL, MAKE A PREDICTEDLOG COLUMN IN TS
#########################################
ts$predictedLog <- exp(-1.121293 + (ts$rain17.37*.200055) + (ts$minTemp14.32*0.074435))
save.image("E:/workingdirectory/mosquito/rainAndTemp/d_rainAndTempDoneLog.Rdata")
write.csv(predLog, "E:/workingdirectory/mosquito/rainAndTemp/d_rainAndTempDoneLog.csv")
write.csv(ts, "E:/workingdirectory/mosquito/rainAndTemp/d_tsRainTempPredPredLog.csv")
ping(2)
#The above is a simulation in which the log(total) is regressed against all the 512
#possible combinations of minTemp and cum rain.
#When this is done running, I'll compare it's r-squared values with those of before
#(before being the linear, not log model)
#########################################
#
#########################################
#########################################
#
#########################################
#########################################
#
#########################################
#########################################
#
#########################################
#########################################
#
#########################################
#########################################
#
#########################################
|
fdb3722fc7f66fda5011d92e551f5f2b42e142ff
|
570abc2b93f05cbce92d95f6b9bffbe48708bb6c
|
/simulacao/script_p_cluster.R
|
432f55b8167eb143fc4a12ff715e9c7df526eff9
|
[] |
no_license
|
Danhisco/artigo_mestrado
|
ed871edab0e89e28a5569668a670ea43d3f548e3
|
1ff761608ea7312c533ef216b51fd67fb8268b00
|
refs/heads/master
| 2023-02-03T00:15:42.720220
| 2023-01-30T02:13:25
| 2023-01-30T02:13:25
| 121,779,367
| 0
| 1
| null | 2018-11-01T11:05:29
| 2018-02-16T17:28:00
|
HTML
|
UTF-8
|
R
| false
| false
| 9,626
|
r
|
script_p_cluster.R
|
## Run the simulations ##
### configuration
library(doMC)
library(GUILDS)
library(lme4)
library(merTools)
library(magrittr)
library(tidyverse)
library(plyr)
setwd("/home/danilo/Documentos/Doutorado/artigo_mestrado/simulacao/")
# coalescent simulation function (defines dinamica_coalescente)
source("dinamica_coalescente_beta.R")
# data
df_referencia <- read.csv(file="df_referencia.csv",row.names = 1,as.is = TRUE)
# df_referencia %<>% filter(S %in% c(195,230,226,26,45,52)) # sites with extreme S in each third of the p gradient
# Keep only the three aggregation levels of interest.
df_referencia %<>% filter(k %in% c(0.99,0.5,0.05))
# ddply("quantil_p",summarise,S_max=max(S),S_min=min(S))
# df_referencia %>% str
# df_referencia %>% ggplot(aes(x=k,y=d,group=k)) + geom_boxplot() + geom_jitter() +
#   labs(y="dist. média (metros)")
##### system-wide settings #####
n_cores <- 2 # number of CPU cores to use
n_rep <- 10 # number of replicate SADs per site
######################################################
#################### MNEE ############################
######################################################
### estimate U ###
# Duplicate every reference row `replicas` times and add an empty U column,
# so each copy receives one estimated immigration rate below.
# NOTE(review): uses 1:dim(x)[1], which misbehaves on zero-row input --
# seq_len(nrow(x)) would be safer.
func1 <- function(x,replicas=10) {
  x$U <- NA
  x <- x[rep(1:dim(x)[1],each=replicas),]
}
df_referencia %<>% func1()
### simulation ##
# k values present after the filter above
k_factor <- unique(df_referencia$k)
for(a in 1:length(k_factor)){
  # a <- 1
  # i <- 1
  # one pass per k level; results written to ./U/df_simU__k<k>.csv
  df_simU <- df_referencia %>% filter(k == k_factor[a])
  ### worker function so the runs can be parallelized
  op <- options(digits.secs=6)
  # Runs one coalescent simulation for row i and returns the estimated U.
  funcao_imigracao <- function(i,df_temp=df_simU){
    aviao <- list()
    aviao <- dinamica_coalescente(U = 1.25e-06,
                                  S = df_temp[i,"S"],
                                  N_simul = 1,
                                  seed = as.numeric(Sys.time()),
                                  disp_range = df_temp[i,"d"],
                                  disp_kernel = df_temp[i,"kernel_code"],
                                  landscape = df_temp[i,"txt.file"])
    return(aviao$U_est)
  }
  # parallel execution of the simulation, one task per row
  registerDoMC(n_cores)
  replica.sim <- as.list(1:dim(df_simU)[1])
  sim.coal_U <- llply(.data = replica.sim, .fun = funcao_imigracao, .parallel = TRUE)
  df_simU[,"U"] <- unlist(sim.coal_U)
  write.csv(df_simU,
            file=paste0("./U/","df_simU__k",k_factor[a],".csv"),row.names = FALSE)
}
# Leitura e preparação para simulação da SAD
df_simulacao <- map_df(Sys.glob("./U/*.csv"),read.csv)
df_simulacao %<>% ddply(names(.)[-13],summarise,U_med=mean(U),U_var=var(U))
df_simulacao$txt.file %<>% as.character()
df_simulacao %<>% mutate(k_prop=k) %>% group_by(SiteCode,k_prop) %>% nest
#funcao para simulacao
f_simulacao <- function(i,df_=df_simulacao){
X <- df_[i,][["data"]] %>% as.data.frame()
mat_sim <- dinamica_coalescente(U = X[,"U_med"],
S = 0,
N_simul = n_rep,
seed = as.numeric(Sys.time()),
disp_range = X[,"d"],
disp_kernel = X[,"kernel_code"],
landscape = X[,"txt.file"])
}
registerDoMC(2) # NOTE(review): literal 2 rather than n_cores — confirm intended
simulacao <- as.list(1:dim(df_simulacao)[1]) # one work item per (SiteCode, k) row
# simulate n_rep replicate communities per row (spatially explicit model)
df_simulacao$SADs.EE <- llply(simulacao,f_simulacao,.parallel = TRUE)
# function that writes each replicate SAD to its own .csv file
# Write every replicate SAD predicted by the spatially explicit model (EE)
# for one (SiteCode, k) group to its own .csv file.
#
# Args:
#   X - one-row nested tibble with list-columns `data` (parameters) and
#       `SADs.EE` (simulated communities, one replicate per row).
# Side effects: writes ./SADs_preditas/<landscape>__k<k>.EE.rep_<i>.csv files.
f_d_ply.EE <- function(X){
  df_name <- X$data %>% as.data.frame
  # one sorted abundance vector (species abundances) per simulated replicate;
  # the inner function no longer shadows the outer argument `X`
  l_SADs <- X$SADs.EE[[1]] %>% alply(., 1, function(comm) sort(as.integer(table(comm))))
  file_name <- paste0("./SADs_preditas/", gsub(".txt", "", df_name[, "txt.file"]),
                      "__k", df_name[, "k"], ".EE.", "rep_", seq_along(l_SADs), ".csv")
  for(i in seq_along(l_SADs)){
    # write.csv() always writes a header and ignores col.names with a warning,
    # so that argument was removed; downstream readers rely on the default
    # "x" header anyway.
    write.csv(l_SADs[[i]],
              file = file_name[i],
              row.names = FALSE)
  }
}
# write every EE-predicted SAD to disk, one (SiteCode, k) group at a time
d_ply(df_simulacao,c("SiteCode","k_prop"),f_d_ply.EE,.parallel = TRUE)
######################################################
#################### MNEI ############################
######################################################
# Read the U estimates and prepare the table for the SAD simulation.
df_simulacao <- map_df(Sys.glob("./U/*.csv"),read.csv)
df_simulacao %<>% ddply(names(.)[-13],summarise,U_med=mean(U),U_var=var(U))
df_simulacao$txt.file %<>% as.character()
## Conversion of the spatially explicit (MNEE) parameters into the
## spatially implicit (MNEI) ones ##
# I = effective immigration parameter, theta = fundamental biodiversity number
df_simulacao %<>% mutate(L_plot = 100/sqrt(J/DA),
                         m = d * ( 1 - exp(-L_plot/d) ) / L_plot,
                         m_ = m * p / (1 - (1-p) * m),
                         I = m_ * (J-1)/(1-m_),
                         J_M=p*DA*2500,
                         theta=(U_med*(J_M-1))/(1-U_med))
df_simulacao %<>% mutate(k_prop=k) %>% group_by(SiteCode,k_prop) %>% nest
## SAD prediction (generate.ESF is defined elsewhere in the project)
registerDoMC(2)
df_simulacao$SADs <- llply(df_simulacao[["data"]],function(X) replicate(n_rep,generate.ESF(theta = X$theta, I = X$I, J = X$J)),.parallel = TRUE)
# Write every replicate SAD predicted by the spatially implicit model (EI)
# for one (SiteCode, k) group to its own .csv file.
#
# Args:
#   X - one-row nested tibble with list-columns `data` and `SADs`.
# Side effects: writes ./SADs_preditas/<landscape>__k<k>.EI.rep_<i>.csv files.
# NOTE(review): assumes replicate() produced a list (SAD lengths differ
# between replicates); if every replicate had equal richness, replicate()
# returns a matrix and length()/[[i]] would index single cells — confirm.
f_d_ply <- function(X){
  df_name <- X$data %>% as.data.frame
  l_SADs <- X$SADs[[1]]
  file_name <- paste0("./SADs_preditas/", gsub(".txt", "", df_name[, "txt.file"]),
                      "__k", df_name[, "k"], ".EI.", "rep_", seq_along(l_SADs), ".csv")
  for(i in seq_along(l_SADs)){
    # write.csv() always writes a header and ignores col.names with a warning,
    # so that argument was removed; downstream readers rely on the default
    # "x" header anyway.
    write.csv(l_SADs[[i]],
              file = file_name[i],
              row.names = FALSE)
  }
}
# write every EI-predicted SAD to disk
d_ply(df_simulacao,c("SiteCode","k_prop"),f_d_ply,.parallel = TRUE)
######################################################
############## Data synthesis ########################
######################################################
## df_geral: parameter table with averaged U estimates ##
df_resultados <- map_df(Sys.glob("./U/*.csv"),read.csv)
df_resultados %<>% ddply(names(.)[-13],summarise,U_med=mean(U),U_var=var(U))
df_resultados$txt.file %<>% as.character()
df_resultados %<>% mutate(file.tag=gsub(".txt","",txt.file)) # landscape id without extension
## df_SAD.obs: observed SADs, one nested tibble per site
df_SAD.obs <- read.csv(file = "/home/danilo/Documentos/Doutorado/artigo_mestrado/simulacao/SAD_observada/abundances.csv")
# df_SAD.obs %>% dim
df_SAD.obs %<>% group_by(SiteCode) %>% nest
names(df_SAD.obs)[2] <- "SAD.obs"
## df_SAD.predita: inventory of the predicted-SAD files on disk ##
df_SAD.predita <- data.frame(SAD_MN.name=as.character(Sys.glob("./SADs_preditas/*.csv")))
find.string <- paste(c("./SADs_preditas/",".csv"),collapse = "|") # NOTE(review): appears unused below
# parse model (EE/EI), k, landscape tag and replicate id out of the file names
df_SAD.predita %<>% mutate(SAD_obs.name=gsub("__k","__SAD.txt",str_match(SAD_MN.name,"ref(.*?)__k")[,1]),
                           MN=str_match(SAD_MN.name,"EE|EI")[,1],
                           k=str_match(SAD_MN.name,"__k(.*?).E")[,2],
                           file.tag=gsub("__k","",str_match(SAD_MN.name,"ref(.*?)__k")[,1]),
                           rep=str_match(SAD_MN.name,"rep_(.*?).csv")[,2])
# df_SAD.predita %>% str
# df_resultados %>% str
# Merges: attach SiteCode to each predicted file, then the observed SAD #
df_SAD.predita$SAD_MN.name %<>% as.character()
df_SAD.predita %<>% left_join(x=.,y=unique(df_resultados[,c("file.tag","SiteCode")]),by="file.tag") %>%
  group_by(SiteCode) %>% nest
df_SAD.predita %<>% left_join(x=.,y=df_SAD.obs,by="SiteCode")
## function that computes the results ##
# For one SiteCode group, compare the observed SAD against every predicted
# SAD file via a Kolmogorov-Smirnov test.
#
# Args:
#   X - one-row nested tibble: `data` holds one row per predicted-SAD file
#       (column SAD_MN.name); `SAD.obs` holds observed abundances (column N).
# Returns: `data` with the KS statistic, p-value and both richness values
# appended, one row per predicted-SAD file.
f_resultados <- function(X){
  df_referencia <- X$data %>% as.data.frame()
  v_SAD.obs <- X$SAD.obs %>% as.data.frame() %>% .$N
  # KS test of one predicted SAD (read from disk) against OBS.
  f_KSeS <- function(OBS = v_SAD.obs, df_predicao){
    v_SAD.predita <- read.csv(file = df_predicao[, "SAD_MN.name"]) %>% .$x
    # ks.test() warns about ties in discrete abundance data; those warnings
    # are expected here and deliberately suppressed.
    a <- suppressWarnings(ks.test(x = OBS,
                                  y = v_SAD.predita))
    a <- data.frame(KS.D = a$statistic, KS.p = a$p.value)
    a$S_SAD.predita <- length(v_SAD.predita)
    # FIX: use the OBS argument, not the enclosing v_SAD.obs, so the helper
    # stays correct if OBS is ever passed explicitly.
    a$S_SAD.obs <- length(OBS)
    return(a)
  }
  # run the KS test for every predicted-SAD row and bind the results
  df_resultados <- adply(df_referencia, 1, function(X) f_KSeS(df_predicao = X))
  return(df_resultados)
}
registerDoMC(n_cores)
# KS comparison of observed vs every predicted SAD, per site
df_SAD.predita %<>% ddply(.,"SiteCode",f_resultados,.parallel = TRUE)
# save the per-replicate results
write.csv(df_SAD.predita,file="./resultados/df_replicas.csv",row.names = F)
# df_SAD.predita <- read.csv("./resultados/df_replicas.csv")
################################################
############ audit of f_resultados #############
################################################
df_SAD.predita %>% head
# D statistic vs p-value, faceted by model and k
df_SAD.predita %>% ggplot(aes(x=KS.D,y=KS.p)) +
  geom_point() +
  facet_wrap(MN~k,ncol=3,scales = "free") +
  labs(x="estatística D, teste KS",y="p-valor")
# observed richness vs p-value
df_SAD.predita %>% ggplot(aes(x=S_SAD.obs,y=KS.p)) +
  geom_point() +
  facet_wrap(MN~k,ncol=3,scales = "free")
# predicted richness vs p-value
df_SAD.predita %>% ggplot(aes(x=S_SAD.predita,y=KS.p)) +
  geom_point() +
  facet_wrap(MN~k,ncol=3,scales = "free")
# summarise over replicates: GOF = number of replicates NOT rejected at 5%
df_SAD.predita %<>% ddply(.,c("SiteCode","MN","k","S_SAD.obs"),summarise,
                          GOF=sum(KS.p>=0.05),
                          p.value_mean=mean(KS.p),p.value_var=var(KS.p),
                          S_mean=mean(S_SAD.predita),S_var=var(S_SAD.predita))
# attach landscape covariates back to the summary (column subset by position)
df_SAD.predita %<>% left_join(x=.,y=df_resultados[,c(1:6,11,13:14)],by=c("SiteCode","k"))
write.csv(df_SAD.predita,file="./resultados/df_resultados.csv",row.names = F)
######################################################
############## Data analysis #########################
######################################################
# read back the summarised results
df_resultados <- read.csv(file="./resultados/df_resultados.csv")
# z-score helper
# Standardise a numeric vector to z-scores ((x - mean) / sd), ignoring NAs
# when computing the mean and standard deviation.
f_z <- function(x){
  centred <- x - mean(x, na.rm = TRUE)
  centred / sd(x, na.rm = TRUE)
}
# z-scores of p-value and richness columns for comparable effect sizes
df_resultados %<>% mutate(p.z = f_z(p),S.z = f_z(S))
names(df_resultados)[1] <- "Site" # rename first column for downstream use
|
6df307eb28e79fa7e7ba64d8b85353d3bac8b7a5
|
4c1f46b66916e0b151b4f5e4de8882f6c8f744fc
|
/man/MTCSzero.Rd
|
412374e4c95094b8fe2a742720a41c490db59aaa
|
[] |
no_license
|
Atan1988/rothello
|
37911c3e236a06b1718271639f7fbe0fd08c973d
|
79d08b9d1a96547ea1670a4e52535d91d284c08f
|
refs/heads/master
| 2020-04-18T16:32:23.483414
| 2019-04-08T00:26:18
| 2019-04-08T00:26:18
| 167,636,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 296
|
rd
|
MTCSzero.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MTCS_zero.R
\docType{data}
\name{MTCSzero}
\alias{MTCSzero}
\title{MTCSzero class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
MTCSzero
}
\description{
MTCSzero class
}
\keyword{datasets}
|
5dd909ca383891f3895210f5a7120284c485847c
|
2ddff9eb4db54e20c83d3c4fc5332b6341409c6a
|
/Shiny/compound_fuzzy_proposition_module.R
|
1013ac05285f9e17790f28df783a0bedacc7ff88
|
[] |
no_license
|
RitoJosephDominado/FuzzyInferenceSystem
|
e7761106fcb4fcfa456f1816fde9046b54549efb
|
8856dadd74ce3f431c8b5790de3889e5c73542d8
|
refs/heads/master
| 2020-09-01T06:12:23.610174
| 2019-12-01T16:41:54
| 2019-12-01T16:41:54
| 218,896,988
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,999
|
r
|
compound_fuzzy_proposition_module.R
|
# Build the UI box for a compound (union/intersection) fuzzy proposition.
#
# Args:
#   ui_name - namespace id for this module instance.
#   main    - shared app state (passed through to child modules).
#   parent  - list of propositions that contains this one.
#   index   - position of this proposition inside `parent`.
compound_fuzzy_proposition_ui <- function(ui_name, main, parent, index){
  ns <- NS(ui_name)
  # Box title reflects the proposition type (NULL for any other type).
  box_title <- switch(
    parent[[index]]$type,
    'union_fuzzy_proposition' = 'Union',
    'intersection_fuzzy_proposition' = 'Intersection'
  )
  box(
    width = 12, title = box_title, status = 'primary', solidHeader = TRUE,
    fluidRow(
      column(3, selectInput(ns('fuzzy_proposition_type_select'), 'Type', choices = c(
        Simple = 'simple_fuzzy_proposition',
        Intersection = 'intersection_fuzzy_proposition',
        Union = 'union_fuzzy_proposition'
      ))),
      column(3, br(), actionButton(ns('add_fuzzy_proposition_btn'), 'Add')),
      column(
        3, br(),
        # The switch mirrors (and edits) this proposition's negation flag.
        shinyWidgets::materialSwitch(
          ns('negate_switch'), strong('Negate'), status = 'primary', value = parent[[index]]$negated
        )
      )
    ),
    # Placeholder div into which child proposition UIs are inserted.
    tags$div(id = ns('fuzzy_proposition_ui_div'))
  )
}
# Server logic for a compound fuzzy proposition. The proposition tree is held
# in environments, so assignments through `parent[[index]]` mutate shared
# state by reference. On start-up it renders UI + servers for every existing
# child; afterwards the Add button appends new children and the Negate switch
# toggles this node's negation flag.
compound_fuzzy_proposition_server <- function(input, output, session, main, triggers, parent = NULL, index){
  fuzzy_proposition <- parent[[index]]
  # Recreate UI and module server for each child that already exists.
  lapply(seq_along(fuzzy_proposition$argument_list), function(child_index){
    fuzzy_proposition_type <- fuzzy_proposition$argument_list[[child_index]]$type
    # pick the UI builder matching the child's type
    fuzzy_proposition_ui <- switch(
      fuzzy_proposition_type,
      'simple_fuzzy_proposition' = simple_fuzzy_proposition_ui,
      'intersection_fuzzy_proposition' = compound_fuzzy_proposition_ui,
      'union_fuzzy_proposition' = compound_fuzzy_proposition_ui
    )
    insertUI(
      selector = paste0('#', session$ns('fuzzy_proposition_ui_div')),
      ui = fuzzy_proposition_ui(
        session$ns(child_index),
        main = main,
        parent = parent[[index]]$argument_list,
        index = child_index
      )
    )
    # pick and start the matching module server (recursive for compounds)
    fuzzy_proposition_server = switch (
      fuzzy_proposition_type,
      'simple_fuzzy_proposition' = simple_fuzzy_proposition_server,
      'intersection_fuzzy_proposition' = compound_fuzzy_proposition_server,
      'union_fuzzy_proposition' = compound_fuzzy_proposition_server
    )
    callModule(
      module = fuzzy_proposition_server,
      id = child_index,
      main = main, triggers = triggers,
      parent = parent[[index]]$argument_list, index = child_index
    )
  })
  # Add button: create a new child proposition of the selected type, store it
  # (as an environment) in this node's argument list, then render its UI and
  # start its server.
  observeEvent(input$add_fuzzy_proposition_btn, {
    child_index <- length(parent[[index]]$argument_list) + 1
    fuzzy_proposition <- switch(
      input$fuzzy_proposition_type_select,
      'simple_fuzzy_proposition' = simple_fuzzy_proposition(NULL, NULL),
      'intersection_fuzzy_proposition' = intersection_fuzzy_proposition(),
      'union_fuzzy_proposition' = union_fuzzy_proposition()
    )
    parent[[index]]$argument_list[[child_index]] <- fuzzy_proposition %>% convert_fuzzy_proposition_to_environment
    fuzzy_proposition_ui <- switch(
      input$fuzzy_proposition_type_select,
      'simple_fuzzy_proposition' = simple_fuzzy_proposition_ui,
      'intersection_fuzzy_proposition' = compound_fuzzy_proposition_ui,
      'union_fuzzy_proposition' = compound_fuzzy_proposition_ui
    )
    insertUI(
      selector = paste0('#', session$ns('fuzzy_proposition_ui_div')),
      ui = fuzzy_proposition_ui(
        session$ns(child_index),
        main = main,
        parent = parent[[index]]$argument_list,
        index = child_index
      )
    )
    fuzzy_proposition_server = switch (
      input$fuzzy_proposition_type_select,
      'simple_fuzzy_proposition' = simple_fuzzy_proposition_server,
      'intersection_fuzzy_proposition' = compound_fuzzy_proposition_server,
      'union_fuzzy_proposition' = compound_fuzzy_proposition_server
    )
    callModule(
      module = fuzzy_proposition_server,
      id = child_index,
      main = main, triggers = triggers,
      parent = parent[[index]]$argument_list, index = child_index
    )
  })
  # keep the model's negation flag in sync with the UI switch
  observeEvent(input$negate_switch, {
    parent[[index]]$negated <- input$negate_switch
  })
}
|
07da0ce28ff55623358735dfd88adaa718f2bf75
|
146b3f63e30de7b6cccd35b27d9a830a7d3ca364
|
/R/shinyHelper.R
|
7068ae2242e89983c3bdef3b88812541fe280377
|
[] |
no_license
|
codeForReviewer/kMajorityRule
|
cc749239f3de6c883cb17847a88eba8a8794e23f
|
307b53aa961a6f981c1d7776fb1ac3729ea29c03
|
refs/heads/master
| 2021-01-21T12:50:11.424292
| 2016-04-15T20:46:30
| 2016-04-15T20:46:30
| 30,224,968
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 194
|
r
|
shinyHelper.R
|
#' kMajorityGUI
#'
#' Runs the kMajority GUI built with Shiny Server.
#'
#'
kMajorityGUI <- function() {
  # Locate the Shiny app bundled with the package and open it in the browser.
  app_dir <- system.file('kMajorityGUI', package = 'kMajorityRule')
  shiny::runApp(app_dir, launch.browser = TRUE)
}
|
bc9acde59b82def9bb412baa1509ceb0892194f8
|
817947076f61744c2adce055f2383a223e232550
|
/R/morning_lab.R
|
e2c4c7de522025524f0063db0ab03072953cc3c8
|
[] |
no_license
|
NicolasDurrande/ENBIS-2016-workshop
|
f35efed755ae7f361bfebba70c81e892c4875f5e
|
edae4ee4082c834449d58870ba6db4acd3971b74
|
refs/heads/master
| 2020-09-20T05:44:13.524589
| 2016-09-11T12:27:50
| 2016-09-11T12:27:50
| 67,894,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 843
|
r
|
morning_lab.R
|
# Lab session: file to be completed
#########################################
# Part 1 : catapult numerical simulator
#########################################
library(shiny)
# launches the catapult simulator Shiny app straight from GitHub
runGitHub("shinyApps",username="NicolasDurrande",subdir="catapult")
#########################################
# Part 2: Design of experiments
#########################################
## 1.
## 2.
## 3.
# NOTE: exercises 1-3 are expected to create the design matrix X (4 inputs)
# and the response vector Y used below.
## 4. plot Y against the inputs
par(mfrow=c(2,2)) # split the plotting area in 4
plot(X[,1],Y)
plot(X[,2],Y)
plot(X[,3],Y)
plot(X[,4],Y)
par(mfrow=c(1,1)) # restore a single plotting panel
library(rgl)
# interactive 3D view of the response over the first two inputs
plot3d(x = X[,1], y = X[,2], z = Y)
## 5. bests inputs so far
#########################################
## Kriging
#########################################
## 1. create GP model
library(DiceKriging)
?km # or help("km")
## 2. create GP model
## 3. visualization with DiceView
## 4.
82c52da0f4e08b4bae6202a063b02e5ae917df96
|
192055ad46fd97c0eff06c6eb44ef88695f3571e
|
/npl_sing.R
|
14f7d9c04adf613031b02a55bc60cb63f30c0527
|
[] |
no_license
|
josiex/Single-Agent-Dynamic-Choice
|
923575931d1aaf4c9d97209bdb36d2cac16dd2ff
|
4e926b8108e8ab889189d244ddfcfb0db488667d
|
refs/heads/master
| 2022-04-14T00:30:39.222589
| 2020-03-18T16:11:31
| 2020-03-18T16:11:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,350
|
r
|
npl_sing.R
|
library(pracma)
# Maximum likelihood estimation of the structural parameters of a discrete
# choice single-agent dynamic programming model using the NPL algorithm of
# Aguirregabiria and Mira (Econometrica, 2002).
# Original GAUSS code by Victor Aguirregabiria; R port by Wayne Taylor.
#
# INPUTS:
#   inda  - (nobs x 1) indexes of the discrete decision variable (1,...,J)
#   indx  - (nobs x 1) indexes of the state vector x (1,...,S)
#   zmat  - (zmat1,...,zmatJ): values of z(a=j,x); npar columns per choice j
#   pini  - (numx x J) initial estimates of the choice probabilities Pr(a=j|x)
#   bdisc - discount factor (between 0 and 1)
#   fmat  - (fmat1,...,fmatJ): conditional choice transition probabilities
#   names - (npar x 1) vector of parameter names
# OUTPUT:
#   list with one entry per NPL stage k, each containing:
#     tetaest - (npar x 1) stage-k structural parameter estimates
#     varest  - (npar x npar) asymptotic covariance matrix
#     pini    - (numx x J) stage-k estimated choice probabilities
# Depends on pracma::lu and on clogit() defined elsewhere in the project.
npl_sing = function(inda,indx,zmat,pini,bdisc,fmat,names){
  npar <- length(names)
  nobs <- length(inda)
  nchoice <- max(inda)
  # FIX: the original printed this message (with an invalid print(...,fill=)
  # call) and then kept running on mismatched inputs; abort instead.
  if(ncol(zmat) != (npar * nchoice)){
    stop("The number of columns in 'zmat' does not agree with the number of ",
         "'choices * number of parameters'", call. = FALSE)
  }
  myzero <- 1e-12   # guards log(0) below
  eulerc <- 0.5772  # Euler's constant (mean of the type-I EV distribution)
  numx <- nrow(pini)
  convcrit <- 1000
  convcons <- 1e-6  # convergence tolerance on the parameter change
  tetaest0 <- matrix(0, npar, 1)
  out <- NULL
  #---------------------------------------------------------
  # ESTIMATION OF STRUCTURAL PARAMETERS
  #---------------------------------------------------------
  ks <- 1
  while(convcrit >= convcons){
    cat("-----------------------------------------------------",fill=TRUE)
    cat("POLICY ITERATION ESTIMATOR: STAGE =",ks,fill=TRUE)
    cat("-----------------------------------------------------",fill=TRUE)
    # 1. Build "A = (I - beta*Fu)", "Bz = sum_j{Pj*Zj}" and "Be = sum_j{Pj*ej}"
    i_fu <- matrix(0, numx, numx)
    sumpz <- matrix(0, numx, npar)
    sumpe <- matrix(0, numx, 1)
    for(j in seq_len(nchoice)){
      i_fu <- i_fu + pini[,j] * fmat[,(numx*(j-1)+1):(numx*j)]
      sumpz <- sumpz + pini[,j] * zmat[,(npar*(j-1)+1):(npar*j)]
      # + myzero keeps log() finite when a probability is exactly zero
      sumpe <- sumpe + pini[,j] * (eulerc - log(pini[,j] + myzero))
    }
    i_fu <- diag(numx) - bdisc * i_fu
    # 2. Solve "A*Wz = Bz" and "A*We = Be" via LU (Crout) decomposition;
    #    both right-hand sides are solved in one pass by column-binding them.
    i_fu <- lu(i_fu)
    wz <- solve(i_fu$L, cbind(sumpz, sumpe))
    wz <- solve(i_fu$U, wz)
    we <- wz[, npar+1]
    wz <- wz[, 1:npar]
    # 3. ztilda(a,x) = z(a,x) + beta*F(a,x)'*Wz ; etilda(a,x) = beta*F(a,x)'*We
    ztilda <- matrix(0, numx, nchoice*npar)
    etilda <- matrix(0, numx, nchoice)
    for(j in seq_len(nchoice)){
      ztilda[,(npar*(j-1)+1):(npar*j)] <- zmat[,(npar*(j-1)+1):(npar*j)] +
        bdisc * fmat[,(numx*(j-1)+1):(numx*j)] %*% wz
      etilda[,j] <- bdisc * fmat[,(numx*(j-1)+1):(numx*j)] %*% we
    }
    # 4. Sample observations of "ztilda" and "etilda"
    zobs <- ztilda[indx,]
    eobs <- etilda[indx,]
    # 5. Pseudo maximum likelihood (conditional logit) step
    clogitout <- clogit(inda, zobs, eobs, names)
    tetaest1 <- clogitout$b0
    varest <- clogitout$Avarb
    # 6. Choice probabilities implied by the new estimates (softmax over
    #    choice-specific values; row max subtracted for numerical stability)
    pini <- matrix(0, numx, nchoice)
    for(j in seq_len(nchoice)){
      pini[,j] <- ztilda[,(npar*(j-1)+1):(npar*j)] %*% tetaest1 + etilda[,j]
    }
    pini <- pini - apply(pini, 1, max)
    pini <- exp(pini)
    pini <- pini / rowSums(pini)
    # 7. Convergence criterion: max absolute parameter change between stages
    convcrit <- max(abs(tetaest1 - tetaest0))
    tetaest0 <- tetaest1
    cat("NPL Criterion =",convcrit,fill=TRUE)
    # 8. Store the output of stage k
    out[[ks]] <- list(tetaest=tetaest1, varest=varest, pini=pini)
    ks <- ks + 1
  }
  out
}
|
e5dfb5e79aac7123bbdbbf8887f965580fa57fe4
|
4058e1f1b7e807caf65376b08ea7e30a39b9fe51
|
/man/aggregate.stars.Rd
|
c2157b8bbcb95207b2754e20a5bbf0d52f9df4dc
|
[
"Apache-2.0"
] |
permissive
|
przell/stars
|
80334d7ef64312966159cdc2f39fc90fb882e2a5
|
59ac11d893601a34ce0e0d8d919bac1ed4643c57
|
refs/heads/master
| 2020-09-16T23:22:48.754915
| 2019-11-25T17:05:51
| 2019-11-25T17:05:51
| 223,918,620
| 0
| 0
|
Apache-2.0
| 2019-11-25T17:05:53
| 2019-11-25T10:11:25
| null |
UTF-8
|
R
| false
| true
| 1,542
|
rd
|
aggregate.stars.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate.R
\name{aggregate.stars}
\alias{aggregate.stars}
\title{spatially or temporally aggregate stars object}
\usage{
\method{aggregate}{stars}(x, by, FUN, ..., drop = FALSE,
join = st_intersects, as_points = any(st_dimension(by) == 2, na.rm =
TRUE), rightmost.closed = FALSE, left.open = FALSE)
}
\arguments{
\item{x}{object of class \code{stars} with information to be aggregated}
\item{by}{object of class \code{sf} or \code{sfc} for spatial aggregation, for temporal aggregation a vector with time values (\code{Date}, \code{POSIXct}, or \code{PCICt}) that is interpreted as a sequence of left-closed, right-open time intervals or a string like "months", "5 days" or the like (see \link{cut.POSIXt}); if by is an object of class \code{stars}, it is converted to sfc by \code{st_as_sfc(by, as_points = FALSE)} thus ignoring its time component.}
\item{FUN}{aggregation function, such as \code{mean}}
\item{...}{arguments passed on to \code{FUN}, such as \code{na.rm=TRUE}}
\item{drop}{logical; ignored}
\item{join}{join function to find matches of x to by}
\item{as_points}{see \link[stars]{st_as_sf}: shall raster pixels be taken as points, or small square polygons?}
\item{rightmost.closed}{see \link{findInterval}}
\item{left.open}{logical; used for time intervals, see \link{findInterval} and \link{cut.POSIXt}}
}
\description{
spatially or temporally aggregate stars object, returning a data cube with lower spatial or temporal resolution
}
|
3a708c9da882d584b674d4b2a773bbf3adc22d61
|
31b51aeb96000318df21351b2ef6cb55f7c269c2
|
/HiNT/externalScripts/getBreakPoints2steps.R
|
c231ee12d9455a262e462f3c783b7092277d81ca
|
[
"Artistic-1.0-Perl",
"MIT"
] |
permissive
|
suwangbio/HiNT
|
a26062f6c39929803a4aded5465743768a81b8c5
|
99f0aa91d10c5fccabcbd56035f6e3125650a720
|
refs/heads/master
| 2022-04-08T23:13:25.344196
| 2020-02-17T22:22:13
| 2020-02-17T22:22:13
| 240,967,519
| 0
| 0
|
NOASSERTION
| 2020-02-16T21:19:00
| 2020-02-16T21:18:59
| null |
UTF-8
|
R
| false
| false
| 6,068
|
r
|
getBreakPoints2steps.R
|
# Command-line driver: locate translocation breakpoints in a Hi-C division
# matrix. Expects the division-matrix directory, the matrix file name and the
# output file path at positions 6-8 of commandArgs().
Args <- commandArgs()
#print(Args)
divisionMatrixDir = Args[6]
matrix = Args[7] # NOTE(review): this name shadows base::matrix()
outputfile = Args[8]
#start_time <- Sys.time()
library('strucchange')
#library(pastecs)
library(foreach)
library(doParallel)
# Empirical quantile of x at probability q, defined as the floor(n*q)-th
# order statistic (clamped to at least the first).
#
# Args:
#   x - numeric vector (need not be sorted).
#   q - probability in (0, 1].
# Returns: a single element of x.
quantileFunc <- function(x, q) {
  x <- sort(x)
  n <- length(x)
  # floor(n*q) can be 0 for small n or q, which would index nothing
  # (numeric(0)); clamp the index to 1.
  ln <- max(1L, floor(n * q))
  x[ln]
}
# Classify the translocation type for one chromosome-pair division matrix.
#
# Args:
#   matrixf   - path of the division matrix (read with read.table()).
#   chrompair - chromosome-pair label (currently unused inside the function).
# Returns: "balancedTRAL" or "unbalancedTRAL".
#
# NOTE(review): which(x == max(x)) can return more than one index; the
# &&-chain below assumes scalar positions — confirm ties cannot occur.
svtype <- function(matrixf,chrompair){
  traltype="unbalancedTRAL"
  mat <- read.table(matrixf)
  rowsum = rowSums(mat,na.rm=T)
  colsum = colSums(mat,na.rm=T)
  # positions of the global maximum and of the 99.9%-quantile value per axis
  maxRIndex = which(rowsum == max(rowsum,na.rm=T))
  highRIndex = which(rowsum == quantileFunc(rowsum,0.999))
  maxCIndex = which(colsum == max(colsum,na.rm=T))
  highCIndex = which(colsum == quantileFunc(colsum,0.999))
  # Balanced call: on both axes, the maximum and the 99.9% quantile must
  # (almost) coincide AND the signal must stay above the 90% quantile in a
  # 5-bin window on both sides of the peak.
  if(abs(maxRIndex-highRIndex)<=length(rowsum)*0.005 && (median(rowsum[max(0,(highRIndex-5)):max(0,(highRIndex-1))],na.rm=T) > quantileFunc(rowsum,0.9)) && (median(rowsum[min((maxRIndex+1),length(rowsum)):min((maxRIndex+5),length(rowsum))],na.rm=T) > quantileFunc(rowsum,0.9)) && abs(maxCIndex-highCIndex)<=length(colsum)*0.005 && (median(colsum[max(0,(maxCIndex-5)):max(0,(maxCIndex-1))],na.rm=T) > quantileFunc(colsum,0.9)) && (median(colsum[min((maxCIndex+1),length(colsum)):min((maxCIndex+5),length(colsum))],na.rm=T) > quantileFunc(colsum,0.9))){
    traltype = "balancedTRAL"
    #print("Probably a balanced translocation")
  }
  return(traltype)
}
# Filter raw structural-change breakpoints, keeping only those that look like
# genuine signal edges in `array` (a vector of row/column sums).
#
# A breakpoint is kept when values around it jump between "background" (below
# the median of positive values, bcutoff) and "signal" (above the 90% quantile
# of positive values, cutoff), or when adjacent values differ by more than
# 30%. Breakpoints within two positions of either end are dropped. If nothing
# survives, the position of the maximum is returned; finally, directly
# adjacent breakpoints are deduplicated, keeping the one at the lower value.
#
# Args:
#   breakpoints - integer positions proposed by strucchange::breakpoints().
#   array       - numeric vector the breakpoints were estimated on.
# Returns: integer vector of retained breakpoint positions.
filtering <- function(breakpoints, array){
  bps = c()
  cutoff <- quantile(array[array>0],0.9)  # "high signal" threshold
  bcutoff <- median(array[array>0])       # "background" threshold
  #cutoff <- sort(array)[ceiling(0.9*length(sort(array)))]
  for (bp in breakpoints){
    if(bp==length(array)|(bp==1)|bp==length(array)-1|(bp==2)){bps <- bps} # too close to either edge: drop
    else if((array[bp]<=bcutoff & array[bp+1]>cutoff)|(array[bp]>cutoff & array[bp+1]<=bcutoff)){bps <- c(bps,bp)} # jump between bp and bp+1
    #else if((array[bp]<=bcutoff & array[bp+2]>cutoff)|(array[bp]>cutoff & array[bp+2]<=bcutoff)){bps <- c(bps,bp)}
    else if((array[bp-1]>=cutoff & array[bp]<bcutoff)|(array[bp-1]<bcutoff & array[bp]>=cutoff)){bps <- c(bps,bp)} # jump between bp-1 and bp
    #else if((array[bp-2]>=cutoff & array[bp]<bcutoff)|(array[bp-2]<bcutoff & array[bp]>=cutoff)){bps <- c(bps,bp)}
    else if((array[bp-1]>=cutoff & array[bp+1]<bcutoff)|(array[bp-1]<bcutoff & array[bp+1]>=cutoff)){bps <- c(bps,bp)} # jump across bp
    else if((min(array[bp],array[bp+1])!=0) & (abs(array[bp+1]-array[bp])/(max(array[bp+1],array[bp])+1)>0.3)){bps <- c(bps,bp)} # >30% relative change forward
    #else if((min(array[bp],array[bp+2])!=0) & (abs(array[bp+2]-array[bp])/(max(array[bp+2],array[bp])+1)>0.3)){bps <- c(bps,bp)}
    else if((min(array[bp],array[bp-1])!=0) & (abs(array[bp-1]-array[bp])/(max(array[bp-1],array[bp])+1)>0.3)){bps <- c(bps,bp)} # >30% relative change backward
    #else if((min(array[bp],array[bp-2])!=0) & (abs(array[bp-2]-array[bp])/(max(array[bp-2],array[bp])+1)>0.3)){bps <- c(bps,bp)}
    else {bps <- bps} # no evidence of an edge: drop
  }
  # fall back to the position of the maximum when nothing survived
  if(is.null(bps)){bps <- c(which(array==max(array)))}
  else if(length(bps)==1){bps <- bps}
  else{
    # collapse directly adjacent breakpoints, keeping the lower-valued one
    ids <- c()
    for(i in 1:(length(bps)-1)){
      if((bps[i+1]-bps[i])==1){if(array[bps[i]]<array[bps[i+1]]){ids<-c(ids,i)}else{ids<-c(ids,i+1)}}
      else{ids <- ids}
    }
    if(is.null(ids)){bps <- bps}
    else{bps <- bps[-(ids)]}
  }
  return(bps)
}
# Estimate breakpoints along both axes of a division matrix (unbalanced case).
#
# Args:
#   matrixf   - path of the division matrix.
#   chrompair - chromosome-pair label, copied into the result row.
#   threads   - worker count for the parallel breakpoint search (string/int).
# Returns: one-row matrix (chrompair, filtered row breakpoints, row-max
# position, filtered column breakpoints, column-max position), or NA when
# no breakpoint is found on either axis.
searchMatric <- function(matrixf,chrompair,threads){
  mat <- read.table(matrixf)
  h = dim(mat)[1]
  w = dim(mat)[2]
  # To check the breakpoints: always work with rows as the shorter dimension
  if(h>w){
    mat <- t(mat)
  }else{
    mat <- mat
  }
  #print(dim(mat))
  rowsum = rowSums(mat,na.rm=T)
  colsum = colSums(mat,na.rm=T)
  registerDoParallel(cores=strtoi(threads))
  # structural-change breakpoints on the marginal sums (at most 10 breaks,
  # minimum segment length 5)
  breakpoints_row <- breakpoints(rowsum ~ 1, h=5, breaks=10, hpc="foreach")$breakpoints
  breakpoints_col <- breakpoints(colsum ~ 1, h=5, breaks=10, hpc="foreach")$breakpoints
  if(is.na(breakpoints_row[1]) | is.na(breakpoints_col[1])){
    res <- NA
  }else{
    max_row <- which(rowsum==max(rowsum))
    max_col <- which(colsum==max(colsum))
    # keep only breakpoints that look like genuine signal edges
    validRowBP <- filtering(breakpoints_row,rowsum)
    validColBP <- filtering(breakpoints_col,colsum)
    bprow <- paste(validRowBP,collapse = ',')
    bpcol <- paste(validColBP,collapse = ',')
    #bprow <- paste(breakpoints_row, collapse = ',')
    #bpcol <- paste(breakpoints_col, collapse = ',')
    res <- cbind(chrompair,bprow,max_row,bpcol,max_col)
  }
  return(res)
}
# Breakpoint search for a *balanced* translocation: locate the peak row and
# column, restrict the matrix to the side of each peak carrying more finite
# signal, and re-estimate the breakpoints on the restricted marginal sums.
#
# Args and return value: as in searchMatric(); max_row/max_col here are the
# peak positions rather than the positions of the marginal maxima of the
# restricted matrix.
searchBalancedMatric <- function(matrixf,chrompair,threads){
  mat <- read.table(matrixf)
  h = dim(mat)[1]
  w = dim(mat)[2]
  # To check the breakpoints: always work with rows as the shorter dimension
  if(h>w){
    mat <- t(mat)
  }else{
    mat <- mat
  }
  rowsum = rowSums(mat,na.rm=T)
  colsum = colSums(mat,na.rm=T)
  maxRIndex = which(rowsum == max(rowsum,na.rm=T))
  # keep the side of the row peak with more finite signal
  if(sum(is.finite(rowsum[1:maxRIndex]))>sum(is.finite(rowsum[maxRIndex:length(rowsum)]))){
    submatrix <- mat[1:(maxRIndex-1),]
  }else{
    submatrix <- mat[(maxRIndex+1):length(rowsum),]
  }
  subcolSum <- colSums(submatrix,na.rm=T)
  maxCIndex = which(colsum == max(colsum,na.rm=T))
  # keep the side of the column peak with more finite signal
  if(sum(is.finite(colsum[1:maxCIndex]))>sum(is.finite(colsum[maxCIndex:length(colsum)]))){
    submatrix <- mat[,1:(maxCIndex-1)]
  }else{
    submatrix <- mat[,(maxCIndex+1):length(colsum)]
  }
  subrowSum <- rowSums(submatrix,na.rm=T)
  registerDoParallel(cores=strtoi(threads))
  breakpoints_row <- breakpoints(subrowSum ~ 1, h=5, breaks=10, hpc="foreach")$breakpoints
  breakpoints_col <- breakpoints(subcolSum ~ 1, h=5, breaks=10, hpc="foreach")$breakpoints
  # FIX: breakpoints() returns a vector; the original vectorized test
  # is.na(breakpoints_row)|is.na(breakpoints_col) produced a length > 1
  # condition inside if() whenever several breakpoints were found. Test only
  # the first element, exactly as searchMatric() does.
  if(is.na(breakpoints_row[1]) || is.na(breakpoints_col[1])){
    res <- NA
  }else{
    max_row <- maxRIndex
    max_col <- maxCIndex
    bprow <- paste(breakpoints_row, collapse = ',')
    bpcol <- paste(breakpoints_col, collapse = ',')
    res <- cbind(chrompair,bprow,max_row,bpcol,max_col)
  }
  return(res)
}
#print(divisionMatrixDir)
#print(matrixfiles)
#matrixfiles = c("chr9_chr13_DivisionMatrix.txt")
threads = 8 # worker count for the parallel breakpoint searches
matrixf <- file.path(divisionMatrixDir,matrix)
# chromosome pair parsed from a file name like "<prefix>_chr9_chr13_...":
chrompair = paste(strsplit(matrix,'_')[[1]][2],strsplit(matrix,'_')[[1]][3],sep="_")
print(chrompair)
# classify the translocation, then run the matching breakpoint search
traltype <- svtype(matrixf,chrompair)
print(traltype)
#res <- searchMatric(matrixf,chrompair)
if(traltype == "unbalancedTRAL"){
  res <- searchMatric(matrixf,chrompair,threads)
}else{
  res <- searchBalancedMatric(matrixf,chrompair,threads)
}
write.table(res,file=outputfile,sep="\t",quote=F,row.names=F)
#end_time <- Sys.time()
#print(start_time)
#print(end_time)
760d023366af04d116df9a84f366ea9b918bc687
|
c25d1a12f0cb1a1702a627644157c6509f854659
|
/sorteio_sample.R
|
cf4c4ace1867df9ef2a7acac5e7c23b16058be42
|
[] |
no_license
|
francojra/sorteio_function_sample
|
3e6f55560423b295ab824eb17984576a40f08f3c
|
108baca6042803210e69a4b3f404739e482234e8
|
refs/heads/main
| 2023-08-25T20:43:18.500609
| 2021-10-18T03:11:15
| 2021-10-18T03:11:15
| 418,323,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 751
|
r
|
sorteio_sample.R
|
# Draws (lotteries) using the sample() function -----------------------------------------------------------------------------------------
# Define the data sets to draw from:
# Numbers
x <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
# Strings
y <- c("João", "Maria", "Alice", "Eduardo", "Gabriel", "Elise")
# Draws
sample(x, 3, replace = T) # draw 3 numbers with replacement
sample(x, 5, replace = T) # draw 5 numbers with replacement
### Note that some numbers may repeat
sample(x, 4, replace = F) # draw 4 numbers without replacement
sample(y, 2, replace = T) # draw 2 names with replacement
sample(y, 5, replace = T) # draw 5 names WITH replacement (the original comment incorrectly said "without")
sample(y, 1) # draw 1 of the names
|
923e71ecb5442b35f016e41e4d34eb4d219b3252
|
eaed73b0d58a73c9db291bc758ad24c530a52a66
|
/Scripts/Processing/Seed-Carryover-Clean.R
|
13d032152a20b3e4f130042923e39a5f2f85d132
|
[] |
no_license
|
marinalaforgia/McL_Climate-Competition
|
dc3a9d62cb55f278dab2f37dbebbccace9afb4d6
|
89772f72738828527e7752b1c1c8f766816227b4
|
refs/heads/master
| 2020-04-21T18:45:37.341188
| 2020-02-06T22:20:24
| 2020-02-06T22:20:24
| 169,782,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,900
|
r
|
Seed-Carryover-Clean.R
|
# Script for Seed Carryover Data Analysis
# Section 1: load libraries, read raw data, compute trait PCA, and build the
# cleaned seed-bag data frame `sb` (one row per plot x species x year).
# NOTE(review): rm(list=ls()) wipes the interactive workspace; generally
# discouraged in scripts — consider removing.
rm(list=ls())
#### Load Libraries ####
library(plyr)
library(lme4)
library(Rmisc)
library(ggplot2)
library(pscl)
# NOTE(review): filter() below has dplyr semantics, but dplyr is not loaded
# here and stats::filter has a different signature — confirm dplyr is
# attached elsewhere or add library(dplyr).
### Read in Data ####
sb16 <- read.csv("Data/Seed bag/Seed_bags_2016.csv")
sb17 <- read.csv("Data/Seed bag/Seed_bags_2017.csv")
treat <- read.csv("Data/Setup/Marina-Treatment-30.csv")
added <- read.csv("Data/Setup/Exp-Seed-Species.csv")
viab <- read.csv("Data/Viability/Viability-Overall.csv") # seed viability data
trait.w <- read.csv("Data/Post-Processing/final-traits-w.csv")
trait.w <- trait.w[,-7]
# PCA on two trait columns; PC1 is used downstream as a drought-tolerance axis
#PCA.t <- prcomp(trait.w[,c(15,17)], scale = T) # greenhouse trait
PCA.t <- prcomp(trait.w[,c(7,8)], scale = T)
biplot(PCA.t)
summary(PCA.t)
trait.w$PC1 <- PCA.t$x[,1]
colnames(sb17)[3] <- "n.viable"
sb16$Year <- 2016
sb17$Year <- 2017
# 2016 viability: expected number of viable seeds added per subplot
# = seeds added x proportion viable
# 2016 viability
added16 <- filter(added, Species %in% unique(sb17$Species), exp.year == 2016)[,c(2,4)] # avg number of seed added per subplot in 2016
viab16 <- viab[c(1:5), c(1,9)] # viability data
added16$Species <- revalue(added16$Species, c("PLER" = "Plantago erecta", "AGHE" = "Agoseris heterophylla", "LACA" = "Lasthenia californica", "CLPU" = "Clarkia purpurea", "HECO" = "Hemizonia congesta", "CAPA" = "Calycadenia pauciflora", "AVFA" = "Avena fatua", "BRHO" = "Bromus hordeaceus", "TACA" = "Taeniatherum caput-medusae", "LOMU" = "Lolium multiflorum"))
added16 <- merge(added16, viab16, by = "Species")
added16$viable <- added16$avg.num.per.sub*added16$p.viable
added16$viable <- as.integer(added16$viable)
added16$Year <- 2016
sb16$Species <- revalue(sb16$Species, c("AGHE" = "Agoseris heterophylla", "CLPU" = "Clarkia purpurea", "LACA" = "Lasthenia californica", "PLER" = "Plantago erecta", "HECO" = "Hemizonia congesta"))
sb16 <- merge(sb16, added16[,c(1,4,5)], by = c("Species", "Year"))
# 2017 viability
added17 <- filter(added, Species %in% unique(sb17$Species), exp.year == 2017)[,c(2,4)] # avg number of seed added per subplot in 2017
added17$Species <- revalue(added17$Species, c("AGHE" = "Agoseris heterophylla", "CLPU" = "Clarkia purpurea", "LACA" = "Lasthenia californica", "PLER" = "Plantago erecta", "HECO" = "Hemizonia congesta", "CAPA" = "Calycadenia pauciflora"))
viab17 <- viab[c(1:3,7,8,12), c(1,9)]
added17 <- merge(added17, viab17, by = "Species")
added17$viable <- added17$avg.num.per.sub*added17$p.viable
added17$viable <- as.integer(added17$viable)
added17$Year <- 2017
sb17$Species <- revalue(sb17$Species, c("AGHE" = "Agoseris heterophylla", "CLPU" = "Clarkia purpurea", "LACA" = "Lasthenia californica", "PLER" = "Plantago erecta", "HECO" = "Hemizonia congesta", "CAPA" = "Calycadenia pauciflora"))
sb17 <- merge(sb17, added17[,c(1,4,5)], by = c("Species", "Year"))
# Combine years, attach treatment codes and traits, compute survival fraction
sb <- rbind(sb16, sb17)
sb <- merge(sb, unique(treat[,c(1,3)]), by = "Plot", all.y = F)
colnames(sb)[2] <- "Species_Name"
colnames(sb)[4] <- "Count"
# Cap the denominator at the observed count so p.surv never exceeds 1
sb$avg.num.per.sub <- ifelse(sb$Count > sb$viable, sb$Count, sb$viable) # cap at high
sb$p.surv <- sb$Count/sb$avg.num.per.sub
sb <- merge(sb, trait.w, by.x = "Species_Name", by.y = "Species")
write.table(sb, "Data/Cleaned Data for Analysis/seed-carryover-plot.csv", sep = ",", row.names = F)
# Section 2: does the drought-tolerance axis (PC1) predict seed carryover?
# Three alternative model forms are fit in turn (the object m.sb.t is
# deliberately reused/overwritten for each candidate).
#### Model: traits on seed carryover ####
sb$Year <- as.factor(sb$Year)
sb.sum <- ddply(sb, .(Species_Name, PC1), summarize, Count = sum(Count), avg.num.per.sub = sum(avg.num.per.sub))
sb.sum$p.surv <- sb.sum$Count/sb.sum$avg.num.per.sub
# Candidate 1: aggregated binomial GLM on plot-level counts
m.sb.t <- glm(cbind(Count, avg.num.per.sub - Count) ~ PC1, family = binomial, data = sb)
plot(fitted(m.sb.t), resid(m.sb.t))
qqnorm(resid(m.sb.t))
qqline(resid(m.sb.t), col = 2,lwd=2,lty=2)
summary(m.sb.t)
# Candidate 2: simple linear model on species-level survival proportions
m.sb.t <- lm(p.surv ~ PC1, data = sb.sum)
plot(fitted(m.sb.t), resid(m.sb.t))
qqnorm(resid(m.sb.t))
qqline(resid(m.sb.t), col = 2,lwd=2,lty=2)
summary(m.sb.t) # using percentage gives more what i would expect
# Candidate 3: binomial GLMM with Year and Plot random intercepts
m.sb.t <- glmer(cbind(Count, avg.num.per.sub-Count) ~ PC1 + (1|Year) + (1|Plot), family = binomial, data = sb)
plot(fitted(m.sb.t), resid(m.sb.t))
qqnorm(resid(m.sb.t))
qqline(resid(m.sb.t), col = 2,lwd=2,lty=2)
summary(m.sb.t)
ggplot(sb.sum, aes(x = PC1, y = Count/avg.num.per.sub)) +
  geom_point() +
  geom_smooth(method = "lm", se = F) +
  theme_classic() +
  geom_text(aes(label = Species_Name), hjust = .5, vjust = .5) +
  labs(x = "Drought tolerance (high to low)", y = "Seed Carryover")# noooo
sb$p.surv <- sb$Count/sb$avg.num.per.sub
sb.summary <- summarySE(sb, measurevar = "p.surv", groupvars = c("Species_Name", "PC1"))
ggplot(sb.summary, aes(x = PC1, y = p.surv)) +
  geom_point() +
  geom_errorbar(aes(ymin = p.surv - se, ymax = p.surv + se), width = 0.02) +
  geom_smooth(method = "lm", se = F) +
  theme_classic()
# Section 3: year effect on forb carryover (count GLMMs + aggregated binomial)
#### Model: Year effect on Forbs ####
sb$Year <- as.factor(sb$Year)
m0.nb <- glmer.nb(Count ~ Year + (1|Species_Name), data = sb[sb$Group == "Forb",])
m0.p <- glmer(Count ~ Year + (1|Species_Name), family = poisson, data = sb[sb$Group == "Forb",])
# NOTE(review): lrtest() is from the lmtest package, which is not loaded
# above — confirm it is attached elsewhere.
lrtest(m0.nb, m0.p) # neg binom better
plot(fitted(m0.nb), resid(m0.nb))
qqnorm(resid(m0.nb))
qqline(resid(m0.nb), col = 2,lwd=2,lty=2)
summary(m0.nb) # carryover is lower in general in 2017, don't lump together
# Aggregated binomial model
m0.ab <- glmer(cbind(Count, avg.num.per.sub - Count) ~ Year + (1|Species_Name), family = binomial, data = sb[sb$Group == "Forb",])
plot(fitted(m0.ab), resid(m0.ab))
qqnorm(resid(m0.ab))
qqline(resid(m0.ab), col = 2, lwd = 2, lty = 2)
summary(m0.ab)
# Section 4: per-species treatment models for the four grasses.
# Pattern for each species: fit drought contrast (exclude Watered plots),
# then watering contrast (exclude Drought plots); Poisson first, switched to
# negative binomial when overdispersed.
# NOTE(review): dispersiontest() is from the AER package and glm.nb() from
# MASS; neither is loaded above — confirm they are attached elsewhere.
#### Model: TACA ####
m.TACA.d <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Taeniatherum caput-medusae" & sb$Treat.Code != "W",])
dispersiontest(m.TACA.d, trafo = 1) # not sig greater than 0, stick to poisson
plot(fitted(m.TACA.d), resid(m.TACA.d))
qqnorm(resid(m.TACA.d))
qqline(resid(m.TACA.d), col = 2,lwd=2,lty=2)
summary(m.TACA.d) # no effect of drought on carryover
m.TACA.w <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Taeniatherum caput-medusae" & sb$Treat.Code != "D",])
dispersiontest(m.TACA.w, trafo = 1) # not sig greater than 0, stick to poisson
plot(fitted(m.TACA.w), resid(m.TACA.w))
qqnorm(resid(m.TACA.w))
qqline(resid(m.TACA.w), col = 2,lwd=2,lty=2)
summary(m.TACA.w) # no effect of watering on carryover
# sum TACA carryover across treatments
#### Model: AVFA ####
m.AVFA.d <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Avena fatua" & sb$Treat.Code != "W",])
dispersiontest(m.AVFA.d, trafo = 1) #poisson good
plot(fitted(m.AVFA.d), resid(m.AVFA.d))
qqnorm(resid(m.AVFA.d))
qqline(resid(m.AVFA.d), col = 2,lwd=2,lty=2)
summary(m.AVFA.d) # no effect of drought on carryover
m.AVFA.w <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Avena fatua" & sb$Treat.Code != "D",])
dispersiontest(m.AVFA.w, trafo = 1) #poisson good
plot(fitted(m.AVFA.w), resid(m.AVFA.w))
qqnorm(resid(m.AVFA.w))
qqline(resid(m.AVFA.w), col = 2,lwd=2,lty=2)
summary(m.AVFA.w) # no effect of watering on carryover
# sum AVFA carryover across treatments
#### Model: BRHO ####
m.BRHO.d <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Bromus hordeaceus" & sb$Treat.Code != "W",])
plot(fitted(m.BRHO.d), resid(m.BRHO.d))
qqnorm(resid(m.BRHO.d))
qqline(resid(m.BRHO.d), col = 2,lwd=2,lty=2)
summary(m.BRHO.d) # no effect of drought on carryover
m.BRHO.w <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Bromus hordeaceus" & sb$Treat.Code != "D",])
plot(fitted(m.BRHO.w), resid(m.BRHO.w))
qqnorm(resid(m.BRHO.w))
qqline(resid(m.BRHO.w), col = 2,lwd=2,lty=2)
summary(m.BRHO.w) # no effect of watering on carryover
# sum BRHO carryover across treatments
#### Model: LOMU ####
m.LOMU.d <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Lolium multiflorum" & sb$Treat.Code != "W",])
dispersiontest(m.LOMU.d, trafo = 1) # overdispersion present, use neg. bin model
m.LOMU.d <- glm.nb(Count ~ Treat.Code, data = sb[sb$Species_Name == "Lolium multiflorum" & sb$Treat.Code != "W",])
plot(fitted(m.LOMU.d), resid(m.LOMU.d))
qqnorm(resid(m.LOMU.d))
qqline(resid(m.LOMU.d), col = 2,lwd=2,lty=2)
summary(m.LOMU.d) # no effect of drought on carryover
m.LOMU.w.nb <- glm.nb(Count ~ Treat.Code, data = sb[sb$Species_Name == "Lolium multiflorum" & sb$Treat.Code != "D",])
m.LOMU.w <- glm(Count ~ Treat.Code, family = poisson, data = sb[sb$Species_Name == "Lolium multiflorum" & sb$Treat.Code != "D",])
dispersiontest(m.LOMU.w, trafo = 1) # overdispersion present, use neg. bin model
plot(fitted(m.LOMU.w.nb), resid(m.LOMU.w.nb))
qqnorm(resid(m.LOMU.w.nb))
qqline(resid(m.LOMU.w.nb), col = 2,lwd=2,lty=2)
summary(m.LOMU.w.nb) # no effect of watering on carryover
# sum LOMU carryover across treatments
# Section 5: all grasses pooled, with species as a random intercept.
#### Model: Differences in grass carryover ####
# If we let intercept vary by species, that parameter should absorb the differences in seed added per species, no?
m.grass.w.nb <- glmer.nb(Count ~ Treat.Code + (1|Species_Name), data = sb[sb$Group == "Grass" & sb$Treat.Code != "D",])
m.grass.w <- glmer(Count ~ Treat.Code + (1|Species_Name), family = poisson, data = sb[sb$Group == "Grass" & sb$Treat.Code != "D",])
lrtest(m.grass.w.nb, m.grass.w) # nb model better
plot(fitted(m.grass.w.nb), resid(m.grass.w.nb))
qqnorm(resid(m.grass.w.nb))
qqline(resid(m.grass.w.nb), col = 2,lwd=2,lty=2)
summary(m.grass.w.nb) # no effect of watering on carryover
# aggregated binomial model better?
# Watering
m.grass.w.ab <- glmer(cbind(Count, avg.num.per.sub - Count) ~ Treat.Code + (1|Species_Name), family = binomial, data = sb[sb$Group == "Grass" & sb$Treat.Code != "D",])
plot(fitted(m.grass.w.ab), resid(m.grass.w.ab))
qqnorm(resid(m.grass.w.ab))
qqline(resid(m.grass.w.ab), col = 2,lwd=2,lty=2)
summary(m.grass.w.ab)
# Drought
m.grass.d.ab <- glmer(cbind(Count, avg.num.per.sub - Count) ~ Treat.Code + (1|Species_Name), family = binomial, data = sb[sb$Group == "Grass" & sb$Treat.Code != "W",])
plot(fitted(m.grass.d.ab), resid(m.grass.d.ab))
qqnorm(resid(m.grass.d.ab))
qqline(resid(m.grass.d.ab), col = 2,lwd=2,lty=2)
summary(m.grass.d.ab) # no species level effects but drought marginally increased carryover and watering marginally decreased carryover
# marginal effects overall, lump together
sb.sum.grass <- summarySE(sb[sb$Group == "Grass",], measurevar = "p.surv", groupvars = "Species_Name")
# Section 6: overall treatment effects across all species
# (aggregated binomial GLMMs), plus species random effects vs. PC1.
#### Model 1: Drought effect on seed carryover ####
m1 <- glmer(cbind(Count, avg.num.per.sub - Count) ~ Treat.Code + (1|Year) + (1|Species_Name), family = binomial, data = sb)
plot(fitted(m1), resid(m1))
qqnorm(resid(m1))
qqline(resid(m1), col = 2, lwd = 2, lty = 2)
summary(m1) # no effect of drought or of watering
#### Model 2: Watering effect on seed carryover ####
# Forbs only, with a random treatment slope per species
m2 <- glmer(cbind(Count, avg.num.per.sub - Count) ~ Treat.Code + (1|Year) + (1 + Treat.Code|Species_Name), family = binomial, data = sb[sb$Treat.Code != "D" & sb$Group == "Forb",])
plot(fitted(m2), resid(m2))
qqnorm(resid(m2))
qqline(resid(m2), col = 2,lwd=2,lty=2)
summary(m2) # no effect of water
# Extract species-level random intercepts from m1 and relate them to PC1
ranef.m1 <- data.frame(Species_Name = row.names(ranef(m1)[[1]]), Diff.avg = ranef(m1)[[1]][,1])
m1.trait <- merge(sb.summary, ranef.m1, by = "Species_Name")
ggplot(m1.trait, aes(x = PC1, y = Diff.avg)) +
  geom_smooth(method = "lm", se = F) +
  theme_classic() +
  geom_text(aes(label = Species_Name), hjust = .5, vjust = .5)
# Section 7: per-species treatment models for HECO (Hemizonia congesta).
# Same pattern as every other species block: drought contrast excludes
# Watered plots, watering contrast excludes Drought plots; negative binomial
# GLMM with a Year random intercept.
#### Model: HECO ####
hist(sb[sb$Species_Name == "Hemizonia congesta",]$Count)
# Drought
# Fixes vs. original: (1) the column is named Species_Name after the rename
# in the cleaning step — `sb$Species` only worked via `$` partial matching;
# (2) the drought model now excludes Watered plots (`Treat.Code != "W"`),
# matching the commented-out m4.h.d2 candidate and all sibling species blocks.
m4.h.d <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Hemizonia congesta" & sb$Treat.Code != "W",])
#m4.h.d2 <- glmer(Count ~ Treat.Code + (1|Year), family = poisson, data = sb[sb$Species_Name == "Hemizonia congesta" & sb$Treat.Code != "W",])
#anova(m4.h.d2, m4.h.d) # neg binomial better
plot(fitted(m4.h.d), resid(m4.h.d))
qqnorm(resid(m4.h.d))
qqline(resid(m4.h.d), col = 2, lwd = 2, lty = 2)
summary(m4.h.d)
# Watering
#m4.h.w.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Hemizonia congesta" & sb$Treat.Code != "D",])
m4.h.w <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Hemizonia congesta" & sb$Treat.Code != "D",])
#anova(m4.h.w.2, m4.h.w) # negbinom better
plot(fitted(m4.h.w), resid(m4.h.w))
qqnorm(resid(m4.h.w))
qqline(resid(m4.h.w), col = 2,lwd=2,lty=2)
summary(m4.h.w)
# Section 8: per-species treatment models for the remaining forbs
# (AGHE, PLER, CLPU, LACA, CAPA), then summary tables.
# Same drought/watering contrast pattern as the grass and HECO blocks.
#### Model: AGHE ####
hist(sb[sb$Species_Name == "Agoseris heterophylla",]$Count)
# Drought
m4.a.d <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Agoseris heterophylla" & sb$Treat.Code != "W",])
m4.a.d.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Agoseris heterophylla" & sb$Treat.Code != "W",])
anova(m4.a.d.2, m4.a.d) #neg binom better
plot(fitted(m4.a.d), resid(m4.a.d))
qqnorm(resid(m4.a.d))
qqline(resid(m4.a.d), col = 2, lwd = 2, lty = 2)
summary(m4.a.d)
# Watering
m4.a.w.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Agoseris heterophylla" & sb$Treat.Code != "D",])
m4.a.w <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Agoseris heterophylla" & sb$Treat.Code != "D",])
#anova(m4.a.w.2, m4.a.w) #neg binom better
plot(fitted(m4.a.w), resid(m4.a.w))
qqnorm(resid(m4.a.w))
qqline(resid(m4.a.w), col = 2,lwd=2,lty=2)
summary(m4.a.w)
#### Model: PLER ####
hist(sb[sb$Species_Name == "Plantago erecta",]$Count)
# Drought
m4.p.d <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Plantago erecta" & sb$Treat.Code != "W",])
#m4.p.d.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Plantago erecta" & sb$Treat.Code != "W",])
#anova(m4.p.d.2, m4.p.d) #neg binom better
plot(fitted(m4.p.d), resid(m4.p.d))
qqnorm(resid(m4.p.d))
qqline(resid(m4.p.d), col = 2, lwd = 2, lty = 2)
summary(m4.p.d)
# Watering
#m4.p.w.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Plantago erecta" & sb$Treat.Code != "D",])
m4.p.w <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Plantago erecta" & sb$Treat.Code != "D",])
#anova(m4.p.w.2, m4.p.w) #neg binom better
plot(fitted(m4.p.w), resid(m4.p.w))
qqnorm(resid(m4.p.w))
qqline(resid(m4.p.w), col = 2,lwd=2,lty=2)
summary(m4.p.w)
#### Model: CLPU ####
hist(sb[sb$Species_Name == "Clarkia purpurea",]$Count)
# Drought
m4.cl.d <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Clarkia purpurea" & sb$Treat.Code != "W",])
#m4.cl.d.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Clarkia purpurea" & sb$Treat.Code != "W",])
#anova(m4.cl.d.2, m4.cl.d) #neg binom better
plot(fitted(m4.cl.d), resid(m4.cl.d))
qqnorm(resid(m4.cl.d))
qqline(resid(m4.cl.d), col = 2, lwd = 2, lty = 2)
summary(m4.cl.d)
# Watering
#m4.cl.w.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Clarkia purpurea" & sb$Treat.Code != "D",])
m4.cl.w <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Clarkia purpurea" & sb$Treat.Code != "D",])
#anova(m4.cl.w.2, m4.cl.w) #neg binom better
plot(fitted(m4.cl.w), resid(m4.cl.w))
qqnorm(resid(m4.cl.w))
qqline(resid(m4.cl.w), col = 2,lwd=2,lty=2)
summary(m4.cl.w)
#### Model: LACA ####
hist(sb[sb$Species_Name == "Lasthenia californica",]$Count)
# Drought
m4.l.d <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Lasthenia californica" & sb$Treat.Code != "W",])
# m4.l.d.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Lasthenia californica" & sb$Treat.Code != "W",])
# anova(m4.l.d.2, m4.l.d) #neg binom better
plot(fitted(m4.l.d), resid(m4.l.d))
qqnorm(resid(m4.l.d))
qqline(resid(m4.l.d), col = 2, lwd = 2, lty = 2)
summary(m4.l.d)
# Watering
#m4.l.w.2 <- glmer(Count ~ Treat.Code + (1|Year), family = "poisson", data = sb[sb$Species_Name == "Lasthenia californica" & sb$Treat.Code != "D",])
m4.l.w <- glmer.nb(Count ~ Treat.Code + (1|Year), data = sb[sb$Species_Name == "Lasthenia californica" & sb$Treat.Code != "D",])
#anova(m4.l.w.2, m4.l.w) #neg binom better
plot(fitted(m4.l.w), resid(m4.l.w))
qqnorm(resid(m4.l.w))
qqline(resid(m4.l.w), col = 2,lwd=2,lty=2)
summary(m4.l.w)
#### Model: CAPA ####
# CAPA was only added in 2017, so plain glm.nb (no Year random effect)
# Drought
m4.ca.d <- glm.nb(Count ~ Treat.Code, data = sb[sb$Species_Name == "Calycadenia pauciflora" & sb$Treat.Code != "W",])
#m4.ca.d.2 <- glm(Count ~ Treat.Code, family = "poisson", data = sb[sb$Species_Name == "Calycadenia pauciflora" & sb$Treat.Code != "W",])
#dispersiontest(m4.ca.d.2, trafo = 1) #neg binom better
plot(fitted(m4.ca.d), resid(m4.ca.d))
qqnorm(resid(m4.ca.d))
qqline(resid(m4.ca.d), col = 2, lwd = 2, lty = 2)
summary(m4.ca.d)
# Watering
#m4.ca.w.2 <- glm(Count ~ Treat.Code, family = "poisson", data = sb[sb$Species_Name == "Calycadenia pauciflora" & sb$Treat.Code != "D",])
m4.ca.w <- glm.nb(Count ~ Treat.Code, data = sb[sb$Species_Name == "Calycadenia pauciflora" & sb$Treat.Code != "D",])
#dispersiontest(m4.ca.w.2, trafo = 1) #negbinom better
plot(fitted(m4.ca.w), resid(m4.ca.w))
qqnorm(resid(m4.ca.w))
qqline(resid(m4.ca.w), col = 2,lwd=2,lty=2)
summary(m4.ca.w)
#### Graphs and summaries ####
# NOTE(review): this silently overwrites the sb.sum created earlier with a
# different structure — consider a distinct name.
sb.sum <- summarySE(sb, measurevar = "p.surv", groupvars = c("Species_Name"))
write.table(sb.sum, "Seed-survival-soil.csv", row.names = F, sep = ",")
sb.sum.yr <- summarySE(sb, measurevar = "p.surv", groupvars = c("Species_Name", "Year"))
#### extra code ####
# sb.sum$Treat.Code <- revalue(sb.sum$Treat.Code, c("C" = "Control", "D" = "Drought", "W" = "Watered"))
#
# sb.sum$Species_Name <- factor(sb.sum$Species_Name, levels = c("Lasthenia californica", "Clarkia purpurea", "Agoseris heterophylla","Hemizonia congesta","Plantago erecta"))
#
# # Bar plot, species by treatment
# graph1a <- ggplot(sb.sum, aes(x = Treat.Code, y = Count, fill = Treat.Code)) +
# geom_bar(position = "dodge", stat = "identity", color = "black") +
# geom_errorbar(aes(ymin = Count - se, ymax = Count + se), width = 0.5, size = 0.9, position = position_dodge(width = 0.9)) +
# facet_wrap(~Species_Name) +
# theme_bw() +
# theme(legend.title = element_blank(),
# axis.text=element_text(size = 20),
# plot.title = element_text(size = 30, face = "bold", margin = margin(0, 0, 20, 0)),
# axis.title = element_text(size = 30),
# strip.text = element_text(size = 25),
# axis.title.y = element_text(margin = margin(0, 20, 0, 0)),
# axis.title.x = element_blank(),
# legend.position="none") +
# ylim(0,100) +
# labs(y = "Number of Viable Seeds")
#
# ggsave(graph1a, filename = "barplot-sppxtrt.png", path = plots, width = 12, height = 8, dpi = 300)
#
# graph1b <- ggplot(sb.sum, aes(x = Treat.Code, y = Count, fill = Treat.Code)) +
# geom_bar(position = "dodge", stat = "identity", color = "black") +
# geom_errorbar(aes(ymin = Count - se, ymax = Count + se), width = 0.5, size = 0.9, position = position_dodge(width = 0.9)) +
# facet_wrap(~Species_Name, nrow = 1) +
# theme_bw() +
# theme(legend.title = element_blank(),
# axis.text=element_text(size = 15),
# plot.title = element_text(size = 30, face = "bold", margin = margin(0, 0, 20, 0)),
# axis.title = element_text(size = 18),
# strip.text = element_text(size = 15),
# axis.title.y = element_text(margin = margin(0, 20, 0, 0)),
# axis.title.x = element_blank(),
# legend.position="none") +
# ylim(0,100) +
# labs(y = "Number of Viable Seeds")
#
# ggsave(graph1b, filename = "barplot-sppxtrt.png", path = plots, width = 15, height = 3, dpi = 300)
#
# # Hypothetical Graph
# st.c <- data.frame(Count = 30, Treat = "Control", Type = "Stress Tolerator")
# sa.c <- data.frame(Count = 60, Treat = "Control", Type = "Stress Avoider")
# st.d <- data.frame(Count = 30, Treat = "Drought", Type = "Stress Tolerator")
# sa.d <- data.frame(Count = 70, Treat = "Drought", Type = "Stress Avoider")
# st.w <- data.frame(Count = 20, Treat = "Watered", Type = "Stress Tolerator")
# sa.w <- data.frame(Count = 50, Treat = "Watered", Type = "Stress Avoider")
# hyp <- rbind(st.c, sa.c, st.d, st.w, sa.d, sa.w)
#
# graph2 <- ggplot(hyp, aes(x = Treat, y = Count, fill = Type)) +
# geom_bar(position = "dodge", stat = "identity", color = "black") +
# theme_bw() +
# theme(legend.title = element_blank(),
# axis.text=element_text(size = 20),
# plot.title = element_text(size = 30, face = "bold", margin = margin(0, 0, 20, 0)),
# axis.title = element_text(size = 30),
# strip.text = element_text(size = 25),
# axis.title.y = element_text(margin = margin(0, 20, 0, 0)),
# axis.title.x = element_blank(),
# legend.text = element_text(size = 20),
# legend.key.size = unit(3, 'lines')) +
# ylim(0,100) +
# labs(y = "Number of Viable Seeds", title = "Hypothetical Results")
#
# ggsave(graph2, filename = "hypbarplot-typxtrt.png", path = plots, width = 12, height = 8, dpi = 300)
#
# # treatment vs. average count
# graph3 <- ggplot(sb.sum, aes(x = Treat.Code, y = Count)) +
# geom_point(aes(color = Species_Name, shape = Species_Name), size = 6) +
# geom_errorbar(aes(ymin = Count - se, ymax = Count + se), width = 0.1, size = 0.6) +
# theme_bw() +
# theme(legend.title = element_blank(),
# legend.text = element_text(size = 20),
# legend.key.size = unit(3, 'lines'),
# axis.text=element_text(size = 20),
# plot.title = element_text(size = 30, face = "bold", margin = margin(0, 0, 20, 0)),
# axis.title = element_text(size = 30),
# strip.text = element_text(size = 25),
# axis.title.y = element_text(margin = margin(0, 20, 0, 0)),
# axis.title.x = element_blank()) +
# ylim(0, 100) +
# labs(y = "Number of Viable Seeds")
#
# ggsave(graph3, filename = "dotplot-sppxtrt.png", path = plots, width = 12, height = 8, dpi = 300)
#
|
1fabb072f96db8bc176bfd886d8d9cb3deda44a4
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/eseis/R/signal_motion.R
|
645286df9f9b67c75f120cad4a3caa27951cf7ef
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,910
|
r
|
signal_motion.R
|
#' Calculate particle motion parameters
#'
#' The function calculates from a data set of three seismic
#' components of the same signal the following particle motion
#' paramters using a moving window approach: horizontal-vertical
#' eigenvalue ratio, azimuth and inclination.
#'
#' The function code is loosely based on the function GAZI() from
#' the package RSEIS with removal of unnecessary content and updated
#' or rewritten loop functionality.
#'
#' @param data \code{List}, \code{data frame} or \code{matrix}, seismic
#' componenents to be processed. If \code{data} is a matrix, the components
#' must be organised as columns. Also, \code{data} can be a list of
#' \code{eseis} objects.
#'
#' @param time \code{POSIXct} vector, time vector corresponding to the
#' seismic signal components. If omitted, a synthetic time vector will
#' be generated. If omitted, the sampling period (\code{dt}) must be
#' provided.
#'
#' @param dt \code{Numeric} value, sampling period. Only needed if
#' \code{time} is omitted or if \code{data} is no \code{eseis} object.
#'
#' @param window \code{Numeric} value, time window length (given as
#' number of samples) used to calculate the particle motion parameters.
#' If value is odd, it will be set to the next smaller even value. If
#' omitted, the window size is set to 1 percent of the time series length by
#' default.
#'
#' @param step \code{Numeric} value, step size (given as number of samples),
#' by which the window is shifted over the data set. If omitted, the step
#' size is set to 50 percent of the window size by default.
#'
#' @param order \code{Character} value, order of the seismic components.
#' Describtion must contain the letters \code{"x"},\code{"y"} and
#' \code{"z"} in the order according to the input data set. Default is
#' \code{"xyz"} (EW-NS-vertical).
#'
#' @return A List object with eigenvalue ratios (\code{eigen}),
#' azimuth (\code{azimuth}) and inclination (\code{inclination}) as well
#' as the corresponding time vector for these values.
#'
#' @author Michael Dietze
#'
#' @keywords eseis
#'
#' @examples
#' ## load example data set
#' data(earthquake)
#'
#' ## filter seismic signals
#' s <- eseis::signal_filter(data = s,
#' dt = 1/200,
#' f = c(1, 3))
#'
#' ## integrate signals to get displacement
#' s_d <- eseis::signal_integrate(data = s, dt = 1/200)
#'
#' ## calculate particle motion parameters
#' pm <- signal_motion(data = s_d,
#' time = t,
#' dt = 1 / 200,
#' window = 100,
#' step = 10)
#'
#' ## plot function output
#' par_original <- par(no.readonly = TRUE)
#' par(mfcol = c(2, 1))
#'
#' plot(x = t, y = s$BHZ, type = "l")
#' plot(x = pm$time, y = pm$azimuth, type = "l")
#'
#' par(par_original)
#'
#' @export signal_motion
#'
signal_motion <- function(
  data,
  time,
  dt,
  window,
  step,
  order = "xyz"
) {
  
  ## check/set time and dt for non-eseis input
  if(class(data[[1]])[1] != "eseis") {
    
    if(missing(time) == TRUE) {
      
      if(missing(dt) == TRUE) {
        
        stop("Neither time nor dt provided!")
      }
      
      ## build the documented synthetic time vector from dt
      ## (bug fix: the original never reached this branch when only dt
      ## was given, so `time` stayed undefined)
      ## NOTE(review): assumes tabular input (matrix/data frame) here;
      ## list input without time would need length() instead of nrow()
      time <- seq(from = 0, 
                  by = dt, 
                  length.out = nrow(data))
    }
    
    if(missing(dt) == TRUE) {
      
      dt <- NULL
    }
  } else {
    
    ## eseis input: placeholders, both are filled from meta data below
    dt <- NULL
    time <- NULL
  }
  
  ## check/set window size
  if(missing(window) == TRUE) {
    
    if(class(data[[1]])[1] == "eseis") {
      
      n <- data[[1]]$meta$n
    } else {
      
      n <- nrow(data)
    }
    
    ## default window: 1 percent of the series length
    ## (bug fix: use n — nrow(data) is NULL for eseis list input)
    window <- round(x = n * 0.01, 
                    digits = 0)
  }
  
  ## force window length to an even number of samples
  ## NOTE(review): the roxygen text describes the opposite (even -> odd);
  ## the code has always rounded odd windows down to even - kept as-is
  if(window %% 2 != 0) {
    
    window <- window - 1
  }
  
  ## check/set step size: default is 50 percent overlap
  if(missing(step) == TRUE) {
    
    step <- round(x = window * 0.5, 
                  digits = 0)
  }
  
  ## get start time (for the eseis processing history)
  eseis_t_0 <- Sys.time()
  
  ## collect function arguments
  eseis_arguments <- list(data = "",
                          time = time,
                          dt = dt,
                          window = window,
                          step = step,
                          order = order)
  
  ## homogenise data structure
  if(class(data[[1]])[1] == "eseis") {
    
    ## set eseis flag
    eseis_class <- TRUE
    
    ## store initial object
    eseis_data <- data
    
    ## extract signal vectors
    data <- lapply(X = data, FUN = function(X) {
      X$signal
    })
    
    ## update dt from meta data
    dt <- eseis_data[[1]]$meta$dt
    
    ## generate time vector from meta data
    time <- seq(from = eseis_data[[1]]$meta$starttime,
                by = eseis_data[[1]]$meta$dt,
                length.out = eseis_data[[1]]$meta$n)
  } else {
    
    ## set eseis flag
    eseis_class <- FALSE
  }
  
  ## homogenise data structure: list of vectors -> matrix -> data frame
  if(class(data)[1] == "list") {
    
    data <- do.call(cbind, data)
  }
  
  data <- as.data.frame(x = data)
  
  ## optionally update component order so columns are x, y, z
  component_ID <- strsplit(x = order, split = "")[[1]]
  
  data <- data[,order(component_ID)]
  
  ## define left window margins
  window_left <- seq(from = 1, 
                     to = nrow(data) - window,
                     by = step)
  
  ## derive right margins from the left margins
  ## (bug fix: two independent seq() calls could differ in length by one
  ## element, causing silent recycling when computing the time vector)
  window_right <- window_left + window - 1
  
  ## define output variables; time stamp is the window centre
  time_i <- time[(window_left + window_right) / 2]
  
  eig_ratio_i <- numeric(length = length(window_left))
  azimuth_i <- numeric(length = length(window_left))
  inclination_i <- numeric(length = length(window_left))
  
  ## do window-based calculus
  for(i in seq_along(window_left)) {
    
    ## isolate data within window
    data_i <- data[window_left[i]:window_right[i],]
    
    ## calculate covariance matrix of the three components
    cov_i <- stats::var(x = data_i)
    
    ## calculate eigen space (values sorted in decreasing order)
    eig_i <- eigen(x = cov_i, symmetric = TRUE)
    
    ## eigenvalue ratio: 1 minus mean of the two minor eigenvalues
    ## relative to the major one (1 = perfectly linear particle motion)
    eig_ratio_i[i] <- 1 - ((eig_i$values[2] + eig_i$values[3]) / 
                             (2 * eig_i$values[1]))
    
    ## azimuth of the principal eigenvector, in degrees
    azimuth_i[i] <- 180 / pi * atan2(eig_i$vectors[2,1], 
                                     eig_i$vectors[3,1])
    
    ## inclination of the principal eigenvector, in degrees
    inclination_i[i] <- abs(180 / pi * atan2(eig_i$vectors[1,1], 
                                             sqrt(eig_i$vectors[2,1]^2 + 
                                                    eig_i$vectors[3,1]^2)))
  }
  
  ## create function output
  data_out <- list(time = time_i,
                   eigen = eig_ratio_i,
                   azimuth = azimuth_i,
                   inclination = inclination_i)
  
  ## optionally rebuild eseis object
  if(eseis_class == TRUE) {
    
    ## assign result and inherited processing history
    eseis_data <- list(time = time_i,
                       eigen = eig_ratio_i,
                       azimuth = azimuth_i,
                       inclination = inclination_i,
                       history = eseis_data[[1]]$history)
    
    ## calculate function call duration
    eseis_duration <- as.numeric(difftime(time1 = Sys.time(), 
                                          time2 = eseis_t_0, 
                                          units = "secs"))
    
    ## update object history
    eseis_data$history[[length(eseis_data$history) + 1]] <- 
      list(time = Sys.time(),
           call = "signal_motion()",
           arguments = eseis_arguments,
           duration = eseis_duration)
    names(eseis_data$history)[length(eseis_data$history)] <- 
      as.character(length(eseis_data$history))
    
    ## set S3 class name
    class(eseis_data)[1] <- "eseis"
    
    ## assign eseis object to output data set
    data_out <- eseis_data
  }
  
  ## return function output
  return(data_out)
}
|
d4fca4070ad516773af77b7bbaeb0c4882ef107d
|
1f5b0fe2b9ba34c0c04b072d794f644c767cd085
|
/man/ridge_reg.Rd
|
a4c3bed95afe3fb763ab8e81577b3a76da60f7a0
|
[] |
no_license
|
tianqizhao1203/bis557
|
2e9b2ffca3615dbca214a49213657fb94228f040
|
13d436406e9f3e719b7b851446c75527c8de3a70
|
refs/heads/master
| 2020-03-28T01:44:01.555023
| 2018-12-19T13:24:14
| 2018-12-19T13:24:14
| 147,527,066
| 0
| 0
| null | 2018-09-05T14:01:29
| 2018-09-05T14:01:28
| null |
UTF-8
|
R
| false
| false
| 850
|
rd
|
ridge_reg.Rd
|
\name{ridge_reg}
\alias{ridge_reg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Ridge Regression via SVD
}
\description{
Use SVD to estimate the coefficients in ridge regression.
}
\usage{
ridge_reg(formula, lambda, data)
}
\arguments{
\item{formula}{Formula for ridge regression.}
\item{lambda}{Penalty on beta coefficients.}
\item{data}{Dataset for ridge regression.}
}
\value{
\item{coefficients }{Estimated betas for ridge regression}
\item{lambda }{Lambda used for calculating betas}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
\dontrun{ridge_reg(Sepal.Length ~., 1.12, iris)}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d4fedc0e76063d03ce19d09a84782086321e4a30
|
1cf1f5e094abdca9cf4222aeeaf393154d5b091d
|
/chap2/RINT217.R
|
1247274240969c0e83ac55d55d68fd737e4efe50
|
[] |
no_license
|
Daigo1009/r_introduction
|
a0c14d5ccc1395118ca2cc1fe6141a743f3eabce
|
e9983214ce3b61a8197ac003f1ee62fd1b9c0943
|
refs/heads/master
| 2022-04-19T16:32:38.699232
| 2013-10-08T13:52:12
| 2013-10-08T13:52:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 124
|
r
|
RINT217.R
|
# Draw 16 uniform random numbers on [0, 1]
# (assignment changed from `=` to the idiomatic `<-`)
x <- runif(16, 0, 1)
x
# Sort in ascending order
y <- sort(x)
y
# Sort in descending order
z <- sort(x, decreasing = TRUE)
z
# Extract the values of rank 1 through 6 (the six smallest)
yy <- y[y <= y[6]]
yy
|
1d6a7a9a797bcb8afbdd99f4c5edd5fc0c67a4b5
|
2c3fdff1e610c107fc3eea8137b58928725dcec9
|
/R/tables-supplement.R
|
91992692d6c8f612499aeebdd0ea304559164be8
|
[] |
no_license
|
aloy/sociology_chapter
|
e460d3d60c9d2c161f35c65c50a6d49a20ec4546
|
86f590aa23c8d4ed25fa093a368e82e8be4bff27
|
refs/heads/master
| 2020-04-12T01:29:49.005056
| 2016-12-06T22:56:23
| 2016-12-06T22:56:23
| 9,603,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,409
|
r
|
tables-supplement.R
|
# Builds the two supplement tables (LaTeX via xtable) from the Turk
# lineup study: per-lineup correct-pick counts with visual-inference
# p-values, and reason-for-choice percentages per experiment.
# NOTE(review): require() is used for loading; library() would fail fast
# if a package is missing. dplyr::mutate also needs dplyr installed.
require(plyr)
require(magrittr)
require(vinference)
require(reshape2)
require(xtable)
set.seed(20161001)
turk <- read.csv("../data/study.csv")
# Flag evaluations where the participant picked the true data panel
turk <- turk %>% dplyr::mutate(
  correct = response==data_location
)
# Weighted correct picks and total evaluations per lineup
lps <- ddply(turk, .(lineup), summarise,
  correct=sum(correct*weight),
  num=sum(weight)
)
# Visual-inference p-value per lineup (scenario 3, m = 20 panels)
# NOTE(review): vinference:::pV is a non-exported (internal) function
lps$pvals <- unlist(llply(1:nrow(lps), function(i) {
  correct <- floor(lps$correct[i])
  n <- lps$num[i]
  vinference:::pV(correct, n, m=20, scenario=3)
}))
lps$signif <- lps$pvals < 0.05
# Split lineup id "<experiment>-<replicate>" into its parts
lps$rep <- as.numeric(gsub(".*-([0-9])","\\1",as.character(lps$lineup)))
lps$exp <- gsub("(.*)-[0-9]","\\1",as.character(lps$lineup))
# Overall p-value per experiment from the simulated null distribution
pval.overall <- ddply(lps, .(exp), function(x) {
  res <- vinference:::scenario3(N=10000, sum(x$num))
  dres <- as.data.frame(res)
  with(dres, sum(Freq[as.numeric(names(res)) >= sum(x$correct)]))
})$V1
# Significance stars and "correct/total" cell strings (LaTeX markup)
lps$stars <- cut(lps$pvals, breaks=c(0,0.001, 0.01, 0.05, 0.1, 1))
levels(lps$stars) <- c("***", "**", "*", ".", " ")
lps$str <- with(lps, sprintf("%d/%d & \\hspace{-0.1in}%s", floor(correct), round(num), stars))
dt <- dcast(lps, exp~rep, value.var="str")
dt$overall <- ifelse(pval.overall < 10^-4, "$< 10^{-4}$", sprintf("%.4f",pval.overall))
# *********** Table 1 Supplement ********************
print(xtable(dt), sanitize.text.function=function(x)x)
# *********** Table 1 Supplement ********************
# Reason codes: first token of `reason`, one character per stated reason;
# each evaluation's weight is split evenly across its reason characters.
turk$choice <- gsub("_.*", "", as.character(turk$reason))
turk$choiceWT <- nchar(turk$choice)
reasons <- dlply(turk, .(lineup, correct), function(x) {
  choices <- unlist(strsplit(x$choice, split=""))
  weights <- rep(x$weight*1/x$choiceWT, x$choiceWT)
  dt <- xtabs(weights~choices)
  as.data.frame(dt)
})
dreasons <- ldply(reasons, function(x) x)
dreasons$exp <- gsub("(.*)-[0-9]", "\\1", as.character(dreasons$lineup))
dreasons$pick <- c("null", "data")[dreasons$correct+1]
res <- ddply(dreasons, .(exp, pick, choices), summarise, Freq=sum(Freq))
# probabilities the other way round
qt <- ddply(res, .(exp, choices), transform, perc=Freq/sum(Freq)*100)
qt2 <- dcast(qt, exp+pick~choices, value.var="perc")
names(qt2)[3:7] <- c("Outlier", "Spread", "Trend", "Asymmetry", "Other")
# *********** Table 2 Supplement ********************
print(xtable(subset(qt2, pick=="data")[,-2], digits=c(1,1,1,1,1,1,1)), include.rownames=FALSE, NA.string="0.0")
# *********** Table 2 Supplement ********************
|
d19669e202390f4ddb5ccb5b281443f611a05987
|
4673e7ccf7fe53ff8dacb56453580cdb11075319
|
/scripts/Resumenmunicipios.R
|
73c306edabad41083811fde689a573132c8c2efd
|
[] |
no_license
|
dogomoreno/Covid19-Sonora-Municipios
|
2beb16fed697160b5785c961a6d88d6c6417a7d9
|
440eee0435dba671936194d8927e3dc4db95d690
|
refs/heads/master
| 2023-08-14T22:10:20.303409
| 2021-10-04T06:14:27
| 2021-10-04T06:14:27
| 274,054,270
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,366
|
r
|
Resumenmunicipios.R
|
# Resumenmunicipios.R: per-municipality COVID-19 summary charts for Sonora.
# (Removed `rm(list = ls())`: wiping the caller's entire workspace is a harmful
# side effect; run the script in a fresh R session instead.)

# Packages
library(tidyverse)
library(extrafont)
library(scales)
library(showtext)
library(units)
library(zoo)
library(lubridate)
library(Cairo)
library(directlabels)
library(ggtext)
library(patchwork)

# Axis-break helper: keep only the integer-valued pretty breaks.
int_breaks_rounded <- function(x, n = 5) pretty(x, n)[round(pretty(x, n), 1) %% 1 == 0]

# Municipal daily series of confirmed cases and deaths.
Casos <- read_csv("Bases/Casosdiarios.csv",
                  col_types = cols(CASOS = col_integer(),
                                   CVEGEO = col_character(), Fecha = col_date(format = "%Y-%m-%d"),
                                   MUNICIPIO = col_character(), NUEVOS = col_integer(), X1 = col_skip()),
                  locale = locale(encoding = "ISO-8859-1"))
Decesos <- read_csv("Bases/Decesosdiarios.csv",
                    col_types = cols(DECESOS = col_integer(),
                                     CVEGEO = col_character(), Fecha = col_date(format = "%Y-%m-%d"),
                                     MUNICIPIO = col_character(), NUEVOS = col_integer(), X1 = col_skip()),
                    locale = locale(encoding = "ISO-8859-1"))

# Report-date line and source caption (Spanish, user-facing text).
Fechahoy <- paste0("Al reporte del ", day(Sys.Date()), " de ", months.Date(Sys.Date()), " de ", year(Sys.Date()))
fuente <- "Elaboración Luis Armando Moreno (@dogomoreno) con información de la Secretaría de Salud del Estado de Sonora\nPor continuidad, la fecha de corte se asume como la del día anterior al reporte. | www.luisarmandomoreno.com"

POBMUN <- read_csv("Bases/POBMUN.csv", col_types = cols(CVEGEO = col_character()),
                   locale = locale(encoding = "ISO-8859-1"))

# Shared ggplot theme used by both chart panels.
temaejes <- theme(axis.line = element_line(linetype = "solid"), plot.margin = margin(10, 25, 10, 25),
                  plot.title = element_markdown(family = "Lato Black", size = 15),
                  plot.subtitle = element_text(family = "Lato Light", size = 10, color = "black"), legend.title = element_blank(),
                  strip.text = element_text(family = "Lato Black", size = 10),
                  axis.text = element_text(family = "Lato", size = 6),
                  plot.background = element_blank(),
                  axis.title.x = element_text(family = "Lato Light", size = 8, hjust = 1),
                  axis.title.y = element_text(family = "Lato Light", size = 8, hjust = 1),
                  plot.caption = element_text(family = "Lato", size = 6),
                  legend.text = element_blank(),
                  legend.position = "none", plot.title.position = 'plot', plot.caption.position = 'plot')

# 7-day rolling means of new cases and deaths, computed per municipality.
Casosconfd <- Casos %>% group_by(MUNICIPIO) %>%
  rename(Casos.diarios = NUEVOS) %>%
  mutate(Casos.media.7d = round(rollmeanr(x = Casos.diarios, 7, fill = NA), 1))
Decesosconfd <- Decesos %>% group_by(MUNICIPIO) %>%
  rename(Decesos.diarios = NUEVOS) %>%
  mutate(Decesos.media.7d = round(rollmeanr(x = Decesos.diarios, 7, fill = NA), 1))

# Join cases and deaths; municipalities with no recorded deaths get explicit zeros.
CasosDecesos <- Casosconfd %>% left_join(Decesosconfd, by = c("Fecha", "CVEGEO", "MUNICIPIO"))
CasosDecesos$DECESOS[is.na(CasosDecesos$DECESOS)] <- 0
CasosDecesos$Decesos.diarios[is.na(CasosDecesos$Decesos.diarios)] <- 0
# Build and save a two-panel (cases over deaths) time-series chart for one
# municipality. Relies on globals defined earlier in this script: CasosDecesos,
# temaejes, int_breaks_rounded, Fechahoy and fuente. Writes municipales/<x>.png
# as its side effect.
#
# x: municipality name as it appears in CasosDecesos$MUNICIPIO.
plot_municipio <- function(x = "Hermosillo") {
  # Series for this municipality; tmp2 is its most recent day (for subtitles).
  tmp <- CasosDecesos %>%
    filter(MUNICIPIO == x)
  tmp2 <- tmp %>% filter(Fecha==max(as.Date(Fecha)))
  # Panel 1: daily confirmed cases (points) with 7-day moving average (line + area).
  p1 <- ggplot(tmp) +
    geom_area(aes(x= Fecha, y= Casos.media.7d), fill= "#58BCBC", alpha=0.3)+
    geom_line(aes(x= Fecha, y= Casos.media.7d, color= "Tendencia promedio móvil 7 días"), linetype= "solid", size=.75, arrow=arrow(type="open", length=unit(0.10,"cm")))+
    geom_point(aes(x= Fecha, y= Casos.diarios), color = "white", fill= "#01787E", size = 0.9, stroke=0.4, alpha=0.65, shape = 21) +
    scale_fill_manual(name="", values= c("Tendencia promedio móvil 7 días" = "#58BCBC", "Casos diarios" = "#01787E")) +
    scale_color_manual(name="", values= c("Tendencia promedio móvil 7 días" = "#01787E", "Casos diarios" = "white")) +
    # Head-room of +5 above the daily maximum keeps the arrow/points off the frame.
    scale_y_continuous(expand = c(0, 0), limits = c(0, (max(tmp$Casos.diarios)+5)), breaks = int_breaks_rounded) +
    scale_x_date(expand=c(0,0), date_breaks = "1 month", date_labels = "%B", limits=c(as.Date("2020-03-16"), (Sys.Date()+5))) +
    theme_bw() + temaejes +
    theme(legend.text = element_text(family = "Lato", size = 8), legend.background = element_rect(fill="transparent"),
          legend.position = c(0.02,0.95), legend.justification="left", legend.margin=margin(t = 0, unit='cm'),
          legend.key = element_rect(fill="transparent")) +
    labs(y = NULL,
         x = NULL,legend= NULL, title = paste0("<span style = 'color:#01A2AC';>Casos confirmados acumulados: ", prettyNum(as.numeric(max(tmp$CASOS)), big.mark=",", preserve.width="none"),"</span>"),
         subtitle= paste0("Casos confirmados hoy: ",tmp2$Casos.diarios), caption =NULL)
  # Panel 2: same layout for confirmed deaths, in the magenta palette.
  p2 <- ggplot(tmp) +
    geom_area(aes(x= Fecha, y= Decesos.media.7d), fill= "#D075A3", alpha=0.3)+
    geom_line(aes(x= Fecha, y= Decesos.media.7d, color= "Tendencia promedio móvil 7 días"), linetype= "solid", size=.75, arrow=arrow(type="open", length=unit(0.10,"cm")))+
    geom_point(aes(x= Fecha, y= Decesos.diarios), color = "white", fill= "#73264D", size = 0.9, stroke=0.4, alpha=0.65, shape = 21) +
    scale_fill_manual(name="", values= c("Decesos diarios" = "#73264D", "Tendencia promedio móvil 7 días" = "#D075A3")) +
    scale_color_manual(name="", values= c("Decesos diarios" = "white","Tendencia promedio móvil 7 días" = "#73264D")) +
    # Deaths are far fewer than cases, so only +2 head-room here.
    scale_y_continuous(expand = c(0, 0), limits = c(0, (max(tmp$Decesos.diarios)+2)), breaks = int_breaks_rounded) +
    scale_x_date(expand=c(0,0), date_breaks = "1 month", date_labels = "%B", limits=c(as.Date("2020-03-16"), (Sys.Date()+5))) +
    theme_bw() + temaejes +
    theme(legend.text = element_text(family = "Lato", size = 8), legend.background = element_rect(fill="transparent"),
          legend.position = c(0.02,0.95), legend.justification="left", legend.margin=margin(t = 0, unit='cm'),
          legend.key = element_rect(fill="transparent")) +
    labs(y = NULL,
         x = NULL,legend= NULL, title = paste0("<span style = 'color:#993366';> Decesos confirmados acumulados: ", prettyNum(as.numeric(max(tmp$DECESOS)), big.mark=",", preserve.width="none"),"</span>"),
         subtitle= paste0("Decesos confirmados hoy: ",tmp2$Decesos.diarios), caption =NULL)
  # Stack the panels (patchwork's `/` operator) and add the shared title block.
  patchwork <- (p1 / p2)
  p3 <- patchwork + plot_annotation(
    title = paste0("<span style = 'font-size:12pt'>Covid-19 en Sonora:</span><br>",x),
    subtitle = Fechahoy,
    caption = fuente, theme= theme(
      plot.title = element_markdown(family = "Lato Black", size = 30),
      plot.subtitle = element_text(family = "Lato Light", size = 12, color = "black"),
      plot.caption = element_text(family = "Lato", size = 8), plot.title.position = 'plot',
      plot.caption.position = 'plot', plot.margin = margin(10, 25, 10, 25),
      plot.background = element_rect(fill = "white", color = "white", size = 3)))
  # Cairo device for proper anti-aliasing / font rendering on all platforms.
  ggsave(paste0("municipales/", x,".png"),p3, width = 5 * (16/9), height = 10, type = "cairo", dpi = 400)
}
# Render and save one chart per municipality present in the joined data
# (side effects only: each call writes a PNG under municipales/).
invisible(lapply(unique(CasosDecesos$MUNICIPIO), plot_municipio))
|
fcad8500f1ebc4a5d1a25ff9d5e0540868beb1a9
|
d5e64b2499f6a4ae18dff2c15894caf91bd41fc7
|
/man/meteo_noaa_hourly.Rd
|
a77a2e53f43438f8845478b41791906b73e1cf47
|
[
"MIT"
] |
permissive
|
bczernecki/climate
|
cbe81b80335d3126ad943726c8d3185805900462
|
9c168a6a58854c374cd4c7b13b23cba28adeb7e2
|
refs/heads/master
| 2023-04-14T03:02:26.083700
| 2023-04-01T13:48:48
| 2023-04-01T13:48:48
| 197,452,909
| 66
| 23
|
NOASSERTION
| 2023-04-01T13:48:50
| 2019-07-17T19:49:40
|
R
|
UTF-8
|
R
| false
| true
| 1,221
|
rd
|
meteo_noaa_hourly.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meteo_noaa_hourly.R
\name{meteo_noaa_hourly}
\alias{meteo_noaa_hourly}
\title{Hourly NOAA Integrated Surface Hourly (ISH) meteorological data}
\usage{
meteo_noaa_hourly(
station = NULL,
year = 2019,
fm12 = TRUE,
allow_failure = TRUE
)
}
\arguments{
\item{station}{ID of meteorological station(s) (characters). Find your station's ID at: https://www1.ncdc.noaa.gov/pub/data/noaa/isd-history.txt}
\item{year}{vector of years (e.g., 1966:2000)}
\item{fm12}{use only FM-12 (SYNOP) records (TRUE by default)}
\item{allow_failure}{logical - whether to proceed or stop on failure. By default set to TRUE (i.e. don't stop on error). For debugging purposes change to FALSE}
}
\description{
Downloading hourly (meteorological) data from the SYNOP stations available in the NOAA ISD collection.
Some stations in the dataset are dated back even up to 1900.
By default only records that follow FM-12 (SYNOP) convention are processed.
Further details available at: https://www1.ncdc.noaa.gov/pub/data/noaa/readme.txt
}
\examples{
\donttest{
# London-Heathrow, United Kingdom
noaa = meteo_noaa_hourly(station = "037720-99999", year = 1949)
}
}
|
786ebf337cb8f5df97d49718d435fd047a68d17e
|
ffe87a0a6134783c85aeb5b97332b201d50aca9d
|
/MINI_2015/filmy/Faza 1/Pytlak_Rdzanowski_Sommer/projekt.filmy/R/uprosc_tytul.R
|
db8aae1e7dc2a479a59e8f81a99ba64f4fc8d546
|
[] |
no_license
|
smudap/RandBigData
|
d34f6f5867c492a375e55f04486a783d105da82d
|
4e5818c153144e7cc935a1a1368426467c3030a5
|
refs/heads/master
| 2020-12-24T15:51:11.870259
| 2015-06-16T08:50:34
| 2015-06-16T08:50:34
| 32,064,294
| 0
| 0
| null | 2015-03-12T07:53:56
| 2015-03-12T07:53:56
| null |
UTF-8
|
R
| false
| false
| 980
|
r
|
uprosc_tytul.R
|
#' Simplify movie titles
#'
#' \code{uprosc_tytul()} returns the title vector stripped of spaces, hyphens,
#' commas etc., with all letters lower-cased and the listed UTF-8 (mostly
#' Polish) characters transliterated to their plain Latin equivalents.
#'
#' @param wektor_tytulow character vector of titles
#' @return character vector of simplified titles
uprosc_tytul <- function(wektor_tytulow){
  # Diacritic -> plain Latin substitutions, applied in this fixed order.
  podmiany <- c(
    "\u0105" = "a", "\u0107" = "c", "\u0119" = "e", "\u0142" = "l",
    "\u0144" = "n", "\u00F3" = "o", "\u015B" = "s", "\u017A" = "z",
    "\u017C" = "z", "\u00FC" = "u", "\u00E7" = "c"
  )
  wynik <- wektor_tytulow
  for (znak in names(podmiany)) {
    wynik <- stri_replace_all_fixed(wynik, znak, podmiany[[znak]])
  }
  # Lower-case, split each title into words, then glue the words back together
  # with no separator (dropping spaces/hyphens/punctuation in the process).
  wynik <- stri_trans_tolower(wynik)
  slowa <- stri_extract_all_words(wynik)
  unlist(lapply(slowa, stri_paste, collapse = ""))
}
|
650e3583b7c9b871e231219d8c3435e988cbfac0
|
7a7375245bc738fae50df9e8a950ee28e0e6ec00
|
/man/SA3__Year_SameAddress5YearsAgo_Sex.Rd
|
4341858a3c47f653d4ebaabb3ee633f2e9d632ac
|
[] |
no_license
|
HughParsonage/Census2016.DataPack.TimeSeries
|
63e6d35c15c20b881d5b337da2f756a86a0153b5
|
171d9911e405b914987a1ebe4ed5bd5e5422481f
|
refs/heads/master
| 2021-09-02T11:42:27.015587
| 2018-01-02T09:01:39
| 2018-01-02T09:02:17
| 112,477,214
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 443
|
rd
|
SA3__Year_SameAddress5YearsAgo_Sex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SA3__Year_SameAddress5YearsAgo_Sex.R
\docType{data}
\name{SA3__Year_SameAddress5YearsAgo_Sex}
\alias{SA3__Year_SameAddress5YearsAgo_Sex}
\title{SameAddress5YearsAgo, Sex by SA3, Year}
\format{6,444 observations and 5 variables.}
\usage{
SA3__Year_SameAddress5YearsAgo_Sex
}
\description{
Number of personsSameAddress5YearsAgo, Sex by SA3, Year
}
\keyword{datasets}
|
c688620203fcc4435100934717d1875094dc0e92
|
2b3cbc05953d0502cfd03db9cc8818ceff5783c2
|
/80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/athD2jOBHdugj.R
|
3eb6132b25fe67116e23481c657cce032465c879
|
[] |
no_license
|
ayanmanna8/test
|
89124aa702fba93a0b6a02dbe6914e9bc41c0d60
|
4f49ec6cc86d2b3d981940a39e07c0aeae064559
|
refs/heads/master
| 2023-03-11T19:23:17.704838
| 2021-02-22T18:46:13
| 2021-02-22T18:46:13
| 341,302,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
r
|
athD2jOBHdugj.R
|
# Auto-generated SEMOSS federation-blend step: replaces the `location` column
# of FRAME878836 with a curated version, then removes the temporaries and the
# helper functions pulled in by source().
with(a413c0d0cd878438a8d3b09b70251d2b6, {
  ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936'
  source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r")
  # Current values of the column to be curated.
  a2Hrpdwy3col1 <- as.character(FRAME878836$location)
  # Link table for the curation (no explicit mappings supplied here).
  linkaep6OB <- data.table("col1" = c("null"), "col2" = c("null"))
  linkaep6OB <- unique(linkaep6OB)
  # Curate the column, then splice it back in under its original name.
  af9ZX8BPF <- curate(a2Hrpdwy3col1, linkaep6OB)
  af9ZX8BPF <- as.data.table(af9ZX8BPF)
  names(af9ZX8BPF) <- "a3Llba3UK"
  FRAME878836 <- cbind(FRAME878836, af9ZX8BPF)
  FRAME878836 <- FRAME878836[, -c("location")]
  colnames(FRAME878836)[colnames(FRAME878836) == "a3Llba3UK"] <- "location"
  # Clean up the temporaries and sourced helpers.
  rm(af9ZX8BPF, linkaep6OB, a2Hrpdwy3col1, a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match)
})
|
a56671b0b4d6eaa494218f73a2b537d235d42103
|
25e07fb791152e7532c1c4375910b443751497ca
|
/meta-meta/analyses/irr.r
|
db814193b26023e1dc04c1c3f848f4d3c2c79b53
|
[] |
no_license
|
aadu/manuscripts
|
96da7510ab9098e53c8ea08d2dafdd1e71220cdf
|
de1cc69e315bc685d814a9f72499d1b1344d95e0
|
refs/heads/master
| 2021-01-18T14:15:58.715577
| 2015-03-24T17:35:05
| 2015-03-24T17:35:05
| 32,485,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 716
|
r
|
irr.r
|
# irr.r -- inter-rater reliability for the effect-size coding spreadsheet.
library(irr)
source("./functions.r")

# Previous spreadsheet revision, kept for reference:
#es = import.google("https://docs.google.com/spreadsheet/pub?key=0Ar2MWSi_hKI6dHN6NWZvV09teWVSdUxVRElHWG5KSWc&single=true&gid=14&output=csv")
es <- import.google("https://docs.google.com/spreadsheet/pub?key=0Ar2MWSi_hKI6dEtjZEhiVE11amZMRDZ2SzZIeG56Unc&single=true&gid=14&output=csv")

# Keep only Aaron's included, non-missing codings.
es <- es[es$coder == "Aaron", ]
es <- es[es$include == 1, ]
es <- es[!is.na(es$coder), ]

# Weights arrive as formatted strings ("1,234"); strip commas before converting.
es$weight <- as.numeric(gsub(",", "", es$weight))
es <- es[order(es$bibtex), ]
View(es)

# Pair up the two raters' effect sizes.
x <- data.frame(me = as.double(es$d), her = as.double(es$me.recode))

# Where the second rater did not recode, fall back to the first rater's value
# (vectorised replacement of the original element-wise loop).
missing_her <- is.na(x$her)
x$her[missing_her] <- x$me[missing_her]
x <- subset(x, !is.na(her))

# Manual correction of one coding discrepancy; the row index depends on the
# bibtex ordering above -- TODO(review): key this on bibtex, not position.
x[24, 2] <- .204

agree(x)
kappa2(ratings = x)
|
9267f354ce9938a10751da3ae70f9a8e44f1771c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rfm/examples/rfm_histograms.Rd.R
|
41b92fe428fb0f569b640246d368fd80651112c7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 587
|
r
|
rfm_histograms.Rd.R
|
# Example usage of rfm_histograms() from the rfm package.
library(rfm)
### Name: rfm_histograms
### Title: RFM histograms
### Aliases: rfm_histograms
### ** Examples

# Transaction-level data: build the RFM table from individual orders.
order_cutoff <- lubridate::as_date('2006-12-31', tz = 'UTC')
order_rfm <- rfm_table_order(
  rfm_data_orders, customer_id, order_date, revenue, order_cutoff
)
# histogram
rfm_histograms(order_rfm)

# Customer-level data: build the RFM table from pre-aggregated summaries.
customer_cutoff <- lubridate::as_date('2007-01-01', tz = 'UTC')
customer_rfm <- rfm_table_customer(
  rfm_data_customer, customer_id, number_of_orders, recency_days, revenue,
  customer_cutoff
)
# histogram
rfm_histograms(customer_rfm)
|
67b88bcb50959d935812d6a50f2adc7c0b38169d
|
37588c10c4204facf11b49b0468e1c3eb411cb7a
|
/TwitterSentiAnalysis.R
|
df32b89b8823ba3dee0893c0937385221ea60ef8
|
[] |
no_license
|
sneharavi12/ADS
|
e9a3fff3042d3dc32ac0421d6455e07b21cba9ae
|
f0b7ed473856cbf7db31f928c4ff9bffe2e5cdc1
|
refs/heads/master
| 2021-01-23T08:21:46.585916
| 2017-02-14T15:42:25
| 2017-02-14T15:42:25
| 80,540,179
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,316
|
r
|
TwitterSentiAnalysis.R
|
# TwitterSentiAnalysis.R -- OAuth setup for pulling tweets for sentiment analysis.
install.packages(c("twitteR","ROAuth","plyr","stringr","ggplot2","base64enc"), dependencies = TRUE)
library(base64enc)
library(twitteR)
library(ROAuth)
library(plyr)
library(stringr)
library(ggplot2)
library(RCurl)

# Use the CA bundle shipped with RCurl for SSL certificate verification.
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
# Fixed: the original URL and destfile were misspelled
# ("http://culr.haxx.se/cs/cacert.pen" -> "cacert.pen"), so this download
# could never succeed.
download.file(url = "http://curl.haxx.se/ca/cacert.pem", destfile = "cacert.pem")

requestURL <- "https://api.twitter.com/oauth/request_token"
accessURL <- "https://api.twitter.com/oauth/access_token"
authURL <- "https://api.twitter.com/oauth/authorize"

# SECURITY: credentials are hard-coded below. Move them to environment
# variables (Sys.getenv) before sharing this script; keys committed to a
# public repository should be considered compromised and regenerated.
consumerKey <- "hHyxCVkHEi2Y7UlMftIX3U0oD"
consumerSecret <- "paASADLDtQa0Or3yEvjG6QHJEYLhImoBHZYsIbtzJr8erG00vQ"

# Build, persist and replay the OAuth credential, then complete the handshake.
Cred <- OAuthFactory$new(consumerKey = consumerKey,
                         consumerSecret = consumerSecret,
                         requestURL = requestURL,
                         accessURL = accessURL,
                         authURL = authURL)
save(Cred, file ="twitter authentication.Rdata")
load("twitter authentication.Rdata")
Cred$handshake()

access_token <- "831204380460335104-Qk4WwgPuFVRc4uFe4elzW72yOQaY1AU"
access_token_secret <- "IpAOC1NnRbsBpZfBnxIquw6gQdl8quUkl1VxfrogBCQBD"
setup_twitter_oauth(consumerKey, consumerSecret, access_token, access_token_secret)
|
c06762e645223db0824cecaa000690c540720510
|
17671f171287c4d04ac913755bd1324e325d83ca
|
/R/RelCond_Fisheries.R
|
0676bca36adc15c39e0e18511c7b005d0925f22d
|
[
"MIT"
] |
permissive
|
rosalieb/GrowthEqFisheries
|
86b861dc739689ed6284237d1af8d548dedf8d5b
|
1748ac754ff7d53eba4ef7f09991592f0f7d1634
|
refs/heads/master
| 2020-08-02T12:14:27.635621
| 2019-09-27T19:22:49
| 2019-09-27T19:22:49
| 211,348,685
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,700
|
r
|
RelCond_Fisheries.R
|
#' Relative condition factor
#'
#' Compute the relative condition factor Kn = W / W', used to compare the
#' condition of individuals of different lengths in fisheries, where W' is the
#' weight predicted from the population's log10 weight:length regression
#' (W' = a * L^b).
#'
#' @param pop_ID Optional vector of individual IDs. If NULL or of the wrong
#'   length, a sequence along the observations is generated.
#' @param pop_length Vector of lengths (in cm)
#' @param pop_weight Vector of weights (in g)
#' @param size_limits Vector (length 2) of min and max size of individuals
#'   included in the calculation. Default NULL uses all individuals.
#' @param outliers Index of any points that should be discarded from computation
#'
#' @return A list with the filtered data (including predicted weight
#'   \code{Wprim} and \code{Kn}), the regression coefficients \code{a} and
#'   \code{b}, and three ggplot objects.
#' @export
#' @examples
#' RelCond_Fisheries(pop_length = FSAdata::YPerchGL$fl/10,
#'                   pop_weight = FSAdata::YPerchGL$w)
#'
#' @keywords fisheries
#' @keywords relative condition
#'
#'
RelCond_Fisheries <- function(pop_ID=NULL, pop_length, pop_weight, size_limits=NULL, outliers = NULL) {
  # NOTE(review): this function uses ggplot2 (ggplot, aes, geom_point, ...);
  # the package must be attached/imported by the caller.
  list_out <- list()

  # Coerce to numeric (inputs may arrive as factors or characters).
  pop_weight <- as.numeric(paste(pop_weight))
  pop_length <- as.numeric(paste(pop_length))

  # Fail fast on mismatched input vectors.
  if (length(pop_weight) != length(pop_length)) stop("\n You do not have the same number of observations for weight and length. Review your data and run the function again.")

  # Fixed: the original condition was `!is.null(pop_ID) | ...`, which
  # overwrote a valid user-supplied ID vector; generate defaults only when
  # the IDs are missing or the wrong length.
  if (is.null(pop_ID) || length(pop_weight) != length(pop_ID)) pop_ID <- seq_along(pop_weight)

  # Remove outliers FIRST so the size-limit indices computed below refer to
  # the reduced vectors. (Fixed: originally `which_size_limits` was computed
  # before the outliers were dropped, so it indexed the wrong rows.)
  if (!is.null(outliers) && is.numeric(outliers)) {
    pop_ID <- pop_ID[-outliers]
    pop_length <- pop_length[-outliers]
    pop_weight <- pop_weight[-outliers]
  }

  # Normalise size_limits: fall back to the full length range on bad input.
  if (length(size_limits) %% 2 != 0) size_limits <- NULL
  if (is.null(size_limits)) size_limits <- c(min(pop_length, na.rm = TRUE), max(pop_length, na.rm = TRUE))
  size_limits <- size_limits[order(size_limits, decreasing = FALSE)]
  which_size_limits <- which(pop_length >= size_limits[1] & pop_length <= size_limits[2])

  # Restrict all vectors to the requested size window.
  pop_ID <- pop_ID[which_size_limits]
  pop_length <- pop_length[which_size_limits]
  pop_weight <- pop_weight[which_size_limits]
  list_out$Data <- data.frame(pop_ID, pop_length, pop_weight)

  # Log10-log10 regression gives W = a * L^b.
  lm_temp <- lm(log10(pop_weight) ~ log10(pop_length))
  a_temp <- 10^(lm_temp$coefficients[1])
  b_temp <- lm_temp$coefficients[2]
  list_out$a <- a_temp
  list_out$b <- b_temp

  # Kn = W / W', with W' the predicted weight at each observed length.
  list_out$Data$Wprim <- a_temp * list_out$Data$pop_length^b_temp
  list_out$Data$Kn <- list_out$Data$pop_weight / list_out$Data$Wprim

  # Raw weight:length relationship.
  p0 <- ggplot(list_out$Data, aes(pop_length, pop_weight)) +
    geom_point() +
    labs(title = "Weight:length relationship",
         x = "Length (cm))", y = "Weight (g)") +
    stat_smooth() +
    theme_bw()

  # Log-log view, from which the a and b parameters were extracted.
  p1 <- ggplot(list_out$Data, aes(log10(pop_length), log10(pop_weight))) +
    geom_point() +
    labs(title = "weight:length relationship (log10 scale)",
         subtitle = paste0("For this sample, a= ", signif(a_temp, 3), ", b= ", signif(b_temp, 3)),
         x = "log10(length)", y = "log10(weight)") +
    stat_smooth(method = "lm") +
    theme_bw()

  # Kn vs. length, with the Kn = 1 reference line.
  p2 <- ggplot(list_out$Data, aes(pop_length, Kn)) +
    geom_point() +
    labs(title = "Relative condition factor Kn",
         subtitle = bquote(K[n]*"=W/W', with W'=a"*L^b),
         x = "Length (cm)", y = "Kn") +
    stat_smooth(method = "lm", col = "coral") +
    geom_hline(yintercept = 1) +
    theme_bw()

  list_out$plot_WL <- p0
  list_out$plot_WL_log <- p1
  list_out$plot_KnL <- p2
  return(list_out)
}
|
41f30087dfca8bd44e7af1d1768e0b988b3a9771
|
1b872282a8fcfa99273958a7f95fab6a813d7d09
|
/man/do.svdGeneSetTest.Rd
|
7cdb021d5505493f8b5bea5ca25eb1062d34f60d
|
[
"MIT"
] |
permissive
|
mirax87/multiGSEA
|
cecc8e1b6ebedbe92a87ecb7a91034635b3b69c3
|
b8747abc1815ab4fa08ef024e77eee2a864ac6ed
|
refs/heads/master
| 2020-04-02T13:49:21.057653
| 2018-06-01T22:07:22
| 2018-06-01T22:07:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,125
|
rd
|
do.svdGeneSetTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/do.svdGeneSetTest.R
\name{do.svdGeneSetTest}
\alias{do.svdGeneSetTest}
\title{Worker function summarizes gene set activity by SVD score and t-test across
contrast}
\usage{
\method{do}{svdGeneSetTest}(gsd, x, design, contrast = ncol(design),
gs.idxs = as.list(gsd, active.only = TRUE, value = "x.idx"),
trend.eBayes = FALSE, ...)
}
\description{
The idea is to summarize gene set level activity per sample by their
svd (GSDecon) score to create a geneset x sample matrix. Then uses this
matrix in a limma based framework to assess their "differential expression"
This method seems like it should produce sane results, but I haven't really
tested it yet and is therefore experimental. The fact that the
"svdGeneSetTest" method isn't mentioned anywhere in the documentation yet is
by design. Still, you have found that it exists and you can try to use it
if you like (or, please contact me to tell me why it's a bad idea!)
\strong{This function is not meant to be called directly, it should only be
called internally within \code{multiGSEA}}
}
|
82bdb198cb0c87fba7cf993e3eeeb016fca05722
|
93d579c61bd0a21dd746199a55c16f7dceef5507
|
/man/get_data.Rd
|
43037184d50186aff1e148b1e67551a17aa111b4
|
[
"MIT"
] |
permissive
|
peterhurford/surveytools
|
d782eee226f560c3e171be605b0faaa289a76e2f
|
22ac08ce97ecdb95a1dcbec8c7e9569d52df0912
|
refs/heads/master
| 2020-04-06T05:20:59.077261
| 2015-03-31T01:24:54
| 2015-03-31T01:24:54
| 25,450,472
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
rd
|
get_data.Rd
|
% Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/get_data.R
\name{get_data}
\alias{get_data}
\title{Gets the data parameter that was previously set by set_data() or errors.}
\usage{
get_data()
}
\description{
Gets the data parameter that was previously set by set_data() or errors.
}
|
39e866108b945ec1875f5f5523682ac653c826d6
|
b83b5477cb9a84f3da7aa1ea728195e7270ea2f0
|
/plot3.R
|
3bc02674f6c10e640b62397e5edb1f77d93af1e8
|
[] |
no_license
|
AmelieRu/ExData_Plotting1
|
d654c108aef954ab8fe75c3bab659d338996d720
|
d31849092e2e7b3e240049559dc54e28fbc2a7f3
|
refs/heads/master
| 2021-06-25T02:04:13.951576
| 2017-08-16T19:17:53
| 2017-08-16T19:17:53
| 100,505,795
| 0
| 0
| null | 2017-08-16T15:44:32
| 2017-08-16T15:44:32
| null |
UTF-8
|
R
| false
| false
| 1,616
|
r
|
plot3.R
|
## plot3.R: download the UCI household power-consumption data (if needed) and
## plot the three sub-metering series for 1-2 Feb 2007 to plot3.png.

## Download and unzip the data if zip or txt file does not exist
filename <- "electric_power_consumption.zip"
if (!file.exists(filename)){
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(url, destfile = "electric_power_consumption.zip")
  unzip("electric_power_consumption.zip")
}
if (!file.exists("household_power_consumption.txt")) {
  unzip(filename)
}

## Read the data ("?" marks missing values in this dataset)
library("data.table")
full_data <- fread("household_power_consumption.txt", sep = ";",header = TRUE,na.strings="?")

# Subset data from 01/02/2007 and 02/02/2007
data <- full_data[(full_data$Date=="1/2/2007" | full_data$Date=="2/2/2007" ), ]

## Adapt the date and time format
# Convert the char date as a date date
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Concatenate date and time in a char vector
date_time <- paste(data$Date, data$Time)
# Transform the char vector into a date-time variables and add it to the dataset
data$Date_time <- as.POSIXct(date_time)

## Define the graphic device (480x480 px, per the assignment spec)
png("plot3.png",width=480,height=480)

## Create the plot (it will be added to the defined graphic device)
# In case your computer language is not english
Sys.setlocale("LC_TIME", "English")
# Create the plot
# NOTE(review): the y-axis label says "Global Active Power (kilowatts)" but the
# plotted series are Sub_metering_1/2/3 (energy sub metering) -- the label
# looks copy-pasted from plot2; confirm and relabel (e.g. "Energy sub metering").
with(data,plot(Date_time,Sub_metering_1,type="l",ylab="Global Active Power (kilowatts)",xlab=""))
with(data,lines(Date_time,Sub_metering_2,col='Red'))
with(data,lines(Date_time,Sub_metering_3,col='Blue'))
# Add the legend
legend("topright", lty=1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
dev.off()
|
f1c339192aabb44405592565824e3fd2c0149607
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/lowpassFilter/inst/testfiles/convolve/AFL_convolve/convolve_valgrind_files/1616007546-test.R
|
aae5d1d3f0b87ea2374e8df52420a50dbf3c6c6f
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 903
|
r
|
1616007546-test.R
|
# Auto-generated fuzz/valgrind regression input for lowpassFilter:::convolve.
# `kern` (filter kernel) and `val` (signal values) are extreme doubles chosen
# to probe numerical edge cases; do not edit by hand.
testlist <- list(kern = c(NaN, 7.87136345137687e+207, 3.36446270004699e-308, 2.83431348996114e+220, 5.99427455731732e-39, -7.44328556191484e-58, 1.43291828385645e-190, 1.43291828549171e-190, 1.28557063893826e-299, -8.08603736879684e+40, -41255400998277.5, 6.98201165124923e-310, -7.51771763333532e+250, 3.76770030813314e+268, -2.77249233472604e+263, -5.11427841749999e+189, 69.2705882356546, -7.29084514043348e-304, 4.65127004357787e+82, 3.00746714358021e+267, 7.29074519380001e-304, 1.15870590482061e-218, -8.63581823659212e-26, -1.06876506836247e+215, 1.93550994816444e+222, 9.53199924334251e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), val = c(6.98220487060317e-310, -7.51771763333532e+250, -2.75570826998156e+23, -4.57367029960048e+305 ))
# Invoke the internal (C++) convolution with the fuzzed arguments.
result <- do.call(lowpassFilter:::convolve,testlist)
# Print the structure so crashes / NaN propagation are visible in the log.
str(result)
|
31bcd7e0c33336cff87d25c5ebac48fd575f0a91
|
90ce5381487899129e90911cb01a3e83dd9aad87
|
/man/gini_process.Rd
|
dd09a3a6134a62b5d69b3ff6beb83939bdcb054e
|
[] |
no_license
|
cran/rfVarImpOOB
|
82274d6af3743d660b6178209e0136f3eb9ba15b
|
60b5b1366bc22750cf609eb6c60f78ecb2b5172a
|
refs/heads/master
| 2022-07-08T15:10:24.184676
| 2022-07-01T13:40:02
| 2022-07-01T13:40:02
| 236,881,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,098
|
rd
|
gini_process.Rd
|
\name{gini_process}
\alias{gini_process}
\title{computes Gini index }
\description{computes Gini index }
\usage{gini_process(classes, splitvar = NULL)}
\arguments{
\item{classes}{vector of factors/categorical vars}
\item{splitvar}{split variable}
}
\value{Gini index}
\author{Markus Loecher <Markus.Loecher@gmail.com>}
\examples{
#Test binary case:
#50/50split
gini_process(c(rep(0,10),rep(1,10)))#0.5 CORRECT !
#10/90split
gini_process(c(rep(0,1),rep(1,9)))#0.18= CORRECT !
#0/100split
gini_process(factor(c(rep(0,0),rep(1,10)), levels=c(0,1)))#0
#Test four-class (multinomial) case:
#25/25/25/25 split
gini_process(factor(c(rep(0,5),rep(1,5),rep(2,5),
rep(3,5)), levels=c(0:3)))#0.75 = 4*0.25*0.75 CORRECT !
#10/10/10/70 split
gini_process(factor(c(rep(0,1),rep(1,1),rep(2,1),
rep(3,7)), levels=c(0:3)))#0.48 = 3*0.1*0.9+0.7*0.3 CORRECT !
#0/0/0/100 split
gini_process(factor(c(rep(0,0),rep(1,0),rep(2,0),
rep(3,20)), levels=c(0:3)))#0. CORRECT !
}
|
fd7e0dd45be9daf8b59a026325016b2acc829552
|
96482cd992bca3a41cb506774119617f56c3129b
|
/R/calculate_MIC_OD_inhib.R
|
5bde98536b4ffab871232a60caa3ac9f27974d11
|
[] |
no_license
|
Vincent-AC/wellexplainer
|
f1600ae100b73e2c8b90a26b657449cae2317e4b
|
e4623a4f5142ac67e68e3fd875b123bfcbccafac
|
refs/heads/master
| 2021-05-15T21:44:41.583531
| 2017-11-24T16:20:03
| 2017-11-24T16:20:03
| 106,555,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,592
|
r
|
calculate_MIC_OD_inhib.R
|
#' Convert OD into inhibition percentage
#'
#' Calculate the inhibition fraction in each well in order to determine the
#' MIC (minimum inhibitory concentration) for each plate row.
#'
#' @param processed_OD_data_list List of data.frame. The first column of each
#' data.frame being the denomination of what is in the line and the following
#' columns being OD values read by the plate reader. The output of extract_MIC_OD_data
#' is already in the correct format.
#' @param negative_control_col Number of the column that contains the negative control.
#' Defaults to 12
#' @param positive_control_col Number of the column that contains the positive control.
#' Defaults to 11
#' @param highest_conc_col Number of the column that contains the highest concentration
#' of tested antibiotic. Defaults to 1
#' @param lowest_conc_col Number of the column that contains the lowest concentration
#' of tested antibiotic. Defaults to 10
#' @param number_of_lines Number of lines used by plate. Defaults to 8
#' @param species Currently unused by this function -- TODO confirm intent.
#' @keywords MIC, microdilution, OD, inhibition
#' @export
#' @examples
#' calculate_MIC_OD_inhib()
calculate_MIC_OD_inhib <-
  function(processed_OD_data_list,
           negative_control_col = 12,
           positive_control_col = 11,
           highest_conc_col = 1,
           lowest_conc_col = 10,
           number_of_lines = 8,
           species)
  {
    # One output data.frame per input plate.
    inhib_data_list <- list()
    for (i in 1:length(processed_OD_data_list))
    {
      inhib_data <- processed_OD_data_list[[i]]
      inhib_data[, "MIC"] <- NULL
      dataframe <- processed_OD_data_list[[i]]
      for (l in 1:number_of_lines)
      {
        # Column indices are shifted by +1 because column 1 of the data.frame
        # holds the row denomination, not an OD reading.
        # Inhibition = 1 - (OD - negative control) / (positive - negative control).
        for (k in (highest_conc_col +1):(negative_control_col+1))
        {
          inhib_data[l, k] <- 1 -
            ((dataframe[l, k] - dataframe[l, (negative_control_col+1)]) /
               (dataframe[l, (positive_control_col+1)] - dataframe[l, (negative_control_col+1)]))
        }
        # OD 0.4 is used as the growth/no-growth threshold for both controls
        # -- presumably an assay-specific cut-off; confirm with the protocol.
        if (dataframe[l, (positive_control_col+1)] < 0.4)
        {
          # Positive (growth) control did not grow: row result is unusable.
          inhib_data[l, "MIC"] <- "Positive control problem"
        }
        else if (dataframe[l, (negative_control_col+1)] > 0.4)
        {
          # Negative (sterility) control grew: contamination suspected.
          inhib_data[l, "MIC"] <- "Negative control problem"
        }
        else if (sum(inhib_data[l, (highest_conc_col +1):(lowest_conc_col +1)] >= 0.9) ==
                 0)
        {
          # No well reaches 90% inhibition: MIC is above the tested range.
          inhib_data[l, "MIC"] <- paste0(">", inhib_data[l, "Max.Conc"])
        }
        else if (sum(inhib_data[l, (highest_conc_col +1):(lowest_conc_col +1)] >= 0.9) ==
                 10)
        {
          # All 10 dilutions inhibited: MIC is at or below the lowest tested
          # concentration. `geomSeries` is a package-level helper (defined
          # elsewhere) generating the 2-fold dilution series from Max.Conc.
          inhib_data[l, "MIC"] <-
            paste0("<=", min(geomSeries(2, inhib_data[l, "Max.Conc"])))
        }
        else if (dataframe[l, (positive_control_col+1)] >= 0.4 &
                 dataframe[l, (negative_control_col+1)] <= 0.4)
        {
          # Mixed result: the MIC is the lowest concentration whose well shows
          # >= 90% inhibition.
          columns_without_growth <-
            as.numeric(colnames(inhib_data[l,-c(1, (positive_control_col+1):ncol(inhib_data))])[which(inhib_data[l,-c(1, (positive_control_col+1):ncol(inhib_data))] >=
                                                                                                        0.9)])
          inhib_data[l, "MIC"] <-
            min(geomSeries(2, inhib_data[l, "Max.Conc"])[columns_without_growth])
        }
      }
      inhib_data_list[[i]] <- inhib_data
      names(inhib_data_list)[i] <-
        paste0(names(processed_OD_data_list)[i],"_inhib")
    }
    return(inhib_data_list)
  }
|
8d354a94971a4f5e5e76c4b3d5565a1d03527676
|
164d2fe8bea6f6bc6e9f8397ae0a63c43b2a0768
|
/maps_app/app.R
|
d524fa90d419022725511ac1fd1d25e311611ab7
|
[] |
no_license
|
Public-Health-Scotland/covid-vulnerability
|
fab5753c64efa92cc50909da54a2c0437726af65
|
9385de13200613aea5baf5cd61053c21fe18bc95
|
refs/heads/master
| 2023-04-27T03:47:24.105962
| 2021-05-13T14:13:57
| 2021-05-13T14:13:57
| 249,380,562
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,601
|
r
|
app.R
|
# Shiny app to present the vulnerability maps
###############################################.
## Packages ----
###############################################.
library(shiny)
library(leaflet)
library(shinyWidgets)
library(shinycssloaders)
library(dplyr)
library(shinyBS) #modals
library(flextable)
###############################################.
## Functions ----
###############################################.
# Download button for charts: same markup shiny::downloadButton() produces,
# but with an image icon instead of the default download icon.
#
# Args:
#   outputId: id of the matching downloadHandler() output slot.
#   label:    text shown on the button.
#   class:    optional extra CSS classes appended after the default
#             "btn btn-default shiny-download-link" classes.
#
# Returns: an <a> tag (shiny.tag) that triggers the download when clicked.
savechart_button <- function(outputId, label = "Save chart", class=NULL){
  tags$a(id = outputId, class = paste("btn btn-default shiny-download-link", class),
         href = "", target = "_blank", download = NA, icon("image"), label)
}
# Function to create colour palettes for the map.
#
# Maps each area's vulnerability quintile (1 = most vulnerable ... 5 = least
# vulnerable) onto a red-to-yellow hex colour; any other value, including NA,
# becomes white, matching the previous case_when(TRUE ~ '#ffffff') fallback.
#
# Args:
#   dataset:       object whose quintile columns are reachable via `$`
#                  (a data.frame or a SpatialPolygonsDataFrame's @data).
#   vulnerability: which measure to colour by: "overall" (comb_quintile),
#                  "clinsoc" (dim1_quintile) or "demog" (dim2_quintile).
#
# Returns: character vector of hex colours, one per row of `dataset`.
create_map_palette <- function(dataset, vulnerability = c("overall", "clinsoc", "demog")){
  # Fail loudly on an unknown vulnerability type instead of silently
  # returning NULL as the previous if/else chain did.
  vulnerability <- match.arg(vulnerability)
  quintile <- switch(vulnerability,
                     overall = dataset$comb_quintile,
                     clinsoc = dataset$dim1_quintile,
                     demog   = dataset$dim2_quintile)
  # Colours for quintiles 5, 4, 3, 2, 1 respectively. match() uses exact
  # equality (like the original `==` tests), so NA or out-of-range quintiles
  # fall through to white below.
  colours <- c("#FFFFB2", "#FECC5C", "#FD8D3C", "#F03B20", "#BD0026")
  out <- colours[match(quintile, c(5, 4, 3, 2, 1))]
  out[is.na(out)] <- '#ffffff'
  out
}
###############################################.
## Shapefiles and other objects ----
###############################################.
# The "creation" of the mpas needs to be done in the app as bug with leaflet
# see https://community.rstudio.com/t/leaflet-map-working-in-local-shiny-but-couldnt-normalize-path-error-on-shinyapps-io/35638
# Reading in shapefiles for each area type
dz_bound <- readRDS("data/dz_bound.rds")
iz_bound <- readRDS("data/iz_bound.rds")
ca_bound <- readRDS("data/ca_bound.rds")
#Indicator list
indicator_list <- readRDS("data/COVID19 PCA Indicator List.rds")
# labels for map
labels_map <- c("Most vulnerable", "", "", "", "Least vulnerable")
#color palette
col_pal <- c("#BD0026", "#F03B20", "#FD8D3C", "#FECC5C", "#FFFFB2")
###############################################.
## UI ----
###############################################.
ui <- fluidPage(
includeCSS("www/styles.css"), #adding csss to app
tags$link(rel="shortcut icon", href="favicon_scotpho.ico"), #Icon for browser tab
# Application title
titlePanel(div(tags$a(img(src="scotpho_reduced.png", height=40), href= "http://www.scotpho.org.uk/"),
"ScotPHO COVID-19 Community Vulnerability", style = "position: relative; top: -5px;"),
windowTitle = "ScotPHO COVID-19 Community Vulnerability"), #browser tab name
"In addition to clinical and public health responses to the Coronavirus (COVID-19) pandemic, action will
be required to mitigate a range of associated social harms. ScotPHO have produced maps to illustrate a COVID-19 community vulnerability measure based on available demographic, social and clinical indicators relevant either directly to COVID-19 or to socio-economic factors that are likely to
modify the impacts of the pandemic and efforts to delay it.",br(),
"The maps do not include any real-time information about COVID-19 infections, hospitalisations or deaths.The shading shows vulnerability of a geographical area to health and care systems and socio-economic impacts.",br(),
"A briefing paper and summary analysis have also been produced to complement these maps & provide additional detail of methodology.",br(),
tags$ul(
#Link to Briefing Paper
tags$li(class= "li-custom", tags$a(href="https://www.scotpho.org.uk/media/1969/scotpho-covid-19-community-vulnerability-brieifing-20200331.docx",
"ScotPHO COVID-19 Community Vulnerability Briefing (31/03/2020)", class="externallink")),
#Link to Excel analysis
tags$li(class= "li-custom", tags$a(href="https://www.scotpho.org.uk/media/1974/scotpho-covid-19-community-vulnerability-analysis_v3.xlsx",
"ScotPHO CoVID-19 Community Vulnerability Analysis V3.0 (03/04/2020) **Now includes population weighted quintiles**", class="externallink")),
# Link to technical document
tags$li(class= "li-custom", tags$a(href="https://www.scotpho.org.uk/media/1968/scotpho-covid_19-technical-document-20200331.docx",
"ScotPHO CoVID-19 Community Vulnerability Technical Document - DRAFT (31/03/2020)", class="externallink"))), br(),
p("Contact: ",tags$b(tags$a(href="mailto:phs.scotpho@phs.scot", "phs.scotpho@phs.scot", class="externallink"))),br(),
sidebarPanel(
radioGroupButtons("map_area",
label= "Select a geography level",
choices = c("Council area", "Intermediate zone", "Data zone"),
status = "primary", justified = TRUE, direction = "vertical"),
radioGroupButtons("quintile_type",
label= "Select quintile type",
choices = c("Population weighted", "No population weights"),
status = "primary", justified = TRUE, direction = "vertical"),
actionButton("indicator_help",label=HTML("Indicators used to<br/>calculate vulnerability"), icon= icon('question-circle'), class ="down"),
savechart_button('download_mapplot', 'Save map', class= "down")
# actionButton("browser", "Browser") # used for testing
), #sidebar panel bracket
mainPanel(
h4(textOutput("uimap_title"), style="color: black; text-align: left"),
textOutput("uimap_subtitle"),
withSpinner(leafletOutput("map_vuln", width="100%",height="550px"))
),#mainpanel bracket
p("Methodology", style = "font-weight: bold; color: black;"), #
"The measures presented in this tool have been developed in response to COVID-19 and may be further refined over time.",br(),
p("ScotPHO have used data routinely available from the Health and Wellbeing Profile in the ",tags$a(href="https://scotland.shinyapps.io/ScotPHO_profiles_tool/","ScotPHO Online Profiles Tool", class="externallink")," (with the exception of the Diabetes Hospitalisations indicator). The indicators were chosen on the basis of being immediately available to the ScotPHO team; and were demographic or clinical indicators deemed to be directly relevant to COVID-19, clinical indicators relevant to other demands on clinical services or population health, or social factors that would be likely to modify the impact of COVID-19 on communities. We used the most recent period available in all cases (depending on the indicator this could be a single year value or an multi-year aggregated value.)."),
p("We combined these indicators into a single measure for overall vulnerability across each of the three domains using an approach called principal components analysis (PCA)."), br()
) #fluidpage bracket
###############################################.
## Server ----
###############################################.
server <- function(input, output) {
# observeEvent(input$browser, browser()) # used for testing
###############################################.
# Map rendering
output$map_vuln <- renderLeaflet({
# Selecting shapefile based on user input
if (input$map_area == "Data zone") {
map_bound <- dz_bound
} else if (input$map_area == "Intermediate zone") {
map_bound <- iz_bound
} else if (input$map_area == "Council area") {
map_bound <- ca_bound
}
# Renaming variables depending if population weighted or not
if (input$quintile_type == "Population weighted") {
names(map_bound@data)[names(map_bound@data)=="comb_quintile"] <- "old_comb_quintile"
names(map_bound@data)[names(map_bound@data)=="comb_weightedq"] <- "comb_quintile"
names(map_bound@data)[names(map_bound@data)=="dim1_quintile"] <- "old_dim1_quintile"
names(map_bound@data)[names(map_bound@data)=="dim1_weightedq"] <- "dim1_quintile"
names(map_bound@data)[names(map_bound@data)=="dim2_quintile"] <- "old_dim2_quintile"
names(map_bound@data)[names(map_bound@data)=="dim2_weightedq"] <- "dim2_quintile"
} else {
map_bound <- map_bound
}
# Creating colour palettes for the map
map_pal_over <- create_map_palette(map_bound, "overall")
map_pal_clinsoc <- create_map_palette(map_bound, "clinsoc")
map_pal_demog <- create_map_palette(map_bound, "demog")
###############################################.
# Creating map
# One layer for each type of vulnerability, this could be moved to a dropdown
# for quicker loading (although every change of inputs will take time to reload)
map_dz <- leaflet() %>%
addProviderTiles(providers$CartoDB.Positron) %>% # base tiles
# Overall layer
addPolygons(data= map_bound, group = "Overall",
color = "#444444", weight = 1, smoothFactor = 0.5,
label = (sprintf( #tooltip
"<strong>%s %s</strong>",
map_bound$area_name, map_bound$code) %>% lapply(htmltools::HTML)),
opacity = 1.0, fillOpacity = 0.5, fillColor = map_pal_over,
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE)) %>%
# Social/clinical layer
addPolygons(data= map_bound, group = "Social/Clinical",
color = "#444444", weight = 1, smoothFactor = 0.5,
label = (sprintf( #tooltip
"<strong>%s %s</strong>",
map_bound$area_name, map_bound$code) %>% lapply(htmltools::HTML)),
opacity = 1.0, fillOpacity = 0.5, fillColor = map_pal_clinsoc,
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE)) %>%
# Demographic layer
addPolygons(data=map_bound, group = "Demographic",
color = "#444444", weight = 1, smoothFactor = 0.5,
label = (sprintf( #tooltip
"<strong>%s %s</strong>",
map_bound$area_name, map_bound$code) %>% lapply(htmltools::HTML)),
opacity = 1.0, fillOpacity = 0.5, fillColor = map_pal_demog,
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE)) %>%
# Adding radio buttons to select layer
addLayersControl(baseGroups = c("Overall", "Social/Clinical", "Demographic"),
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend(colors = col_pal, labels = labels_map ) # adding legend
}) # end of renderLeaflet
###############################################.
# Downloading the maps
plot_map_download <- function(){
# Select shapefile based on user input
if (input$map_area == "Data zone") {
map_bound <- dz_bound
} else if (input$map_area == "Intermediate zone") {
map_bound <- iz_bound
} else if (input$map_area == "Council area") {
map_bound <- ca_bound
}
# Select variables to show in map depending if population weighted or not
if (input$quintile_type == "Population weighted") {
names(map_bound@data)[names(map_bound@data)=="comb_quintile"] <- "old_comb_quintile"
names(map_bound@data)[names(map_bound@data)=="comb_weightedq"] <- "comb_quintile"
names(map_bound@data)[names(map_bound@data)=="dim1_quintile"] <- "old_dim1_quintile"
names(map_bound@data)[names(map_bound@data)=="dim1_weightedq"] <- "dim1_quintile"
names(map_bound@data)[names(map_bound@data)=="dim2_quintile"] <- "old_dim2_quintile"
names(map_bound@data)[names(map_bound@data)=="dim2_weightedq"] <- "dim2_quintile"
} else {
map_bound <- map_bound
}
# Create palettes for map
color_map <- case_when(input$map_vuln_groups == "Overall" ~ create_map_palette(map_bound, "overall"),
input$map_vuln_groups == "Social/Clinical" ~ create_map_palette(map_bound, "clinsoc"),
input$map_vuln_groups == "Demographic" ~ create_map_palette(map_bound, "demog"))
# Plotting map with title
title_map <- paste0("ScotPHO COIVD19 Community:", input$map_vuln_groups)
plot(map_bound, col=color_map)
title(title_map, cex.main = 20, line = -10) # adding title
} #end of function
#Donwloading map chart
output$download_mapplot <- downloadHandler(
filename = 'map.png',
content = function(file){
png(file, width = 6000, height = 6000, units = "px")
plot_map_download()
dev.off()
})
###############################################.
# Title/Subtitle
# Generate text string for map title and subtitle in UI
output$uimap_title <- renderText(paste0("ScotPHO COIVD-19 Community Vulnerability: ",
input$map_vuln_groups))
output$uimap_subtitle<- renderText(paste0(input$map_area,", ",input$quintile_type))
###############################################.
# Modal with indicator information
# Indicator details help button
observeEvent(input$indicator_help, {showModal(indicator_help_modal)})
# Function to create flextable displying indicators and their associated domains.
indicator_flextable <- function(){
ft <- indicator_list %>%
select(domain,indicator, geography) %>%
flextable() %>%
set_header_labels (domain="Vulnerability Domain",indicator="Indicator",geography="Available geography") %>%
merge_v(j = ~ domain) %>%
theme_box() %>%
autofit()
align_text_col(ft,align = "left", header = TRUE, footer = TRUE) %>%
htmltools_value() }
# Display text for indicator help
indicator_help_modal <- modalDialog(
title = "Which indicators are included in ",
p("The ScotPHO COVID-19 community vulnerability is calculated from a combination of different indicators."),
p("The overall vulnerability measure is calculated using all indicators available."),
p("The social/clinical vulnerability measure is calculated using clinical and social indicators only."),
p("The demographic vulnerability measure is calculated using demographic indicators only."),
p("Note some indicators are not available for smaller geographies (e.g. DZ or IZ) and are therefore not part of the vulnerability score for those geographies."),
indicator_flextable (),
p("DZ- Datazone, IZ- Intermediate Zone, CA - Council Area"),
size = "l", easyClose = TRUE, fade=FALSE,
footer = modalButton("Close (Esc)"))
} # server end
# Run the application
shinyApp(ui = ui, server = server)
|
78f82d82542cf79902b553e1242f92866ab43e5b
|
2914217dfab141d994f7377f46768f16ce1d1cf4
|
/cachematrix.R
|
5c12f5f6f04bbeed5b2ee6ef4eade04531c52157
|
[] |
no_license
|
rajp3k/ProgrammingAssignment2
|
058d5287278be6fd466e6e43fdc27358db89c943
|
c58bbfcf41e5f3e683a0845ded0af4c7c86bc18e
|
refs/heads/master
| 2022-11-20T17:29:05.057178
| 2020-07-20T03:35:48
| 2020-07-20T03:35:48
| 280,801,314
| 0
| 0
| null | 2020-07-19T05:50:22
| 2020-07-19T05:50:22
| null |
UTF-8
|
R
| false
| false
| 1,143
|
r
|
cachematrix.R
|
## Below 2 functions cache the inverse of a matrix, so
## if matrix doesn't change, inverse doesn't have to be
## recomputed since it's a costly computation.
## Wrap a matrix in a list of closures that can cache its inverse.
##
## The returned list exposes:
##   set(y)    - replace the stored matrix and clear any cached inverse
##   get()     - return the stored matrix
##   setinv(v) - store a computed inverse in the cache
##   getinv()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix invalidates the old inverse
    },
    get = function() {
      x
    },
    setinv = function(solve) {
      cached_inverse <<- solve
    },
    getinv = function() {
      cached_inverse
    }
  )
}
## Return the inverse of the special "matrix" built by makeCacheMatrix.
##
## On the first call the inverse is computed with solve() (extra arguments
## in `...` are forwarded to it) and stored in the wrapper's cache; later
## calls emit a message and serve the cached value instead of recomputing.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("Getting cached data")
  }
  cached
}
|
4d60b93c5a7f1414c7afdccb02a631b46b3c06dc
|
f46d5757c1429c292ab17473c66c0edc302e1549
|
/Project2/plot2.R
|
2283ad88face22d92521c2d2aa8c68b08c199232
|
[] |
no_license
|
bltgent/ExData_Plotting1
|
142fb4839ff34362688373e13cbf4bf952ea8a91
|
2af91f6482e199a2edcfd33ab2bfb606da149527
|
refs/heads/master
| 2021-01-17T18:32:57.152612
| 2015-01-08T00:46:11
| 2015-01-08T00:46:11
| 26,229,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
plot2.R
|
#plot2 - total PM2.5 emissions per year for Baltimore City (fips 24510)
#Load the dplyr package
library(dplyr)
#Access the data (both .rds files are expected in the working directory)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Filter only the observations in which fips==24510
a <- filter(NEI, fips == 24510)
#Group the subset by year
a <- group_by(a, year)
#Sum the Emissions data within each year
a <- summarise_each(a, funs(sum), Emissions)
#Plot straight to a PNG device instead of plotting on screen and
#dev.copy()-ing: the copy approach needed a screen device, and the previous
#triple dev.off() closed unrelated graphics devices (or errored trying to
#shut down the null device). Exactly one dev.off() closes the png device.
png(file = "plot2.png", width = 480, height = 480)
plot(a)
dev.off()
|
de4efe4d74d7a3553731b64be6f3345552ffcb56
|
237395abaf91d724530cf0cc5f4f0a5c07d806c4
|
/testScript/fftwtoolsTest.R
|
b366b114feddb52378cfafb333bd91fbf331ed2e
|
[] |
no_license
|
krahim/fftwtools
|
7493381e50c24df1291101f9dd248f25f2abdb2d
|
e861cb655bd33e0c492f8c0e849746a6b961a11c
|
refs/heads/master
| 2021-05-04T11:37:34.111426
| 2021-02-27T22:03:39
| 2021-02-27T22:03:39
| 8,509,251
| 8
| 11
| null | 2021-02-27T23:08:42
| 2013-03-01T20:04:29
|
R
|
UTF-8
|
R
| false
| false
| 1,612
|
r
|
fftwtoolsTest.R
|
library("fftwtools")
## Raw wrapper around the compiled "fft_r2c_2d" routine: 2-D real-to-complex
## FFT of a numeric matrix, returning only the non-redundant half of the
## spectrum as a complex matrix with floor(nrow/2)+1 rows and ncol(data)
## columns.
## NOTE(review): requires the "fft_r2c_2d" symbol from the fftwtools shared
## library to be loaded; not callable stand-alone.
fftw_r2c_2d_raw <- function(data) {
    ## can be done with two mvfft's t(mvfft(t(mvfft(a))))
    ## == mvfft(t(mvfft(t(a))))
    data <- as.matrix(data)
    nR <- dim(data)[1]
    nC <- dim(data)[2]
    ## only floor(nR/2)+1 rows of the spectrum are stored for real input
    nRc <- floor(nR/2) +1
    ##idxRowAppend <- (nRc - (nRc %% 2)):2
    ##correct for the fact the c call is column-major
    out <- .C("fft_r2c_2d", as.integer(nC), as.integer(nR),
              as.double(data), res=matrix(as.complex(0), nRc , nC))
    out$res
}
## 2-D discrete Fourier transform of a matrix, built from two passes of the
## 1-D column-wise FFT: transform along the rows first (via transposes),
## then along the columns. Equivalent to stats::fft() applied to a matrix.
fft2d <- function(X) {
    row_transformed <- t(mvfft(t(X)))
    mvfft(row_transformed)
}
x1 = matrix(1:10, 5, 2)
x2 = matrix(1:12, 6, 2)
x=c(1, 2, 3, 9, 8, 5, 1, 2, 9, 8, 7, 2)
x= t(matrix(x, nrow=4))
fftw_c2c_2d(fftw_c2c_2d(x), inverse=1)/12
fftw_c2c_2d(fftw_c2c_2d(x1), inverse=1)/10
fftw_c2c_2d(fftw_c2c_2d(x2), inverse=1)/12
fftw_c2c_2d(fftw_c2c_2d(t(x)), inverse=1)/12
fftw_c2c_2d(fftw_c2c_2d(t(x1)), inverse=1)/10
fftw_c2c_2d(fftw_c2c_2d(t(x2)), inverse=1)/12
fftw_c2c_2d(fftw_r2c_2d(x), inverse=1)/12
fftw_c2c_2d(fftw_r2c_2d(x1), inverse=1)/10
fftw_c2c_2d(fftw_r2c_2d(x2), inverse=1)/12
fftw_c2c_2d(fftw_r2c_2d(t(x)), inverse=1)/12
fftw_c2c_2d(fftw_r2c_2d(t(x1)), inverse=1)/10
fftw_c2c_2d(fftw_r2c_2d(t(x2)), inverse=1)/12
fftw_c2c_2d(x)
fftw_r2c_2d(x)
fftw_c2c_2d(t(x))
fftw_r2c_2d(t(x))
fftw_r2c_2d_raw (t(x))
res <- fftw_r2c_2d_raw (t(x))
fftw_c2c_2d(x1)
fftw_r2c_2d(x1)
fftw_c2c_2d(t(x1))
fftw_r2c_2d(t(x1))
fftw_c2c_2d(x2)
fftw_r2c_2d(x2)
fftw_c2c_2d(t(x2))
fftw_r2c_2d(t(x2))
## looks good but ....
## consider this mvfftw_r2c
mvfftw(x)
mvfftw_r2c(x)
mvfft(x1)
mvfftw_r2c(x1)
mvfft(x2)
mvfftw_r2c(x2)
## looks good March 17
|
97f15f784a74d9722f0de5812d39f46a735b5238
|
007ae03cfe5abf41a0ad864eade451141c267cca
|
/auto-docs/executables/r/line_shapes.r
|
89c0d44c160fea310ca8754dd376a4e2548aa0bf
|
[] |
no_license
|
VukDukic/documentation
|
ca96eb1994eeb532fe60c542960b017354bcede1
|
8e5aefdc38788956cfe31d8fe8b4b77cdf790e57
|
refs/heads/master
| 2021-01-18T09:02:27.034396
| 2015-01-20T23:46:58
| 2015-01-20T23:46:58
| 30,007,728
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,885
|
r
|
line_shapes.r
|
# Learn about API authentication here: {{BASE_URL}}/r/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api
library(plotly)
py <- plotly(username='TestBot', key='r1neazxo9w')
trace1 <- list(
x = c(1, 2, 3, 4, 5),
y = c(1, 3, 2, 3, 1),
mode = "lines+markers",
name = "'linear'",
line = list(shape = "linear"),
type = "scatter"
)
trace2 <- list(
x = c(1, 2, 3, 4, 5),
y = c(6, 8, 7, 8, 6),
mode = "lines+markers",
name = "'spline'",
text = c("tweak line smoothness<br>with 'smoothing' in line object", "tweak line smoothness<br>with 'smoothing' in line object", "tweak line smoothness<br>with 'smoothing' in line object", "tweak line smoothness<br>with 'smoothing' in line object", "tweak line smoothness<br>with 'smoothing' in line object", "tweak line smoothness<br>with 'smoothing' in line object"),
line = list(shape = "spline"),
type = "scatter"
)
trace3 <- list(
x = c(1, 2, 3, 4, 5),
y = c(11, 13, 12, 13, 11),
mode = "lines+markers",
name = "'vhv'",
line = list(shape = "vhv"),
type = "scatter"
)
trace4 <- list(
x = c(1, 2, 3, 4, 5),
y = c(16, 18, 17, 18, 16),
mode = "lines+markers",
name = "'hvh'",
line = list(shape = "hvh"),
type = "scatter"
)
trace5 <- list(
x = c(1, 2, 3, 4, 5),
y = c(21, 23, 22, 23, 21),
mode = "lines+markers",
name = "'vh'",
line = list(shape = "vh"),
type = "scatter"
)
trace6 <- list(
x = c(1, 2, 3, 4, 5),
y = c(26, 28, 27, 28, 26),
mode = "lines+markers",
name = "'hv'",
line = list(shape = "hv"),
type = "scatter"
)
data <- list(trace1, trace2, trace3, trace4, trace5, trace6)
layout <- list(legend = list(
y = 0.5,
traceorder = "reversed",
font = list(size = 16),
yref = "paper"
))
response <- py$plotly(data, kwargs=list(layout=layout, filename="line-shapes", fileopt="overwrite"))
url <- response$url
|
981cc47f3e479fe2f649e3c816b2317e8663d8a4
|
fe8d79231e4bec69632a1aaa1c5a76923f519c9e
|
/scripts/stat_fill_contour.R
|
959bba9c6e05b5b0e80a21116d6698943844c170
|
[] |
no_license
|
eliocamp/tesis
|
f56064499e9fea279d06eb84ff529315f4adab4d
|
475d4a5ff56884f3426bd602a26f0fca22c977f0
|
refs/heads/master
| 2021-01-13T05:31:25.241667
| 2018-07-06T19:21:04
| 2018-07-06T19:21:22
| 86,624,691
| 7
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,821
|
r
|
stat_fill_contour.R
|
#' Filled-contour layer for ggplot2.
#'
#' Thin layer constructor: pairs the custom StatFillContour stat (which
#' turns a regular x/y/z grid into closed contour polygons) with a polygon
#' geom by default.
#'
#' @param mapping,data,geom,position,show.legend,inherit.aes standard
#'   ggplot2 layer arguments.
#' @param ... forwarded to the stat as params (e.g. bins, binwidth, breaks).
#' @param na.rm should missing values be removed with a warning?
stat_fill_contour <- function(mapping = NULL, data = NULL,
                              geom = "polygon", position = "identity",
                              ...,
                              na.rm = FALSE,
                              show.legend = NA,
                              inherit.aes = TRUE) {
  layer(
    data = data,
    mapping = mapping,
    stat = StatFillContour,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      ...
    )
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatFillContour <- ggproto("StatFillContour", Stat,
required_aes = c("x", "y", "z"),
default_aes = aes(fill = ..int.level..),
compute_group = function(data, scales, bins = NULL, binwidth = NULL,
breaks = NULL, complete = FALSE, na.rm = FALSE,
exclude = NA) {
# If no parameters set, use pretty bins
if (is.null(bins) && is.null(binwidth) && is.null(breaks)) {
breaks <- pretty(range(data$z), 10)
}
# If provided, use bins to calculate binwidth
if (!is.null(bins)) {
binwidth <- diff(range(data$z)) / bins
}
# If necessary, compute breaks from binwidth
if (is.null(breaks)) {
breaks <- fullseq(range(data$z), binwidth)
}
# if (is.null(binwidth)) {
# binwidth <- diff(range(data$z)) / length(breaks)
# }
breaks.keep <- breaks[!(breaks %in% exclude)]
f <<- data # debug
dx <- abs(diff(subset(data, y == data$y[1])$x)[1])
dy <- abs(diff(subset(data, x == data$x[1])$y)[1])
range.data <- as.data.frame(sapply(data[c("x", "y", "z")], range))
extra <- rbind(
expand.grid(y = c(range.data$y[2] + dy, range.data$y[1] - dy),
x = unique(data$x)),
expand.grid(y = c(unique(data$y), range.data$y[2] + dy, range.data$y[1] - dy),
x = c(range.data$x[1] - dx, range.data$x[2] + dx))
)
# Y le doy un valor muy bajo.
# extra$z <- range.data$z[1] - 3*binwidth
mean.z <- mean(data$z)
mean.level <- breaks.keep[breaks.keep %~% mean.z]
extra$z <- mean.z
# extra$PANEL <- data$PANEL[1]
cur.group <- data$group[1]
# extra$group <- data$group[1]
# dbug.data <<- copy(data)
# dbug.extra <<- copy(extra)
data2 <- rbind(data[c("x", "y", "z")], extra)
cont <- ggplot2:::contour_lines(data2, breaks.keep, complete = complete)
setDT(cont)
co <<- copy(cont) # debug
data3 <<- data2 # debug
cont <- CorrectFill(cont, data2, breaks)
i <- which(breaks.keep == mean.level)
correction <- (breaks.keep[i + sign(mean.z - mean.level)] - mean.level)/2
# correction <- 0
mean.cont <- data.frame(
level = mean.level,
x = c(rep(range.data$x[1], 2), rep(range.data$x[2], 2)),
y = c(range.data$y[1], rep(range.data$y[2], 2), range.data$y[1]),
piece = max(cont$piece) + 1,
int.level = mean.level + correction)
mean.cont$group <- factor(paste(cur.group, sprintf("%03d", mean.cont$piece), sep = "-"))
cont <- rbind(cont, mean.cont)
co.2 <<- copy(cont) # debug
areas <- cont[, .(area = abs(area(x, y))), by = .(piece)][
, rank := frank(-area, ties.method = "dense")]
areas <- areas[, head(.SD, 1), by = piece]
cont <-cont[areas, on = "piece"]
cont[, piece := rank]
cont[, group := factor(paste(cur.group,
sprintf("%03d", piece), sep = "-"))]
cont$x[cont$x > range.data$x[2]] <- range.data$x[2]
cont$x[cont$x < range.data$x[1]] <- range.data$x[1]
cont$y[cont$y < range.data$y[1]] <- range.data$y[1]
cont$y[cont$y > range.data$y[2]] <- range.data$y[2]
cont
}
)
# From https://stat.ethz.ch/pipermail/r-help/2004-December/063046.html
#
# Signed area of the polygon with vertices (x, y), via the shoelace formula.
# The ring is closed automatically (first vertex appended at the end).
# Positive for counter-clockwise vertex order, negative for clockwise;
# callers needing only the magnitude should take abs() (as CorrectFill does).
area <- function(x, y){
  # Guard against silent recycling: the original matrix(c(x, y), ncol = 2)
  # would recycle mismatched inputs with only a warning.
  stopifnot(length(x) == length(y))
  # Close the ring by repeating the first vertex.
  xs <- c(x, x[1])
  ys <- c(y, y[1])
  n <- length(xs)
  # Shoelace sum over consecutive edges. The original wrote `x[1:lx-1]`,
  # which is x[(1:lx)-1] = x[0:(lx-1)] and only worked because R silently
  # drops the 0 index; the explicit 1:(n-1) below is the intended range.
  -sum((xs[2:n] - xs[1:(n - 1)]) * (ys[2:n] + ys[1:(n - 1)])) / 2
}
CorrectFill <- function(cont, data, breaks) {
levels <- breaks
m.level <- -levels[2] + 2*levels[1]
M.level <- 2*levels[length(levels)] - levels[length(levels) - 1]
levels <- c(m.level, levels, M.level)
cont[, int.level := 0]
pieces <- unique(cont$piece)
data <- as.data.table(data)
x.data <- unique(data$x)
x.data <- x.data[order(x.data)]
x.N <- length(x.data)
# range.x <- range(data$x)
y.data <- unique(data$y)
y.data <- y.data[order(y.data)]
y.N <- length(y.data)
# range.y <- range(data$y)
for (p in pieces) {
level <- cont[piece == p, level[1]]
i <- which(levels == level)
cur.piece <- cont[piece == p]
p0 <- cur.piece[x >= x.data[2] & x <= x.data[x.N-1]
& y >= y.data[2] & y <= y.data[y.N-1]][1]
if (nrow(p0[!is.na(x)]) == 0) {
inside.z <- level
} else {
if (p0$x %in% x.data) {
y1 <- Closest(y.data, p0$y, sign = 1)
y2 <- Closest(y.data, p0$y, sign = -1)
p1 <- data[x == p0$x & y == y1]
p2 <- data[x == p0$x & y == y2]
} else {
x1 <- Closest(x.data, p0$x, sign = 1)
x2 <- Closest(x.data, p0$x, sign = -1)
p1 <- data[x == x1 & y == p0$y]
p2 <- data[x == x2 & y == p0$y]
}
points <- rbind(p1, p2)
# Get one point whose value is NOT equal to the contour level.
points[, equal := (z == cur.piece$level[1])]
points <- points[equal == FALSE][1]
points[, inside := IsInside(x, y, cur.piece$x, cur.piece$y)]
# if (IsInside(p1$x, p1$y, cur.piece$x, cur.piece$y)) {
# inside.z <- p1$z
# } else {
# inside.z <- p2$z
# }
}
correction <- (levels[i + sign(points$z - level)] - level)/2
# Change sign of correction if point is outside.
corr.sign <- as.numeric(points$inside)*2 - 1
correction <- correction*corr.sign
cont[piece == p, int.level := level + correction]
}
return(cont)
}
# "Similar"
`%~%` <- function(x, target) {
x <- abs(x - target)
return(x == suppressWarnings(min(x)))
}
# Value of x closest to target on one side only: sign = 1 considers only
# candidates at or above target, sign = -1 only candidates at or below it.
# Only the first element of `sign` is used. Returns a zero-length vector
# when no candidate lies on the requested side.
Closest <- function(x, target, sign = c(1, -1)) {
  signed_dist <- (x - target) * sign[1]
  signed_dist[signed_dist < 0] <- NA  # discard wrong-side candidates
  x[which.min(abs(signed_dist))]     # which.min() skips NA entirely
}
# TRUE when point (xp, yp) is not strictly outside the polygon (x, y):
# interior points, edge hits and vertex hits all count as inside
# (sp::point.in.polygon returns 0 only for strictly-outside points).
IsInside <- function(xp, yp, x, y) {
  sp::point.in.polygon(xp, yp, x, y) != 0
}
# v3d <- reshape2::melt(volcano)
# names(v3d) <- c("x", "y", "z")
#
# ggplot(v3d, aes(x, y, z = z)) + scale_color_brewer(type = "qual", palette = 3)+
# stat_fill_contour(color = "black", size = 0.2, binwidth = 10)
# dates <- unique(ncep$date)
# ggplot(RepeatLon(ncep[date %in% dates[6]]), aes(lon, lat, z = gh.t)) +
# stat_fill_contour() +
# map.SH.3 +
# geom_contour(color = "black", size = 0.2) +
# stat_contourlabel(step = 1, size = 2.5, geom = "label",
# label.padding = unit(0.1, "lines"),
# label.r = unit(0, "lines")) +
# # coord_map(projection = "stereographic", orientation = c(-90, 0, 0)) +
# coord_polar() +
# scale_x_longitude() +
# ylim(c(-90, -10)) +
# facet_wrap(~date) +
# # scale_fill_viridis()
# scale_fill_divergent(name = "Geopotential Height Anomalies") +
# guides(fill = guide_colorbar(title.position = "top")) +
# ggtitle("Filled Contours and easy \ncontour labels in ggplot2")
# # stat_fill_contour(aes(fill = ..levelc.., label = ..rank..), geom = "text",
# # size = 3)
# # xlim(c(0, 50)) + ylim(c(0, 90))
# ggplot(h, aes(x, y)) +
# geom_path(aes(group = group)) +
# geom_point(color = "red") +
# geom_point(data = f)
|
a21a55276556d90a2304545a758911dce7527394
|
e6b810dd97a74b96e814c61467f56818c6459ab0
|
/tests/testthat/test-nsims.R
|
fb4d04de8d8f8154c152825e55f843415f3d2871
|
[
"MIT"
] |
permissive
|
poissonconsulting/universals
|
32800e3dc2ebb8c8c27cad5fe6d6633fd594177f
|
152629f241f50f690d51a7770a81e32e8c62815c
|
refs/heads/main
| 2022-10-20T13:49:00.645542
| 2022-10-15T12:31:36
| 2022-10-15T12:31:36
| 234,643,049
| 4
| 1
|
NOASSERTION
| 2022-06-17T21:41:06
| 2020-01-17T21:53:16
|
R
|
UTF-8
|
R
| false
| false
| 321
|
r
|
test-nsims.R
|
test_that("", {
expect_identical(nsims(nlist::nlist()), 1L)
expect_identical(nsims(nlist::nlist(x = 1)), 1L)
})
test_that("", {
expect_identical(nsims(nlist::nlists()), 0L)
expect_identical(nsims(nlist::nlists(nlist::nlist())), 1L)
expect_identical(nsims(nlist::nlists(nlist::nlist(), nlist::nlist())), 2L)
})
|
14ca5a0627c193a3423ede6c94500486228e7133
|
2975fba6bf359214c55e7d936f896a5a4be3d8f5
|
/R/discreteRoot.R
|
72faadfb0191fcccc7dcf6cfd415cdb2e94e1dfc
|
[] |
no_license
|
tagteam/riskRegression
|
6bf6166f098bbdc25135f77de60122e75e54e103
|
fde7de8ca8d4224d3a92dffeccf590a786b16941
|
refs/heads/master
| 2023-08-08T03:11:29.465567
| 2023-07-26T12:58:04
| 2023-07-26T12:58:04
| 36,596,081
| 38
| 14
| null | 2023-05-17T13:36:27
| 2015-05-31T09:22:16
|
R
|
UTF-8
|
R
| false
| false
| 11,254
|
r
|
discreteRoot.R
|
### discreteRoot.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: nov 22 2017 (13:39)
## Version:
## Last-Updated: Feb 10 2023 (09:19)
## By: Thomas Alexander Gerds
## Update #: 250
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:
## * discreteRoot - Documentation
#' @title Dichotomic search for monotone function
#' @description Find the root of a monotone function on a discrete grid of value using dichotomic search
#' @name discreteRoot
#'
#' @param fn [function] objective function to minimize in absolute value.
#' @param grid [vector] possible minimizers.
#' @param increasing [logical] is the function fn increasing?
#' @param check [logical] should the program check that fn takes a different sign for the first vs. the last value of the grid?
#' @param tol [numeric] the absolute convergence tolerance.
## * discreteRoot
#' @rdname discreteRoot
#' @return A list with elements \code{par} (the grid point found, or NA),
#' \code{value} (fn evaluated at \code{par}), \code{counts} (number of
#' iterations), \code{cv} (FALSE on normal termination) and \code{message}
#' (NULL, or an explanation when no solution can exist on the grid).
#' @export
discreteRoot <- function(fn, grid, increasing = TRUE, check = TRUE,
                         tol = .Machine$double.eps ^ 0.5) {

    n.grid <- length(grid)
    value.grid <- rep(NA, n.grid)
    iter <- 1
    ncv <- TRUE
    iSet <- 1:n.grid
    ## +1 when fn is increasing, -1 when decreasing: multiplying fn's values
    ## by `factor` lets the search below always reason as if fn increased.
    factor <- c(-1,1)[increasing+1]

### ** Check that a sign change (hence a root) can exist on the grid
    if(check){
        value.grid[1] <- fn(grid[1])
        value.grid[n.grid] <- fn(grid[n.grid])
        ## BUG FIX: the original compared sign(value.grid[1]) to the *value*
        ## value.grid[n.grid] rather than its sign, so "no sign change on
        ## the grid" was almost never detected.
        if(sign(value.grid[1])==sign(value.grid[n.grid])){
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution because the function does not change sign \n"))
        }
        if(increasing[[1]] && value.grid[[1]] > value.grid[[n.grid]]){
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
        if(!increasing[[1]] && value.grid[[1]] < value.grid[[n.grid]]){
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
    }

### ** Explore the grid using dichotomic search
    while(iter[[1]] <= n.grid[[1]] && ncv[[1]]==TRUE && length(iSet)>0){
        iMiddle <- ceiling(length(iSet)/2)  # position within iSet
        iIndexInSet <- iSet[iMiddle]        # position within the grid
        if(check[[1]]==FALSE || iIndexInSet %in% c(1,n.grid) == FALSE){
            ## evaluate fn unless the value was cached during the check step
            ## (only the two grid borders can be cached there)
            value.grid[iIndexInSet] <- fn(grid[iIndexInSet])
        }
        if(is.na(value.grid[iIndexInSet])){
            ## BUG FIX: drop the NA point by *position*; the original
            ## setdiff(iSet, iMiddle) removed the value iMiddle, which only
            ## coincided with the intended element while iSet started at 1.
            iSet <- iSet[-iMiddle]
            iter <- iter + 1
        }else if(factor*value.grid[iIndexInSet] > tol){
            ## value too large: continue in the lower half (left part),
            ## excluding the point just evaluated
            iSet <- iSet[seq_len(iMiddle - 1)]
            iter <- iter + 1
        }else if(factor*value.grid[iIndexInSet] < -tol){
            ## value too small: continue in the upper half (right part)
            iSet <- iSet[-seq_len(iMiddle)]
            iter <- iter + 1
        }else{
            ## |fn| within tol: converged
            ncv <- FALSE
            solution <- grid[iIndexInSet]
            value <- value.grid[iIndexInSet]
        }
    }

### ** If no value matched tol, return the grid point with smallest |fn|
    if(ncv){
        iIndexInSet <- which.min(abs(value.grid))
        ncv <- FALSE
        solution <- grid[iIndexInSet]
        value <- value.grid[iIndexInSet]
    }

    return(list(par = solution,
                value = value,
                counts = iter,
                cv = ncv,
                message = NULL))
}
## * boot2pvalue - Documentation
#' @title Compute the p.value from the distribution under H1
#' @description Compute the p.value associated with the estimated statistic
#' using a bootstrap sample of its distribution under H1.
#'
#' @param x [numeric vector] a vector of bootstrap estimates of the statistic.
#' @param null [numeric] value of the statistic under the null hypothesis.
#' @param estimate [numeric] the estimated statistic.
#' @param FUN.ci [function] the function used to compute the confidence interval.
#' Must take \code{x}, \code{alternative}, \code{p.value} and \code{sign.estimate} as arguments
#' and only return the relevant limit (either upper or lower) of the confidence interval.
#' @param alternative [character] a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less".
#' @param tol [numeric] the absolute convergence tolerance.
#' @details
#' For test statistic close to 0, this function returns 1. \cr \cr
#'
#' For positive test statistic, this function search the quantile alpha such that:
#'\itemize{
#' \item \code{quantile(x, probs = alpha)=0} when the argument alternative is set to \code{"greater"}.
#' \item \code{quantile(x, probs = 0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"less"}, it returns 1. \cr \cr
#'
#' For negative test statistic, this function search the quantile alpha such that:
#' \itemize{
#' \item \code{quantile(x, probs = 1-alpha)=0} when the argument alternative is set to \code{"less"}.
#' \item \code{quantile(x, probs = 1-0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"greater"}, it returns 1.
#'
#' @examples
#' set.seed(10)
#'
#' #### no effect ####
#' x <- rnorm(1e3)
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "two.sided")
#' ## expected value of 1
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "greater")
#' ## expected value of 0.5
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "less")
#' ## expected value of 0.5
#'
#' #### positive effect ####
#' x <- rnorm(1e3, mean = 1)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*pnorm(q = 0, mean = -1) = 2*mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "greater")
#' ## expected value of 0.16 = pnorm(q = 0, mean = 1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "less")
#' ## expected value of 0.84 = 1-pnorm(q = 0, mean = 1) = mean(x>=0)
#'
#' #### negative effect ####
#' x <- rnorm(1e3, mean = -1)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*(1-pnorm(q = 0, mean = -1)) = 2*mean(x>=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "greater")
#' ## expected value of 0.84 = pnorm(q = 0, mean = -1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "less") # pnorm(q = 0, mean = -1)
#' ## expected value of 0.16 = 1-pnorm(q = 0, mean = -1) = mean(x>=0)
## * boot2pvalue
#' @rdname boot2pvalue
#' @export
## Compute a bootstrap p-value by confidence-interval inversion: search for
## the smallest level alpha at which the confidence limit produced by
## FUN.ci crosses `null`.
##
## x           : numeric vector of bootstrap estimates of the statistic.
## null        : value of the statistic under the null hypothesis.
## estimate    : the point estimate; if NULL the mean of the bootstrap
##               sample is used instead.
## alternative : "two.sided", "less" or "greater".
## FUN.ci      : function returning the relevant CI limit; must accept
##               x, p.value, alternative and sign.estimate.
## tol         : statistics smaller than tol in absolute value are treated
##               as exactly 0, giving a p-value of 1.
boot2pvalue <- function(x, null, estimate = NULL, alternative = "two.sided",
                        FUN.ci = quantileCI,
                        tol = .Machine$double.eps ^ 0.5){
  x.boot <- na.omit(x)      # discard failed/missing bootstrap replicates
  n.boot <- length(x.boot)
  statistic.boot <- mean(x.boot) - null  # bootstrap-averaged statistic, centered at the null

  if(is.null(estimate)){
    ## no point estimate supplied: fall back on the bootstrap average
    statistic <- statistic.boot
  }else{
    statistic <- estimate - null
    ## a sign disagreement suggests the bootstrap distribution is not
    ## centered near the estimate; warn but continue
    if(sign(statistic.boot)!=sign(statistic)){
      warning("the estimate and the average bootstrap estimate do not have same sign \n")
    }
  }
  sign.statistic <- statistic>=0  # TRUE for a non-negative test statistic

  if(abs(statistic) < tol){ ## too small test statistic
    p.value <- 1
  }else if(n.boot < 10){ ## too few bootstrap samples
    p.value <- as.numeric(NA)
  }else if(all(x.boot>null)){ ## clear p.value: whole bootstrap distribution above the null
    p.value <- switch(alternative,
                      "two.sided" = 0,
                      "less" = 1,
                      "greater" = 0)
  } else if(all(x.boot<null)){ ## clear p.value: whole bootstrap distribution below the null
    p.value <- switch(alternative,
                      "two.sided" = 0,
                      "less" = 0,
                      "greater" = 1)
  }else{ ## need search to obtain p.value
    ## when the p.value=1-coverage increases, does the quantile increases?
    increasing <- switch(alternative,
                         "two.sided" = sign.statistic,
                         "less" = FALSE,
                         "greater" = TRUE)
    ## grid of candidate p-values with resolution 1/n.boot
    grid <- seq(0,by=1/n.boot,length.out=n.boot)
    ## dichotomic search over the grid (see discreteRoot) for the p-value
    ## whose confidence limit hits the null value
    resSearch <- discreteRoot(fn = function(p.value){
      CI <- FUN.ci(x = x.boot,
                   p.value = p.value,
                   alternative = alternative,
                   sign.estimate = sign.statistic)
      return(CI[1]-null)
    },
    grid = grid,
    increasing = increasing,
    check = FALSE)

    ## sanity check: the confidence limit must change sign between the two
    ## neighbouring grid points, otherwise the search did not bracket a root
    sign.before <- sign(FUN.ci(x = x.boot,
                               p.value = max(0,resSearch$par-1/n.boot),
                               alternative = alternative,
                               sign.estimate = sign.statistic)-null)
    sign.after <- sign(FUN.ci(x = x.boot,
                              p.value = min(1,resSearch$par+1/n.boot),
                              alternative = alternative,
                              sign.estimate = sign.statistic)-null)
    ##
    if (is.na(resSearch$value[[1]]) || is.na(sign.before[[1]])|| is.na(sign.after[[1]]) || length(resSearch$value)==0
        || resSearch$par[[1]]<0 || resSearch$par[[1]]>1 || sign.before[[1]]==sign.after[[1]]){
      warning("incorrect convergence of the algorithm finding the critical quantile \n",
              "p-value may not be reliable \n")
    }
    p.value <- resSearch$par
  }

  ## an extreme p-value usually reflects too coarse a bootstrap resolution
  if(p.value %in% c(0,1)){
    message("Estimated p-value of ",p.value," - consider increasing the number of bootstrap samples \n")
  }
  return(p.value)
}
## * quantileCI
## Return the single relevant confidence limit of the bootstrap sample `x`
## at level `p.value` for the requested `alternative`.
## For a two-sided test the lower tail (p/2) is used when the estimate is
## non-negative and the upper tail (1 - p/2) otherwise.
quantileCI <- function(x, alternative, p.value, sign.estimate, ...){
  prob <- switch(alternative,
                 "two.sided" = if (sign.estimate) p.value/2 else 1 - p.value/2,
                 "less"      = 1 - p.value,
                 "greater"   = p.value)
  quantile(x, probs = prob)[1]
}
##----------------------------------------------------------------------
### discreteRoot.R ends here
|
64a00298bab31a9d4175ab0825c9045d0b7c6ea7
|
1f092c22a5746af10ce574af15002d53881c6ef7
|
/man/plotObject.Rd
|
9d1e24bd8c7e8ca7b638aaeb51b3539ed558dc75
|
[] |
no_license
|
cran/microsamplingDesign
|
50ce4ca2e1439049c20733194c5962e0e3b696c8
|
49a02faf102dfc9abef4d34bbdd7041a251f64f8
|
refs/heads/master
| 2021-10-27T09:12:27.351890
| 2021-10-13T12:52:04
| 2021-10-13T12:52:04
| 131,901,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,621
|
rd
|
plotObject.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaaGenerics.R, R/objectPkModel.R
\docType{methods}
\name{plotObject}
\alias{plotObject}
\alias{plotObject,PkModel-method}
\alias{plotObject,PkData-method}
\title{generic function to plot an object}
\usage{
plotObject(object, ...)
\S4method{plotObject}{PkModel}(object, times, nCurves = 12,
nSamplesIntegration = 1000, seed = 134, sampleCurvesOnly = FALSE,
indSamplingPoints = FALSE)
\S4method{plotObject}{PkData}(object, nCurves = NULL,
nSamplesIntegration = 1000, sampleCurvesOnly = TRUE, seed = NULL,
indSamplingPoints = TRUE, addZeroIsZero = FALSE)
}
\arguments{
\item{object}{a S4 class object}
\item{...}{additional parameters}
\item{times}{numeric vector at of times at which the model should be simulated for \code{\link{PkModel-class}}}
\item{nCurves}{the number of sample curves defaults to 12 for \code{\link{PkModel-class}} ,
if \code{\link{PkData-class}} defaults to \code{NULL} meaning all data are plotted}
\item{nSamplesIntegration}{number of simulated curves to calculate averaged curve, defaults to 1000}
\item{seed}{specify the random seed to draw samples to get the same plot each time}
\item{sampleCurvesOnly}{logical value if \code{TRUE} only sample curves are displayed and the averaged curve omitted ,
defaults to \code{FALSE} for \code{\link{PkModel-class}} and \code{TRUE} for \code{\link{PkData-class}}}
\item{indSamplingPoints}{logical indicator if \code{TRUE} sample times are indicated on the plot,
defaults to \code{FALSE} for \code{\link{PkModel-class}} and \code{TRUE} for \code{\link{PkData-class}}}
\item{addZeroIsZero}{logical value, when \code{TRUE} the zero point is added
to the plot with value zero ( only for \code{\link{PkData-class}} , defaults to \code{FALSE} )}
}
\description{
generic function to plot an object
}
\examples{
\dontrun{
# examples with limited number of samples, increase samples in practice
plotObject( object = getExamplePkModel() ,
times = seq( 0 , 10 , 1 ) , nSamplesIntegration = 25 )
plotObject( object = getExamplePkModel() ,
times = seq( 0 , 10 , 1 ) , nCurves = 3 , nSamplesIntegration = 5 )
plotObject( object = getExamplePkModel() ,
times = seq( 0 , 10 , 1 ) , nCurves = 3 , sampleCurvesOnly = TRUE )
}
\dontrun{
pkData <- getPkData( getExamplePkModel() , 1:10 , 5 , 10 )
plotObject( object = pkData )
plotObject( object = pkData , nCurves = 2 )
plotObject( object = pkData , nCurves = 2 , addZeroIsZero = TRUE )
plotObject( object = pkData , nCurves = 3 ,
sampleCurvesOnly = FALSE , nSamplesIntegration = 25 )
}
}
|
516b1bae0664094a80c8af900a7f180b32dbc942
|
1a0e05d066f277b87e0b87b0a96af7cf8888d209
|
/R/fucks.r
|
8d1f4621e381f9d67fadd55d3ea1d9f0b7a171bf
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
wrathematics/idgaf
|
0213db40e06f155c597948982af094ba1d69c810
|
a2de7e8f0b9386d12332a3f786e66309847d7015
|
refs/heads/master
| 2021-01-24T06:30:20.196068
| 2014-07-30T00:17:49
| 2014-07-30T00:17:49
| 22,273,867
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 644
|
r
|
fucks.r
|
## Count how many entries of the package data set `fuck` match the given
## GitHub user and/or repository. With only `repo` (or only `user`) given,
## the count is over that single column; with both, the "user/repo" pair is
## matched. Returns 0 when there is no match.
nfucks <- function(user, repo)
{
  have_user <- !missing(user)
  have_repo <- !missing(repo)

  if (!have_user && !have_repo)
    stop("At least one of 'user' or 'repo' must be supplied.")

  if (!have_user) {
    # repo only: tally occurrences of each repository name
    counts <- sort(table(fuck$repo), decreasing=TRUE)
    key <- repo
  } else if (!have_repo) {
    # user only: tally occurrences of each user name
    counts <- sort(table(fuck$user), decreasing=TRUE)
    key <- user
  } else {
    # both given: tally "user/repo" pairs
    combined <- paste(fuck$user, "/", fuck$repo, sep="")
    counts <- table(combined)
    key <- paste(user, "/", repo, sep="")
  }

  hit <- counts[which(names(counts) == key)]
  names(hit) <- NULL

  if (length(hit) == 0)
    return(0)
  return(hit)
}
|
aec3db730e43c8c7be11493e99402f4f64d9bc38
|
bd23162e4b8c3c779557160a774bffb765adce86
|
/shiny/repo.R
|
696687e96e657f627b21071b051b27fb2120dfb1
|
[
"MIT"
] |
permissive
|
ktmud/github-life
|
a8ab2ee91c85c2a62a348f6764742dcf1b00c338
|
421e46f9832879bb8c81d8731d3524ef20fc3065
|
refs/heads/master
| 2021-01-19T22:24:48.671526
| 2017-11-11T18:50:26
| 2017-11-11T18:50:26
| 88,812,727
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,672
|
r
|
repo.R
|
#
# Get Repository details
#
library(cacher)
if (!exists("cache")) {
cache <- LRUcache("100mb")
}
## Wrap `FUN` so its result is memoised in the module-level LRU `cache`,
## keyed by "<prefix>__<repo>". A NULL repo short-circuits to NULL; any
## extra arguments are forwarded to `FUN` (but do not affect the key).
.cached <- function(prefix, FUN) {
  function(repo, ...) {
    if (is.null(repo)) {
      return()
    }
    repo <- as.character(repo)
    cache_key <- str_c(prefix, "__", repo)
    # serve from cache when available
    if (cache$exists(cache_key)) {
      return(cache$get(cache_key))
    }
    result <- FUN(repo, ...)
    cache$set(cache_key, result)
    result
  }
}
## Weekly per-author counts for one repository, memoised under "stats".
## `col_name` selects which metric column of `g_contributors` to return
## (renamed to `count`); small contributors are collapsed via CollapseOthers.
## NOTE(review): the `group_others` argument is currently unused.
RepoStats <- .cached("stats", function(repo,
                                       col_name = "commits",
                                       group_others = TRUE) {
  dat <- db_get(
    sprintf(
      "
      SELECT week, author, %s FROM g_contributors
      WHERE repo = %s
      ",
      dbQuoteIdentifier(db$con, col_name),
      dbQuoteString(db$con, repo)
    )
  )
  if (nrow(dat) > 0) {
    dat <- dat %>%
      .[, c("week", "author", col_name)]  # keep only the three columns, in order
    names(dat) <- c("week", "author", "count")
    dat <- CollapseOthers(dat, "author", "count")
    dat$week <- as.Date(dat$week)
  } else {
    # give an empty row
    dat <- data.frame()
  }
  dat %>% FillEmptyWeeks()
})
## Keep the top `keep_n` keys (by total `valcol`, and holding more than 10%
## of the leader's total) and collapse every remaining key into a single
## `others` row per week.
##
## dat    : data frame with a `week` column plus the key/value columns.
## keycol : name of the grouping column (e.g. "author", "event").
## valcol : name of the count column.
## keep_n : maximum number of individual keys to keep.
## others : label used for the collapsed bucket.
CollapseOthers <-
  function(dat,
           keycol = "author",
           valcol = "count",
           keep_n = 5,
           others = "<i>others</i>") {
    # work on fixed-name copies so the dplyr verbs below can refer to them
    dat$keycol <- dat[[keycol]]
    dat$valcol <- dat[[valcol]]
    top_author <- dat %>%
      group_by(keycol) %>%
      summarize(n = sum(valcol)) %>%
      # we need this diff rate to filter out
      # very small contributors
      mutate(p = n / max(n)) %>%
      arrange(desc(n)) %>%
      filter(p > .1) %>%
      head(keep_n)
    dat.top <- dat %>%
      filter(keycol %in% top_author$keycol)
    dat <- dat %>%
      filter(!(keycol %in% top_author$keycol)) %>%
      group_by(week) %>%
      # count of others authors
      summarise(valcol = sum(valcol)) %>%
      # FIX: use the `others` argument instead of a hard-coded duplicate of
      # its default value, so a custom label actually takes effect
      mutate(keycol = others) %>%
      bind_rows(dat.top, .)
    # restore the caller's column names and drop the working copies
    dat[[keycol]] <- dat$keycol
    dat[[valcol]] <- dat$valcol
    dat$keycol <- NULL
    dat$valcol <- NULL
    dat
  }
## Weekly count of newly created issues for one repository (memoised).
## created_at timestamps are bucketed into week start dates in SQL.
RepoIssues <- .cached("issues", function(repo) {
  db_get(
    sprintf(
      "
      SELECT
        `repo`,
        DATE(SUBDATE(SUBDATE(`created_at`, WEEKDAY(`created_at`)), 1)) AS `week`,
        count(*) as `n_issues`
      FROM `g_issues`
      WHERE `repo` = %s
      GROUP BY `week`
      ",
      dbQuoteString(db$con, repo)
    )
  ) %>% FillEmptyWeeks()
})
## Weekly counts of issue events by event type for one repository
## (memoised); rare event types are collapsed into an "others" bucket.
RepoIssueEvents <- .cached("issue_events", function(repo) {
  db_get(
    sprintf(
      "
      SELECT
        `repo`,
        DATE(SUBDATE(SUBDATE(`created_at`, WEEKDAY(`created_at`)), 1)) AS `week`,
        `event`,
        count(*) as `count`
      FROM `g_issue_events`
      WHERE `repo` = %s
      GROUP BY `week`, `event`
      ",
      dbQuoteString(db$con, repo)
    )
  ) %>% CollapseOthers("event", keep_n = 6)  # keycol = "event", valcol defaults to "count"
})
## Weekly count of new stargazers for one repository (memoised).
## starred_at timestamps are bucketed into week start dates in SQL.
RepoStargazers <- .cached("stargazers", function(repo) {
  ## FIX: the result used to be captured in an unused local `dat`, so the
  ## function returned its value invisibly; return the pipeline directly.
  db_get(sprintf("
    SELECT
      `repo`,
      DATE(SUBDATE(SUBDATE(`starred_at`, WEEKDAY(`starred_at`)), 1)) AS `week`,
      count(*) as `n_stargazers`
    FROM `g_stargazers`
    WHERE `repo` = %s
    GROUP BY `week`
    ", dbQuoteString(db$con, repo))) %>%
    FillEmptyWeeks()
})
## Plotly timeline for one repository: stacked weekly commit counts per
## author, overlaid with the weekly number of new issues and (hidden by
## default) a rescaled weekly star count.
PlotRepoTimeline <- function(repo) {
  if (is.null(repo) || repo == "") {
    return(EmptyPlot())
  }
  if (!(repo %in% repo_choices$repo)) {
    return(EmptyPlot("No data found! :("))
  }
  issues <- RepoIssues(repo)
  repo_stats <- RepoStats(repo)
  stargazers <- RepoStargazers(repo)
  # remove first week of data because for some large repositories
  # the first week of data can be very very large,
  # often screws the whole time line
  if (nrow(stargazers) > 10) {
    stargazers <- stargazers[-1, ]
    issues <- issues[-1, ]
  }
  if (nrow(repo_stats) + nrow(issues) + nrow(stargazers) < 1) {
    return(EmptyPlot("No data found! :("))
  }
  # stacked bars: weekly counts per author
  p <- plot_ly(repo_stats, x = ~week, y = ~count, opacity = 0.6,
               color = ~author, type = "bar")
  if (nrow(issues) > 0) {
    p %<>% add_lines(data = issues, x = ~week, y = ~n_issues,
                     opacity = 1,
                     mode = "lines+markers",
                     # line = list(width = 1),
                     name = "<b>issues</b>", color = I("#28A845"))
  }
  if (nrow(stargazers) > 0) {
    # scale stars to the magnitude of the issue counts so both series
    # share one y axis
    diffrate <- mean(issues$n_issues) / mean(stargazers$n_stargazers)
    stargazers %<>%
      mutate(star_scaled = n_stargazers * diffrate)
    p %<>% add_lines(data = stargazers, x = ~week, y = ~star_scaled,
                     opacity = 1,
                     visible = "legendonly",  # hidden until toggled via the legend
                     mode = "lines+markers",
                     # line = list(width = 1),
                     name = "<b>stars (scaled)</b>", color = I("#fb8532"))
  }
  # NOTE(review): mindate/maxdate are computed but never used below
  mindate <- min(issues$week, repo_stats$week,
                 stargazers$week, na.rm = TRUE)
  maxdate <- max(issues$week, repo_stats$week,
                 stargazers$week, na.rm = TRUE)
  # NOTE(review): "nonnegetive" looks like a typo for "nonnegative" -- confirm
  # against the plotly axis reference
  p %<>% layout(
    barmode = "stack",
    yaxis = list(title = "Count"),
    xaxis = list(
      title = "Week",
      rangemode = "nonnegetive",
      rangeselector = RangeSelector(repo_stats$week)
    ))
  p
}
## Plotly timeline of weekly issue events for one repository, stacked by
## event type, aligned with the repository's commit-history date range.
PlotRepoIssueEventsTimeline <- function(repo) {
  if (is.null(repo) || repo == "") {
    return(EmptyPlot(""))
  }
  if (!(repo %in% repo_choices$repo)) {
    return(EmptyPlot(""))
  }
  repo_stats <- RepoStats(repo)
  # pad the event series out to the commit-history week range
  # NOTE(review): the second argument max(repo_stats$week) is positional --
  # presumably it fills FillEmptyWeeks' maxdate parameter; confirm.
  events <- RepoIssueEvents(repo) %>%
    FillEmptyWeeks(mindate = min(repo_stats$week), max(repo_stats$week))
  p <- plot_ly(
    events,
    x = ~ week,
    y = ~ count,
    color = ~ event,
    type = "bar"
  )
  p %<>% layout(
    barmode = "stack",
    yaxis = list(title = "Count"),
    xaxis = list(
      title = "Week",
      rangeselector = RangeSelector(events$week)
    ))
  p
}
## Fetch one repository's row from `g_repo`, keyed by an "owner/name"
## string (memoised). Adds `repo` and `exists` columns; when the repository
## is unknown only those two columns are returned (`exists = FALSE`).
GetRepoDetails <- .cached("repo", function(repo) {
  # bail out on NULL or anything not shaped like "owner/name"
  if (is.null(repo) || !str_detect(repo, ".+/.+")) return()
  tmp <- str_split(repo, "/") %>% unlist()
  # NOTE(review): owner/name are interpolated into SQL without quoting or
  # escaping; prefer dbQuoteString as used elsewhere in this file.
  dat <- db_get(sprintf(
    "SELECT * from `g_repo`
    WHERE `owner_login` = '%s' and `name` = '%s'"
    , tmp[1], tmp[2]))
  if (is.null(dat) || nrow(dat) < 1) {
    return(tibble(repo = repo, exists = FALSE))
  }
  dat$repo <- repo
  dat$exists <- TRUE
  dat
})
## Render the detail panel for a repository as shiny/htmltools tags.
## `d` is the one-row data frame returned by GetRepoDetails(), or NULL
## when no repository has been selected yet.
RenderRepoDetails <- function(d) {
  if (is.null(d)) {
    # nothing selected yet: show a usage hint
    return(
      div(class = "repo-detail-placeholder",
          "Please select a repository from the dropdown on the left.",
          tags$br(),
          "You may also type to search."
      )
    )
  }
  if (!d$exists) {
    # repository not in our database
    # FIX: corrected user-facing grammar ("Could not found" -> "Could not
    # find", "doesn't exists" -> "doesn't exist")
    return(
      div(class = "repo-detail-placeholder",
          "Could not find data for this repository.",
          tags$br(),
          "Either it doesn't exist or we didn't scrape it yet."
      )
    )
  }
  div(
    id = str_c("repo-detail-", d$id),
    class = "repo-details",
    div(
      class = "desc",
      # external link to the repository on GitHub
      tags$a(
        class = "to-github",
        target = "_blank",
        href = str_c("http://github.com/", d$repo),
        icon("external-link")
      ),
      d$description
    ),
    tags$ul(
      class = "list-inline time-points",
      tags$li(
        tags$span("Created at"),
        tags$strong(d$created_at)
      ),
      tags$li(
        tags$span("last updated at"),
        tags$strong(d$updated_at)
      ),
      tags$li(
        tags$span("last pushed at"),
        tags$span(d$pushed_at)
      )
    )
  )
}
## Render the language / stars / forks summary line for a repository.
## Returns NULL when nothing is selected or the repository has no data.
RenderRepoMeta <- function(d) {
  if (is.null(d) || !d$exists) return()
  tags$ul(
    class = "list-inline repo-meta",
    tags$li(
      style = "width:6em",
      tags$span(d$lang)
    ),
    tags$li(
      icon("star"),
      tags$strong(fmt(d$stargazers_count)),  # fmt: number formatter defined elsewhere
      tags$span("stars")
    ),
    tags$li(
      icon("code-fork"),
      tags$strong(fmt(d$forks_count)),
      tags$span("forks")
    )
  )
}
|
9b89fcc582cd09415ef97795bd0216c848cd6e53
|
80e0469caa7900baaa6262b3b652e40db7088160
|
/complete_data_analysis/hourly_plots_functions.R
|
b9711fe0f92a9dbd0e2f36c9cdc0a70210cb5665
|
[] |
no_license
|
diegoleonbarido/flexbox-data-dump-analysis
|
8821e7bf897f5debbcaac0074bef46f15e3f5eb7
|
d895385a977fb2db3e7798856d92703fced5598f
|
refs/heads/master
| 2020-12-11T21:15:56.567109
| 2017-08-15T22:49:30
| 2017-08-15T22:49:30
| 39,653,179
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,240
|
r
|
hourly_plots_functions.R
|
#Daily plots
#Hourly aggregation of daily plots
## Prepare a day's power readings for hourly plotting: drop duplicated
## timestamps and derive date/time helper columns via date.vars.simple().
hourly.plotting <- function(data){
  data <- data[!duplicated(data$time_stamp),]
  data <- date.vars.simple(data)
  #Creating the unique ID and merging with the original data set so that all unique hours
  #Comment out for now
  #house_Power_sum <- aggregate(house_analysis$house_Power,list(house_analysis$hour,house_analysis$date),FUN=sum) %>% mutate(hour = Group.1,date=Group.2,power=x) %>% select(hour,date, power)
  # FIX: the function previously returned `house_analysis`, a name never
  # defined in this scope (leftover from the commented-out aggregation
  # above), which would error or silently pick up a global; return the
  # processed data instead.
  return(data)
}
#Second-by-second aggregation of daily plots
## Second-by-second preparation of daily plots: deduplicate timestamps,
## derive second/minute/hour columns, and attach a sequential `id` so each
## (hour, minute, second) combination gets a unique, chronologically
## ordered index for plotting.
second.plotting <- function(data){
  data <- data[!duplicated(data$time_stamp),]
  data <- date.vars.simple(data) %>% select(time_stamp,house_Power,house_Energy,second,minute,hour)
  #Creating the unique ID and merging with the original data set so that all pair 'hour - minute' values have a unique id
  unique_day_combo <- unique(data[c("second","hour", "minute")])
  # order chronologically within the day before numbering
  unique_day_combo_ordered <- unique_day_combo[order(unique_day_combo$hour,unique_day_combo$minute,unique_day_combo$second),]
  unique_day_combo_ordered$id <- c(1:length(unique_day_combo_ordered$hour))
  # left join the id back onto the full data (plyr::join)
  unique_day_merge <- join(data,unique_day_combo_ordered,type='left',match='all')
  return(unique_day_merge)
}
|
95904d5149cf1305b3da568f7eb0849aa7f94231
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fExoticOptions/examples/LookbackOptions.Rd.R
|
bbdf69971c2f3946742e5db93432927ff9f14fd0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,467
|
r
|
LookbackOptions.Rd.R
|
library(fExoticOptions)
### Name: LookbackOptions
### Title: Valuation of Lookback Options
### Aliases: LookbackOptions FloatingStrikeLookbackOption
###   FixedStrikeLookbackOption PTFloatingStrikeLookbackOption
###   PTFixedStrikeLookbackOption ExtremeSpreadOption
### Keywords: math

### ** Examples
# Each call below prices one lookback-option variant and prints the result.
# Common arguments: S = spot, X = strike, r = risk-free rate, b = cost of
# carry, sigma = volatility, Time/Time2 = maturity in years.

## Examples from Chapter 2.9 in E.G. Haug's Option Guide (1997)

## Floating Strike Lookback Option [2.9.1]:
FloatingStrikeLookbackOption(TypeFlag = "c", S = 120,
  SMinOrMax = 100, Time = 0.5, r = 0.10, b = 0.10-0.06,
  sigma = 0.30)

## Fixed Strike Lookback Option [2.9.2]:
FixedStrikeLookbackOption(TypeFlag = "c", S = 100,
  SMinOrMax = 100, X = 105, Time = 0.5, r = 0.10, b = 0.10,
  sigma = 0.30)

## Partial Time Floating Strike Lookback Option [2.9.3]:
PTFloatingStrikeLookbackOption(TypeFlag = "p", S = 90,
  SMinOrMax = 90, time1 = 0.5, Time2 = 1, r = 0.06, b = 0.06,
  sigma = 0.20, lambda = 1)

## Partial Time Fixed Strike Lookback Option [2.9.4]:
PTFixedStrikeLookbackOption(TypeFlag = "c", S = 100, X = 90,
  time1 = 0.5, Time2 = 1, r = 0.06, b = 0.06, sigma = 0.20)

## Extreme Spread Option [2.9.5]:
ExtremeSpreadOption(TypeFlag = "c", S = 100, SMin = NA,
  SMax = 110, time1 = 0.5, Time2 = 1, r = 0.1, b = 0.1,
  sigma = 0.30)
ExtremeSpreadOption(TypeFlag = "cr", S = 100, SMin = 90,
  SMax = NA, time1 = 0.5, Time2 = 1, r = 0.1, b = 0.1,
  sigma = 0.30)
|
3a86d0d35ce05d4e038c0a54be8f6bc37865531a
|
c459dd32d88158cb064c3af2bc2ea8c7ab77c667
|
/individual_sample/archives/examine_normal_PCA.20200131.v1.R
|
a4e77f688763596857cd7bb4a131d3d6a48de272
|
[] |
no_license
|
ding-lab/ccRCC_snRNA_analysis
|
d06b8af60717779671debe3632cad744467a9668
|
ac852b3209d2479a199aa96eed3096db0b5c66f4
|
refs/heads/master
| 2023-06-21T15:57:54.088257
| 2023-06-09T20:41:56
| 2023-06-09T20:41:56
| 203,657,413
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,241
|
r
|
examine_normal_PCA.20200131.v1.R
|
# Yige Wu @WashU Jan. 2020
## For the cell type assignment of the normal sample

# source ------------------------------------------------------------------
# NOTE(review): setwd() in a script hinders reproducibility; consider
# project-relative paths.
setwd(dir = "~/Box/")
source("./Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/ccRCC_snRNA_analysis/ccRCC_snRNA_shared.R")

# set run id ----------------------------------------------------------
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)

# set aliquot id ----------------------------------------------------------
snRNA_aliquot_id_tmp <- "CPT0075170013"

# input seurat processing summary ------------------------------------------------
seurat_summary <- fread(input = "./Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/Resources/snRNA_Processed_Data/scRNA_auto/summary/ccRCC_snRNA_Downstream_Processing - Seurat_Preprocessing.20200116.v1.tsv", data.table = F)
# keep only the pre-mRNA-referenced, non-FACS runs for this aliquot and
# build the paths to the saved Seurat object and the positive-DEG table
seurat_summary2process <- seurat_summary %>%
  filter(Cellranger_reference_type == "pre-mRNA") %>%
  filter(Proceed_for_downstream == "Yes") %>%
  filter(FACS == "") %>%
  filter(Aliquot %in% snRNA_aliquot_id_tmp) %>%
  mutate(Path_seurat_object = paste0("./Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/Resources/snRNA_Processed_Data/scRNA_auto/outputs/", Aliquot, FACS,
                                     "/pf", `pre-filter.low.nCount`, "_fmin", low.nFeautre, "_fmax", high.nFeautre, "_cmin", low.nCount, "_cmax", high.nCount, "_mito_max", high.percent.mito,
                                     "/", Aliquot, FACS, "_processed.rds")) %>%
  mutate(Path_deg_table = paste0("./Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/Resources/snRNA_Processed_Data/scRNA_auto/outputs/", Aliquot, FACS,
                                 "/pf", `pre-filter.low.nCount`, "_fmin", low.nFeautre, "_fmax", high.nFeautre, "_cmin", low.nCount, "_cmax", high.nCount, "_mito_max", high.percent.mito,
                                 "/", Aliquot, FACS, ".DEGs.Pos.txt"))

# input marker table ------------------------------------------------------
gene2cellType_tab <- fread(input = "./Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/Resources/Kidney_Markers/RCC_marker_gene_table_and_literature_review - Gene2CellType_Tab_w.HumanProteinAtlas.20200131.v1.tsv")

# set output directory ----------------------------------------------------
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)

# input seurat object -----------------------------------------------------
seurat_obj_path <- seurat_summary2process$Path_seurat_object[seurat_summary2process$Aliquot == snRNA_aliquot_id_tmp]
seurat_obj <- readRDS(file = seurat_obj_path)

# Re-do the scaling to include the cell type marker genes -----------------
all.genes <- rownames(seurat_obj)
seurat_obj <- ScaleData(seurat_obj, verbose = F, features = all.genes)
# run the PCA on the variable features plus the cell-type marker genes
genes_for_pca <- unique(c(seurat_obj@assays$RNA@var.features, gene2cellType_tab$Gene))
length(genes_for_pca) # 2140
seurat_obj <- RunPCA(seurat_obj, do.print = F, features = genes_for_pca)
# Warning messages:
# 1: In PrepDR(object = object, features = features, verbose = verbose) :
#   The following 3 features requested have not been scaled (running reduction without them): LINC01272, GPX1, RP11-1143G9.4
# 2: In PrepDR(object = object, features = features, verbose = verbose) :
#   The following 28 features requested have zero variance (running reduction without them): ATP6V1G3, CLDN8, AVPR2, CD79B, TCL1A, VPREB3, AHSP, ALAS2, HBA1, HBA2, HBB, HBD, HBE1, PRDX2, TPSAB1, TPSB2, CXCR3, ENHO, MS4A7, CTSD, CTSG, KLRB1, XCL1, XCL2, GZMK, CD3D, SEMA3G, SFRP2

# cluster
# NOTE(review): Seurat's FindNeighbors/RunUMAP normally take a vector of
# dimensions (e.g. dims = 1:30); a scalar 30 selects only PC 30 -- confirm intent.
seurat_obj <- FindNeighbors(object = seurat_obj, dims = 30)
seurat_obj <- FindClusters(object = seurat_obj, resolution = 0.5)
# run UMAP
seurat_obj <- RunUMAP(seurat_obj, dims = 30)
# save object so far
# NOTE(review): paste0() has no `sep` argument; sep="" is pasted as an empty
# extra piece here (harmless but misleading).
saveRDS(seurat_obj,file = paste0(dir_out, snRNA_aliquot_id_tmp, "_processed.rds", sep=""))
p <- DimPlot(object = seurat_obj, label = TRUE) + NoLegend()
p
# quick sanity checks on the assay matrices
dim(seurat_obj@assays$RNA@scale.data)
dim(seurat_obj@assays$RNA@data)
head(seurat_obj@assays$RNA@data)

# identify differentially expressed cell type marker genes ----------------
## input DEG
deg_tab_path <- seurat_summary2process$Path_deg_table[seurat_summary2process$Aliquot == snRNA_aliquot_id_tmp]
deg_tab_path
deg_tab <- fread(input = deg_tab_path, data.table = F)
cluster_tmp <- 3
cluster_degs <- deg_tab$gene[deg_tab$cluster == cluster_tmp & deg_tab$p_val_adj < 0.1]
## cannot find much cell type marker genes within the DEGs
gene2cellType_cluster3 <- gene2cellType_tab %>%
  filter(Gene %in% cluster_degs)

# get cluster-defining genes---------------------------------------
marker_genes <- FindAllMarkers(object = seurat_obj, test.use = "roc", only.pos = T, return.thresh = 0.5)
cluster_tmp <- 11
cluster_markers <- marker_genes %>%
  filter(cluster == cluster_tmp) %>%
  filter(gene %in% gene2cellType_tab$Gene)
## write as excel table: one sheet per cluster, markers annotated with
## their candidate cell types from the marker table
list_DEGs_by_cluster <- list()
# list_DEGs_by_cluster[["README"]] <- cluster2celltype_tab_tmp
for (i in unique(marker_genes$cluster)) {
  df2write <- marker_genes %>%
    filter(cluster == i) %>%
    filter(power > 0)
  df2write$Cell_Type_Group <- mapvalues(x = ifelse(df2write$gene %in% gene2cellType_tab$Gene, df2write$gene, NA), from = gene2cellType_tab$Gene, to = gene2cellType_tab$Cell_Type_Group)
  df2write$Cell_Type1 <- mapvalues(x = ifelse(df2write$gene %in% gene2cellType_tab$Gene, df2write$gene, NA), from = gene2cellType_tab$Gene, to = gene2cellType_tab$Cell_Type1)
  df2write$Cell_Type2 <- mapvalues(x = ifelse(df2write$gene %in% gene2cellType_tab$Gene, df2write$gene, NA), from = gene2cellType_tab$Gene, to = gene2cellType_tab$Cell_Type2)
  df2write$Cell_Type3 <- mapvalues(x = ifelse(df2write$gene %in% gene2cellType_tab$Gene, df2write$gene, NA), from = gene2cellType_tab$Gene, to = gene2cellType_tab$Cell_Type3)
  list_DEGs_by_cluster[[as.character(i)]] <- df2write
}
file2write <- paste0(dir_out, snRNA_aliquot_id_tmp, ".AllCluster.ROCTestMarkers.Pos.", run_id, ".xlsx")
write.xlsx(list_DEGs_by_cluster, file = file2write)

# Dotplot -----------------------------------------------------------------
genes2plot <- gene2cellType_tab$Gene
length(genes2plot)
# genes2plot <- intersect(rownames(seurat_obj@assays$RNA@scale.data), genes2plot)
# length(genes2plot)
p <- DotPlot(object = seurat_obj, features = genes2plot, col.min = 0)
# annotate each plotted gene with its cell-type grouping for faceting
p$data$gene_cell_type_group <- plyr::mapvalues(p$data$features.plot, from = gene2cellType_tab$Gene, to = gene2cellType_tab$Cell_Type_Group)
p$data$gene_cell_type1 <- plyr::mapvalues(p$data$features.plot, from = gene2cellType_tab$Gene, to = gene2cellType_tab$Cell_Type1)
p <- p + RotatedAxis()
p <- p + facet_grid(.~gene_cell_type_group + gene_cell_type1, scales = "free", space = "free", drop = T)
p <- p + theme(panel.spacing = unit(0, "lines"),
               strip.background = element_blank(),
               panel.border = element_rect(colour = "black"),
               strip.text.x = element_text(angle = 90, vjust = 0.5, size = 10),
               axis.text.x = element_text(size = 9),
               strip.placement = "outside")
file2write <- paste0(dir_out, snRNA_aliquot_id_tmp,".Individual_Clustered.Dotplot.CellTypeMarkers.", run_id, ".png")
png(file = file2write, width = 4000, height = 2000, res = 150)
print(p)
dev.off()
|
dbcdae7ab7be0a57bc5288b12f6ec77043144daa
|
99c3c6d5859f87b0e9c057c2b0e9079ff239ca2b
|
/man/checkdata_twolevels.Rd
|
0beafe9a60d64d6b43af04da3cb6572f0aa4177f
|
[
"MIT"
] |
permissive
|
HansonMenghan/glottospace
|
bd54f5cf6dbc42d15964e474b6ee6138b6119822
|
65a5284c22273a14da0a4e6dbe8e1c1d226c097b
|
refs/heads/main
| 2023-08-20T21:23:32.611883
| 2021-10-13T12:49:28
| 2021-10-13T12:49:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 687
|
rd
|
checkdata_twolevels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glottocheck.R
\name{checkdata_twolevels}
\alias{checkdata_twolevels}
\title{Check whether each variable has at least two levels (excluding NA).}
\usage{
checkdata_twolevels(data)
}
\arguments{
\item{data}{The dataset whose variables are checked for having at least two levels.}
}
\value{
Besides diagnostic messages, this function invisibly returns TRUE if check is passed (all IDs are unique) and FALSE otherwise
}
\description{
This function checks whether each variable has at least two levels (excluding NA). Use function checkall_varlevels to get an overview of the levels in each variable.
}
\examples{
suppressMessages(checkdata_twolevels(data = data))
}
\keyword{internal}
|
438216d4aa425a0408bb258d7262968925a628a2
|
b5b47ebc15c385b1a6bb1fb6fd12352a028fbf87
|
/clustering.R
|
20149d41c30d28c54736c9a05658e7b01dee569a
|
[] |
no_license
|
KayandTheBlack/DataMiningOverDiabetics
|
e3803e05d1bfb509c3e2aa8cde3a9fd47f98500d
|
ed069bfa2d7c448e9facf8d7dc9d12e2eadadce1
|
refs/heads/master
| 2020-03-29T09:58:45.414431
| 2018-12-29T17:18:48
| 2018-12-29T17:18:48
| 149,783,642
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,423
|
r
|
clustering.R
|
# Hierarchical clustering of the pre-processed diabetic dataset.
base_path <- "C:/Users/danie/Documents/MD/diab/DataMiningOverDiabetics"
setwd(file.path(base_path))
diabetic_data <- read.csv("processed_data.csv", na.strings = c("?"))
names(diabetic_data)
dim(diabetic_data)
summary(diabetic_data)
# NOTE(review): attach() is discouraged; later code references columns
# unqualified (race, gender, ...) and depends on this call.
attach(diabetic_data)

#hierarchical clustering
library(cluster)
#install.packages("fpc")
library(fpc)

#dissimilarity matrix
actives<-c(4:11,13:28,30) #exclude row identifiers, non significant variables and response variable
# Gower dissimilarity handles the mixed numeric/categorical variables
dissimMatrix <- daisy(diabetic_data[,actives], metric = "gower", stand=TRUE)
distMatrix<-dissimMatrix^2
h1 <- hclust(distMatrix,method="ward.D")

K<-10 #we want to see 10 partitions
CHIndexes <- array(dim=10)
Silhouettes <- array(dim=10)
# evaluate each candidate partition size (2..K); index 1 is left NA
for (k in 2:K) {
  ck <- cutree(h1,k)
  stats <- cluster.stats(distMatrix, ck)
  CHIndexes[k] <- stats$ch
  Silhouettes[k] <- stats$avg.silwidth
}
plot(CHIndexes, type="o", xlab="Number of clusters", ylab="CH index")
plot(Silhouettes, type="o", xlab="Number of clusters", ylab="Average silhouette")

#The number of clusters is the max of CH indexes and Silhouette (excluding the 2 clusters partition)
n_clusters = 4
c1 <- cutree(h1,n_clusters)
plot(h1, labels = FALSE)
rect.hclust(h1, k = n_clusters)

#insert again the response variable
dcon <- data.frame (race, gender, age, weight, adm_type_id, disch_id, adm_source_id, time_in_hpt, specialty, n_lab_proc, n_proc, n_med, n_outp, n_emerg, n_inp, diag_1, diag_2, diag_3, A1Cresult, metformin, insulin, change, diabetesMed, readmitted, other_meds)
png("all_vars_pairs.png", width=20, height=20, units="in", res=500)
pairs(dcon[,1:25], col=c1)
dev.off()

dcon <- data.frame (age, n_lab_proc, n_med, time_in_hpt, n_outp, disch_id)
png("some_vars_pairs2.png", width=20, height=20, units="in", res=500)
pairs(dcon[,1:6], col=c1)
dev.off()

# NOTE(review): the titles below say "3 classes" although n_clusters is 4,
# and "credit data" although this is the diabetic dataset.
plot(n_med, n_lab_proc,col=c1,main="Clustering of credit data in 3 classes")
plot(n_med, age,col=c1,main="Clustering of credit data in 3 classes")
plot(n_med, time_in_hpt,col=c1,main="Clustering of credit data in 3 classes")
plot(weight, adm_type_id,col=c1,main="Clustering of credit data in 3 classes")
plot(n_med, disch_id,col=c1,main="Clustering of credit data in 3 classes")

#trying to display the discrete variables as continuous to avoid problems
# NOTE(review): runif() is vectorized; the whole loop is equivalent to
# diabetic_data$disch_id + runif(nrow(diabetic_data), -1, 1).
for (row in 1:nrow(diabetic_data)) {
  diabetic_data[row, "disch_id"] <- diabetic_data[row, "disch_id"] + runif(1, -1.0, 1.0)
  #disch_id <- disch_id + runif(1, -1.0, 1.0)
}
|
499742c611b1b2301e53406bff30f6f9c074e442
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dkanr/examples/dkanr_setup.Rd.R
|
bf0ab65ec25ae95dbb10a242a96ebeb6030e5e8f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 555
|
r
|
dkanr_setup.Rd.R
|
library(dkanr)
### Name: dkanr_setup
### Title: Configure default DKAN settings
### Aliases: dkanr_setup
### ** Examples
## Not run:
##D DKAN users without admin/editor privileges could run:
##D dkanr_setup(url = "http://demo.getdkan.com")
##D
##D Privileged DKAN editor/admin users can run:
##D dkanr_setup(url = "http://demo.getdkan.com", username = "your-username", password = "your-password")
##D
##D Not specifying the default DKAN URL will reset the DKAN URL to its default
##D "http://demo.getdkan.com":
##D dkanr_setup()
## End(Not run)
|
558945309ff7cfc594d7539b67c3d943aa9239d9
|
3ab8001ca2e877c291618ae4782beff6ab41fe00
|
/man/get_otp_cipher.Rd
|
d56a2272a4340f30f099fccaf4ae62b5da0af45f
|
[] |
no_license
|
dads2busy/safr
|
595969c739c5b192d8a6fa438e4c86113b6f533f
|
707597be2cfcb534559fa0a5581391f972be4d7d
|
refs/heads/master
| 2021-05-06T20:56:02.720737
| 2017-11-30T21:29:09
| 2017-11-30T21:29:09
| 112,429,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 602
|
rd
|
get_otp_cipher.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/otp.R
\name{get_otp_cipher}
\alias{get_otp_cipher}
\title{Create a Cipher of a Message using a One-Time Pad.}
\usage{
get_otp_cipher(message_str, otp_str,
alphanum_str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
}
\arguments{
\item{message_str}{A string to be ciphered.}
\item{otp_str}{One-time pad (string)}
\item{alphanum_str}{Optional. The alphanumeric string to be used when creating the message cipher.}
}
\description{
Create a Cipher of a Message using a One-Time Pad.
}
\examples{
get_otp_cipher("Talley", get_otp())
}
|
2247fc2368daa46c72591a37eeb027a3ffea96b5
|
01f88e5f43662eafda366fa289ac6e47ebb039d4
|
/R/03_augment.R
|
24f6a632143e7560a553f62190e644d59481af81
|
[] |
no_license
|
rforbiodatascience21/2021_group_13_final_project
|
8c5b687cacde9ac28b5bee7eae4950964f3711af
|
0a7d6a160331b23d0d354e0e9ab31de1568f8ad9
|
refs/heads/main
| 2023-04-20T16:36:24.063131
| 2021-05-10T21:57:43
| 2021-05-10T21:57:43
| 361,680,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,236
|
r
|
03_augment.R
|
# Clear workspace --------------------------------------------------------------
#rm(list = ls())
# Load libraries ---------------------------------------------------------------
library("tidyverse")
# Define functions -------------------------------------------------------------
source(file = "R/99_project_functions.R")
# Load data --------------------------------------------------------------------
# Patient loading ---------------------------------------------------------
# Read one patient's expression table, tag it with its patient ID and drop
# the header-artefact row ("X1").
#
# NOTE: select(1:n_raw) reproduces the original select(Patient_ID:ncol(<df>)),
# where ncol() was evaluated on the frame BEFORE Patient_ID was added — so the
# last column of the raw file is intentionally(?) dropped. Preserved as-is;
# confirm whether that trailing column is a junk index column.
load_patient <- function(path, id) {
  raw <- read_csv(file = path)
  n_raw <- ncol(raw)                      # column count before adding Patient_ID
  raw %>%
    mutate(Patient_ID = id,
           .before = Cell_Barcode) %>%
    select(1:n_raw) %>%
    filter(Cell_Barcode != "X1")
}

# One file per patient, named by patient ID (order matches the original
# patient_1..patient_6 bind order).
patient_files <- c("025I"  = "Data/02_patient_1I.csv.gz",
                   "465C"  = "Data/02_patient_2C.csv.gz",
                   "003C"  = "Data/02_patient_3C.csv.gz",
                   "207CO" = "Data/02_patient_4CO.csv.gz",
                   "056CO" = "Data/02_patient_5CO.csv.gz",
                   "177I"  = "Data/02_patient_6I.csv.gz")

# Combining patients ------------------------------------------------------
data <- names(patient_files) %>%
  lapply(function(id) load_patient(patient_files[[id]], id)) %>%
  bind_rows()

# Replace NAs (gene present in one patient but absent in another) by 0 and
# drop unannotated Ensembl-ID columns.
data <- data %>%
  mutate(across(everything(),
                ~replace_na(.x,
                            0))) %>%
  select(-starts_with("ENSG"))
# Loading metadata -------------------------------------------------------------
# Cell-level annotations from GEO series GSE136831: keep the QC counts (nUMI,
# nGene) and cell-type labels, restrict to the six patients loaded above, and
# strip the sample prefix from the cell barcode so it matches the expression
# tables ("(.+_)(.+)" greedily matches up to the LAST "_", "\\2" keeps the rest).
metadata <- read_tsv(
  "Data/_raw/GSE136831_AllCells.Samples.CellType.MetadataTable.txt") %>%
  tibble() %>%
  select(CellBarcode_Identity,
         nUMI,
         nGene,
         CellType_Category,
         Subclass_Cell_Identity,
         Subject_Identity) %>%
  rename(Patient_ID = Subject_Identity,
         Cell_Barcode = CellBarcode_Identity) %>%
  filter(Patient_ID == "025I" |
           Patient_ID == "465C" |
           Patient_ID == "003C" |
           Patient_ID == "207CO"|
           Patient_ID == "056CO"|
           Patient_ID == "177I") %>%
  mutate(Cell_Barcode = str_replace_all(Cell_Barcode,
                                        "(.+_)(.+)",
                                        "\\2"))
# Join metadata and patient data------------------------------------------------
# Left join keeps every expression row; cells absent from the (pre-filtered)
# metadata get NA annotations, handled further below.
data <- left_join(data,
                  metadata,
                  by = c("Patient_ID",
                         "Cell_Barcode")) %>%
  relocate("nGene",
           "nUMI",
           "CellType_Category",
           "Subclass_Cell_Identity",
           .after = "Cell_Barcode")
# Introduce group label and make that and Patient_ID factors.
# Branch order matters: IDs containing "I" are claimed as IPF before the "CO"
# test, and the remaining IDs (465C, 003C) fall through to "Control".
data <-
  data %>%
  mutate(group =
           factor(
             case_when(
               str_detect(Patient_ID,"I") ~ "IPF",
               str_detect(Patient_ID,"CO") ~ "COPD",
               TRUE ~ "Control")),
         .after = Patient_ID,
         Patient_ID = factor(Patient_ID))
rm(metadata)
# Now there are quite some NAs as the metadata apparently contains only cells
# filtered by the authors.
# We will filter further to second-check this filtering. Thus if it works well,
# the NAs will mostly be gone
# Wrangle data -----------------------------------------------------------------
# Cells with fewer than 2000 recorded transcripts are dropped from the
# ~10000 starting cells per patient.
data <- data %>%
  filter(nUMI > 2000)
# Drop cells whose mitochondrial transcripts exceed 20% of the cell's total.
# NOTE(review): starts_with("MT") also matches any non-mitochondrial gene whose
# symbol begins with "MT" (e.g. MTOR) — confirm this is the intended gene set.
mt_selection <- select(data,
                       starts_with("MT"))
mt_sum <- mt_selection %>%
  mutate(mito_sum = rowSums(mt_selection))
data <- data %>%
  mutate(select(mt_sum,
                mito_sum),
         .after = Cell_Barcode) %>%
  filter(mito_sum / nUMI < 0.2)
# Now check if there are any NAs remaining: this keeps only all-NA rows, so an
# empty result means no fully-missing rows survive.
# NOTE(review): across() inside filter() is deprecated in current dplyr; the
# modern spelling would be if_all(everything(), is.na).
data %>%
  filter(
    across(.cols = everything(),
           .fns = ~ is.na(.x)))
# --> no NAs left
# Breaking the complete data set into patient groups ------------------------
# Equivalent to the original chains of `!=` filters, but stated positively:
#   Control = 465C + 003C, COPD = 207CO + 056CO, IPF = 025I + 177I.
# filter() keeps rows without dropping factor levels, matching the original.
Control <- data %>% filter(Patient_ID %in% c("465C", "003C"))
COPD    <- data %>% filter(Patient_ID %in% c("207CO", "056CO"))
IPF     <- data %>% filter(Patient_ID %in% c("025I", "177I"))

# Write data -----------------------------------------------------------------
write_csv(data,
          file = "Data/03_data.csv")
write_csv(Control,
          file = "Data/03_Control.csv")
write_csv(COPD,
          file = "Data/03_COPD.csv")
write_csv(IPF,
          file = "Data/03_IPF.csv")

# Remove intermediates -------------------------------------------------------
rm(data,
   mt_selection,
   mt_sum)
|
f2f09c203ab110cbe67a7bafb05f2da7d748d1a5
|
0e6977d8b1f917d01edceae9b90ac55b9d06252c
|
/man/ivqr_eg.Rd
|
8f192dbacfff8475d55333498cb114b8b66a92a4
|
[] |
no_license
|
geyh96/IVQR
|
0abb7f13307ceaa5b649a7ea2d14b46612180537
|
f36b124faa732bb5b51d817c12a5371329b75fe1
|
refs/heads/master
| 2023-03-16T05:47:35.389263
| 2018-09-25T18:03:11
| 2018-09-25T18:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 897
|
rd
|
ivqr_eg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ivqr_eg}
\alias{ivqr_eg}
\title{An example data created by simulation.}
\format{A data frame with 10000 rows and 4 variables:
\describe{
\item{y}{outcome variable}
\item{d}{binary endogenous variable}
\item{z}{binary instrumental variable}
\item{x}{control variable}
...
}}
\usage{
ivqr_eg
}
\description{
A dataset illustrating the usage of the IVQR package.
}
\examples{
# The data generation process
sim_ivqr_eg <- function(n = 10 ^ 4){
u <- runif(n)
x <- rbinom(n, 1,0.2)
z <- rbinom(n, 1, 0.37)
v <- rnorm(n)
d <- z * (u > 0.5 * v)
y0 <- 0 + x * 2 + qnorm(u,0,1)
y1 <- (u - 0.5) + x * 2 + qnorm(u,0,1)
y <- d * y1 + (1 - d) * y0
value <- list()
value$y <- y
value$d <- d
value$z <- z
value$x <- x
value <- data.frame(value)
return(value)
}
}
\keyword{datasets}
|
6452e281da310f11b4cefc4fc45852d7f24275b5
|
fe7fe7daaae1aefa393c4a6c1cc6da06f8c419ae
|
/man/rich_html_document.Rd
|
6af9e7b785bd6191cbc5033399baf38ca5ef04b2
|
[
"MIT"
] |
permissive
|
atusy/tokyor85down
|
cd9a43c6e05f5ccd5c549016e898511a460c1694
|
1344a77ca46ee598a0a676b0ec564ede2215c4ca
|
refs/heads/master
| 2022-08-22T11:20:15.238366
| 2020-05-23T01:21:00
| 2020-05-23T01:21:00
| 266,152,970
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,500
|
rd
|
rich_html_document.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rich-html-document.R
\name{rich_html_document}
\alias{rich_html_document}
\title{リッチなrmarkdown::html_document}
\usage{
rich_html_document(
toc = TRUE,
toc_float = TRUE,
number_sections = TRUE,
code_folding = "show",
code_download = TRUE,
...
)
}
\arguments{
\item{toc}{\code{TRUE} to include a table of contents in the output}
\item{toc_float}{\code{TRUE} to float the table of contents to the left of the
main document content. Rather than \code{TRUE} you may also pass a list of
options that control the behavior of the floating table of contents. See the
\emph{Floating Table of Contents} section below for details.}
\item{number_sections}{\code{TRUE} to number section headings}
\item{code_folding}{Enable document readers to toggle the display of R code
chunks. Specify \code{"none"} to display all code chunks (assuming
they were knit with \code{echo = TRUE}). Specify \code{"hide"} to hide all R
code chunks by default (users can show hidden code chunks either
individually or document-wide). Specify \code{"show"} to show all R code
chunks by default.}
\item{code_download}{Embed the Rmd source code within the document and provide
a link that can be used by readers to download the code.}
\item{...}{\code{rmarkdown::html_document}に渡す引数。}
}
\description{
\code{toc}, \code{toc_float}, \code{number_sections}, \code{code_folding}, \code{code_download}を
有効にした。
}
|
ba1b49d013fe9926c0c474e774f30f7bfda69036
|
edd86f6c445ada652ec641deff0535c17a35c3b5
|
/R/WriteExcel.R
|
c687be6c78577366bf4ca4dad591ca0747e7cc1a
|
[] |
no_license
|
zhezhangsh/DEGandMore
|
7bb1231f93c6fdb37224e86e63f793b434efbc7b
|
27fb1b256185f2f79c9f86e1e372a1ca68693b9d
|
refs/heads/master
| 2022-10-17T19:47:22.258491
| 2022-09-20T03:10:56
| 2022-09-20T03:10:56
| 35,493,825
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,751
|
r
|
WriteExcel.R
|
# Write one or more data matrices to an Excel file, one worksheet each.
#
# values     list of data.frames/matrices, one per worksheet
# fileName   output file name; the ".<type>" extension is appended if missing
# sheetNames worksheet names; padded with numeric indices when too short
# type       'xlsx' (default) or 'xls'
# settings   list of per-sheet formatting-argument lists forwarded to
#            WriteExcelSheet, e.g.
#            rep(list(row.names=FALSE, format=data.frame(ind=3:5, fmt=c('0', '0.00', '0.0E+0'))), 5)
#            NOTE: the default rep(list(), n) is always length 0 (list() has
#            length 0); the padding step below repairs that, so the default
#            effectively means "no extra settings".
# verbose    print a progress message per worksheet
WriteExcel<-function(values, fileName=paste(Sys.Date(), 'R2Excel', sep='_'), sheetNames=names(values), type='xlsx', settings=rep(list(), length(values)), verbose=TRUE) {
	options(java.parameters = "-Xmx102400m");  # large JVM heap for big workbooks
	library(xlsx);
	# Pad sheet names with numeric indices when fewer names than sheets were given.
	if (length(sheetNames)<length(values)) sheetNames[(length(sheetNames)+1):length(values)]<-(length(sheetNames)+1):length(values);
	# Pad settings with empty lists so every sheet has an entry.
	if (length(settings)<length(values)) settings[(length(settings)+1):length(values)]<-lapply((length(settings)+1):length(values), function(i) list());
	# Append the file extension when the name does not already end in ".<type>".
	ext<-paste0('\\.', type, '$');
	if (!grepl(ext, fileName, ignore.case=TRUE)) fileName<-paste0(fileName, '.', type);
	wb<-createWorkbook(type=type);
	for (i in seq_along(values)) {
		do.call(WriteExcelSheet, append(list(wb=wb, values=values[[i]], name=as.character(sheetNames[i])), settings[[i]]));
		if (verbose) print(paste('Create worksheet:', sheetNames[i]));
		# Dummy read-back: works around an apparent xlsx/Java bug where the first
		# access to a freshly modified Workbook errors.
		try(getSheets(wb), silent=TRUE);
	}
	saveWorkbook(wb, file=fileName);
} # end of function WriteExcel
####################################################################################################
# Create and format a worksheet in a given Excel Workbook.
# Takes some time if the data matrix is large (about 1 min for a 10000X8 matrix).
WriteExcelSheet<-function(wb, values, name, col.names=TRUE, row.names=TRUE, id.name='ID',
zoom=125, freezeRow=1, freezeCol=0, fontName='Arial', format=data.frame(ind=0, format='')) {
# wb the Workbook of the Excel file
# values a data.frame or matrix to write
# name name of the worksheet
# col.names whether to write column names
# row.names whether to write row names
# id.name header label used for the row-name column when both names are written
# zoom adjust the zooming size (percent)
# freezeRow where to put the horizontal "freeze pane" bar, no freezing if 0
# freezeCol where to put the vertical "freeze pane" bar, no freezing if 0
# fontName name of the character font
# format cell format, such as '0.0000', '0.0E+00' and '0.00%'. A data frame with 2 fields, the first is the column index and the second is the format name
library(xlsx);
# Total sheet dimensions: one extra row/column when headers/row names are written.
if(row.names) numCol=ncol(values)+1 else numCol=ncol(values);
if(col.names) numRow=nrow(values)+1 else numRow=nrow(values);
# First row/column that holds actual data (1 or 2).
colStart<-numCol-ncol(values)+1;
rowStart<-numRow-nrow(values)+1;
header<-colnames(values);
font<-Font(wb, name=fontName);
# Keep only format rows whose column index is inside the sheet, then build one
# CellStyle per data column ('' means the default format).
format<-format[format[,1]>0&format[,1]<=numCol, ];
fmt<-rep('', ncol(values));
fmt[format[,1]]<-as.vector(format[,2]);
styles<-lapply(fmt, function(fmt, wb, font) CellStyle(wb, font=font, dataFormat=DataFormat(fmt)), wb=wb, font=font);
sh<-createSheet(wb, sheetName=name);
cells<-createCell(createRow(sh, rowIndex=1:numRow), colIndex=1:numCol);
# Helper: style and fill a vector of cells with values, returning the cells.
assign<-function(c, v, s=CellStyle(wb)) {
sapply(1:length(c), function(i, c, v, s) {setCellStyle(c[[i]], s); setCellValue(c[[i]], v[i]); c[[i]]}, c=c, v=as.vector(v), s=s)
};
# write column names (bold), prefixing the id.name label when row names are on
if (col.names) {
if (row.names) header<-c(id.name, header);
#cells[1,]<-assign(cells[1,], header, CellStyle(wb, font=Font(wb, name=fontName, isBold=TRUE)));
assign(cells[1,], header, CellStyle(wb, font=Font(wb, name=fontName, isBold=TRUE)));
}
# write row names
# NOTE(review): this call is unconditional (the guarded version is commented
# out above it), so row names are written into column 1 even when
# row.names=FALSE; in that case column 1 is data (colStart==1) and the data
# write below overwrites it — confirm this is intended.
#if (row.names) cells[rowStart:numRow,1]<-assign(cells[rowStart:numRow,1], rownames(values), CellStyle(wb, font=font));
assign(cells[rowStart:numRow,1], rownames(values), CellStyle(wb, font=font));
# assign values to cells, one column at a time with its per-column style;
# the class(c) check handles the single-data-row case where subsetting the
# cell matrix collapses to a list instead of a matrix
#cells[rowStart:numRow, colStart:numCol]<-
#	sapply(1:ncol(values), function(i, c, v, s) assign(c[, i], v[, i], s[[i]]), c=cells[rowStart:numRow, colStart:numCol], v=values, s=styles);
sapply(1:ncol(values), function(i, c, v, s) if (class(c)[1]=='matrix') assign(c[, i], v[, i], s[[i]]) else assign(c[i], v[, i], s[[i]]),
c=cells[rowStart:numRow, colStart:numCol], v=values, s=styles);
setZoom(sh, numerator=zoom, denominator=100);
if (freezeRow>0|freezeCol>0) createFreezePane(sh, rowSplit=freezeRow+1, colSplit=freezeCol+1);
autoSizeColumn(sh, 1);
}
|
3c58e357a133b8073df2404acf44ba417ed89549
|
88c18faabe83ce2c3a07a13791b3e6026619518f
|
/R/GO_enrichment.R
|
3088ebaf69ea34e26d2f76b78934114e5b176991
|
[] |
no_license
|
sq-96/heart_atlas
|
fd98edc9b305f1ab6fa5d327fe9c9034f4c1114b
|
3deed4c3d382072ccfd78d43459d1b53d93eff3f
|
refs/heads/master
| 2023-06-25T13:26:05.273996
| 2021-07-29T20:35:20
| 2021-07-29T20:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,176
|
r
|
GO_enrichment.R
|
library(ArchR)
library(clusterProfiler)
library(tidyverse)
require(org.Hs.eg.db)
require(plyranges)
setwd('/project2/gca/aselewa/heart_atlas_project/')
source('R/analysis_utils.R')
# Load the ArchR project, its peak set, the per-cell-type DA marker peaks and
# the enhancer-gene co-accessibility links (links kept at correlation > 0.5).
satac <- suppressMessages(loadArchRProject('ArchR/ArchR_heart_latest_noAtrium/'))
peaks <- getPeakSet(satac)
peaks$peakID <- GRToString(peaks)
peak.markers <- readRDS('ArchR/ArchR_heart_latest_noAtrium/PeakCalls/DA_MARKERS_FDRP_1_log2FC_1.rds')
other.ranges <- readRDS('ArchR/ArchR_heart_latest_noAtrium/CoAccessibility/Coacc_ENHANCERS_AllCellTypes_overlapCutoff50_k200_corr_cut_-1_maxDist_1Mb_hg38.gr.rds')
other.ranges <- other.ranges[other.ranges$correlation > 0.5,]

# Annotate all peaks with the cell type they are a DA marker for; peaks that
# are no cell type's marker are labelled "Shared".
celltype_ideal_order <- c("Cardiomyocyte","Smooth Muscle","Pericyte","Endothelial","Fibroblast","Neuronal", "Lymphoid","Myeloid")
peak.markers <- lapply(celltype_ideal_order, function(x) peak.markers[[x]])
names(peak.markers) <- celltype_ideal_order
peak.markers.str <- unlist(lapply(peak.markers, GRToString), use.names = FALSE)
peaks.celltypes <- data.frame(peakID = peak.markers.str,
                              cellType = factor(rep(names(peak.markers), lengths(peak.markers)),
                                                levels = names(peak.markers)))
# BUG FIX: the original passed `on = "peakID"`, which is not a dplyr::left_join
# argument (the join key is `by`); recent dplyr errors on the unused argument,
# older versions silently fell back to a natural join. Made explicit.
peak.info.df <- peaks %>% as_tibble() %>% left_join(peaks.celltypes, by = "peakID")
levels(peak.info.df$cellType) <- c(levels(peak.info.df$cellType), "Shared")
peak.info.df$cellType[is.na(peak.info.df$cellType)] <- "Shared"
# GO
enrich.res <- list()
for(ct in celltype_ideal_order){
curr <- peak.info.df[peak.info.df$cellType == ct,]
curr.coacc.genes <- other.ranges$coacc_gene_name[other.ranges$peakID %in% curr$peakID]
enrich.res[[ct]] <- enrichGO(gene = unique(curr.coacc.genes),
OrgDb = org.Hs.eg.db::org.Hs.eg.db,
keyType = "SYMBOL",
ont = "ALL",
qvalueCutoff = 0.05)
}
saveRDS(enrich.res, file = 'misc/enrichGO_Coaccessible_Genes_results.rds')
enrich.res <- readRDS('misc/enrichGO_Coaccessible_Genes_results.rds')
# Keep Cardiomyocyte, Endothelial, Fibroblast and Myeloid
# (positions 1, 4, 5, 8 of celltype_ideal_order).
enrich.res <- enrich.res[c(1,4,5,8)]
# Add fold-change results: parse the "hits/total" strings in GeneRatio and
# BgRatio and store their ratio as FoldChange in each result table.
for (ct in names(enrich.res)){
  enrich_df <- enrich.res[[ct]]@result
  tg_ratio <- enrich_df %>% pull(GeneRatio)
  tg_mat <- do.call(rbind, strsplit(tg_ratio, split = "/"))
  enrich_df$tg_1 <- as.numeric(tg_mat[, 1])
  enrich_df$tg_2 <- as.numeric(tg_mat[, 2])
  bg_ratio <- enrich_df %>% pull(BgRatio)
  bg_mat <- do.call(rbind, strsplit(bg_ratio, split = "/"))
  enrich_df$bg_1 <- as.numeric(bg_mat[, 1])
  enrich_df$bg_2 <- as.numeric(bg_mat[, 2])
  enrich_df$bg_2 <- as.numeric(bg_mat[, 2])
  enrich.res[[ct]]@result <- enrich_df %>% mutate(FoldChange = (tg_1/tg_2) / (bg_1/bg_2))
}
# arrange by fold change, pvalue and select relevant columns;
# keep the top 200 distinct BP terms per cell type
top.GO.ids <- list()
for (ct in names(enrich.res)){
  top.GO.ids[[ct]] <- enrich.res[[ct]]@result %>%
    filter(ONTOLOGY == "BP") %>%
    arrange(-FoldChange, pvalue) %>% distinct(geneID, .keep_all = T) %>%
    head(200) %>%
    dplyr::select(ONTOLOGY, ID, Description, FoldChange, pvalue, qvalue, geneID) %>%
    mutate(celltype = ct)
}
# Exploratory dot plots: for each cell type's top BP terms, show each
# retained cell type's fold change for those terms.
# NOTE(review): despite its name, neg.log10.qvalue.df holds FOLD CHANGES here
# (see the melt value.name = "FC"); the name is a leftover.
for (ct in names(enrich.res)){
  interest.GO.ids <- top.GO.ids[[ct]]$ID
  top.qvalue.mat <- matrix(nrow = length(interest.GO.ids),
                           ncol = length(enrich.res))
  rownames(top.qvalue.mat) <- interest.GO.ids
  colnames(top.qvalue.mat) <- names(enrich.res)
  for (ct2 in names(enrich.res)){
    match.indx <- match(interest.GO.ids, enrich.res[[ct2]]$ID)
    top.qvalue.mat[, ct2] <- enrich.res[[ct2]]$FoldChange[match.indx]
  }
  # Terms absent from a cell type get the neutral fold change 1.
  top.qvalue.mat[is.na(top.qvalue.mat)] <- 1
  neg.log10.qvalue.df <- reshape2::melt(top.qvalue.mat, value.name = "FC") %>%
    rename(GO_term = Var1, Cell_type = Var2)
  plot_out <- ggplot(neg.log10.qvalue.df,
                     aes(x = Cell_type, y = GO_term, size = FC, color = Cell_type)) +
    geom_point() +
    labs(title = paste("Top GO BP terms in", ct)) +
    ggClean()
  print(plot_out)
}
top.GO.df <- do.call(rbind, top.GO.ids) %>% distinct(ID, Description)
# interest.GO.ids <- c("GO:0060047", "GO:0030048", "GO:0007160", "GO:0043542", "GO:0030199", "GO:0010001", "GO:0030098", "GO:0042119") # By pvalue
#interest.GO.ids <- c("GO:0055003", "GO:1905065", "GO:0038166", "GO:0060312", "GO:0030199", "GO:0099560", "GO:0043379", "GO:0002281") # By fold change
#interest.GO.ids <- c("GO:0031033", "GO:0030049","GO:0060837","GO:0060055","GO:1901201","GO:0001941","GO:0002291","GO:0001780")
# Hand-picked term indices (5 per cell type) chosen from the plots above.
interest.GO.ids <- c(top.GO.ids$Cardiomyocyte$ID[c(9,11,14,20,21)], top.GO.ids$Endothelial$ID[c(6,13,15,38,82)], top.GO.ids$Fibroblast$ID[c(19,27,29,30,37)],
                     top.GO.ids$Myeloid$ID[c(2,13,19,22,26)])
interest.GO.terms <- top.GO.df$Description[match(interest.GO.ids, top.GO.df$ID)]
# Build parallel matrices of -log10(q) and fold change for the chosen terms.
top.qvalue.mat <- matrix(nrow = length(interest.GO.ids),
                         ncol = length(enrich.res))
rownames(top.qvalue.mat) <- interest.GO.terms
colnames(top.qvalue.mat) <- names(enrich.res)
top.FC.mat <- top.qvalue.mat
for (ct in names(enrich.res)){
  match.indx <- match(interest.GO.ids, enrich.res[[ct]]$ID)
  top.FC.mat[, ct] <- enrich.res[[ct]]$FoldChange[match.indx]
  top.qvalue.mat[, ct] <- -log10(enrich.res[[ct]]$qvalue[match.indx])
}
# Missing terms: significance 0, and a small positive FC so the dot is drawn.
top.qvalue.mat[is.na(top.qvalue.mat)] <- 0
neg.log10.qvalue.df <- reshape2::melt(top.qvalue.mat, value.name = "mlogqval") %>%
  rename(GO_term = Var1, Cell_type = Var2)
top.FC.mat[is.na(top.FC.mat)] <- 0.1
qval.foldchange.df <- reshape2::melt(top.FC.mat, value.name = "FC") %>%
  rename(GO_term = Var1, Cell_type = Var2) %>% left_join(., neg.log10.qvalue.df) %>%
  mutate(GO_term = factor(GO_term, levels = rev(unique(interest.GO.terms))))
# Final figure: dot size = fold change, colour = -log10(q); dashed lines
# separate the four 5-term groups.
pdf('manuscript_figures/figure3/Fig3_EnrichGO_Coaccess.pdf', width=12, height=9)
ggplot(qval.foldchange.df) +
  geom_point(aes(x = Cell_type, y = GO_term, size = FC, color = mlogqval)) +
  scale_size_continuous(range = c(1, 10)) +
  ggClean(rotate_axis = T) +
  scale_color_gradientn(colors = c('navy',"yellow",'red')) +
  geom_hline(yintercept = c(5.5,10.5,15.5), linetype='dashed')
dev.off()
# enrichGO analysis of high pip genes (fine-mapped AF GWAS, gene PIP > 0.5)
gene.map <- read_csv('GWAS/aFib_Finemapped_GenePIP_0.1_ActiveProm_07222021.csv')
high.pip.genes <- unique(gene.map$gene_name[gene.map$gene_pip > 0.5])
gene.map.enrich <- enrichGO(gene = high.pip.genes,
                            OrgDb = org.Hs.eg.db::org.Hs.eg.db,
                            keyType = "SYMBOL",
                            ont = "ALL",
                            qvalueCutoff = 0.05)
# Parse GeneRatio/BgRatio ("hits/total") and add FoldChange.
# NOTE(review): this duplicates the fold-change block used for the cell-type
# results above — candidate for a shared helper.
enrich_df <- gene.map.enrich@result
tg_ratio <- enrich_df %>% pull(GeneRatio)
tg_mat <- do.call(rbind, strsplit(tg_ratio, split = "/"))
enrich_df$tg_1 <- as.numeric(tg_mat[, 1])
enrich_df$tg_2 <- as.numeric(tg_mat[, 2])
bg_ratio <- enrich_df %>% pull(BgRatio)
bg_mat <- do.call(rbind, strsplit(bg_ratio, split = "/"))
enrich_df$bg_1 <- as.numeric(bg_mat[, 1])
enrich_df$bg_2 <- as.numeric(bg_mat[, 2])
gene.map.enrich@result <- enrich_df %>% mutate(FoldChange = (tg_1/tg_2) / (bg_1/bg_2))
saveRDS(gene.map.enrich, file = 'misc/gene_mapping_enrich_GO.rds')
# Top-10 BP and MF terms by fold change (one row per distinct gene set);
# Description is releveled so bars plot top-to-bottom.
go.res <- gene.map.enrich@result %>% arrange(-FoldChange) %>% distinct(geneID, .keep_all = T)
top.bp.go <- go.res %>% filter(ONTOLOGY == "BP") %>% slice(1:10)
top.mf.go <- go.res %>% filter(ONTOLOGY == "MF") %>% slice(1:10)
top.bp.go$Description <- factor(top.bp.go$Description, levels = rev(top.bp.go$Description))
top.mf.go$Description <- factor(top.mf.go$Description, levels = rev(top.mf.go$Description))
pdf('manuscript_figures/GWAS_bp_genes_GOenrich.pdf',width=12,height=8)
ggplot(top.bp.go, aes(x = FoldChange, y = Description)) +
  geom_bar(stat='identity', fill="grey") +
  scale_size_continuous(range = c(1, 10)) +
  ggClean(rotate_axis = T) +
  xlab('Enrichment')
dev.off()
pdf('manuscript_figures/GWAS_mf_genes_GOenrich.pdf',width=16,height=8)
ggplot(top.mf.go, aes(x = FoldChange, y = Description)) +
  geom_bar(stat='identity', fill = "slategray1") +
  scale_size_continuous(range = c(1, 10)) +
  ggClean(rotate_axis = T) +
  xlab('Enrichment')
dev.off()
go.res %>% arrange(-FoldChange) %>% write_csv('High_PIP_Genes_GO_Results.csv')
|
2277981abaf7e15fc928dd5faef795faabba302a
|
f8ecf9a6c5729b7eab34bf8fe73f358e63058aba
|
/데이터 분석/1. 데이터 조작 및 EDA.R
|
755cefd8f3c046eec5c92cbe80807533c2001fe2
|
[] |
no_license
|
Sehee99/R-Study
|
edefb9e80e0532a39792219d85598968b564dd40
|
227a86648d6e2cb138f031e1dd27e7ec0582c9ab
|
refs/heads/main
| 2023-07-13T05:44:11.114530
| 2021-08-31T11:29:05
| 2021-08-31T11:29:05
| 398,975,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,708
|
r
|
1. 데이터 조작 및 EDA.R
|
### 1 Measures of central tendency ----
## 1.1 Mean and median
# Mean (salaries in units of one million KRW)
A_salary <- c(25, 28, 50, 60, 30, 35, 40, 70, 40, 70, 40, 100, 30, 30)
B_salary <- c(20, 40, 25, 25, 35, 25, 20, 10, 55, 65, 100, 100, 150, 300)
mean(A_salary)
mean(B_salary)
# Use na.rm = TRUE to drop missing values (NA) before averaging
mean(A_salary, na.rm = TRUE)
# Median
median(A_salary)
median(B_salary)
# na.rm = TRUE likewise drops NAs before taking the median
median(A_salary, na.rm = TRUE)

## 1.2 Trimmed mean
mean(A_salary, trim = 0.1)  # drop the lowest and highest 10% of values
mean(B_salary, trim = 0.1)

### 2 Measures of spread ----
## 2.1 Range, minimum and maximum
range(A_salary)
range(B_salary)
min(A_salary)
max(A_salary)
min(B_salary)
max(B_salary)

## 2.2 Variance and standard deviation
var(A_salary)
var(B_salary)
sd(A_salary)
sd(B_salary)

### 3 Exploring the distribution ----
## 3.1 Percentiles and quartiles
quantile(A_salary, 0.9)  # 90th percentile
quantile(B_salary, 0.9)
quantile(A_salary)       # quartiles
quantile(B_salary)

## 3.2 Box plot
boxplot(A_salary, B_salary, names = c("A회사 salary", "B회사 salary"))

## 3.3 Histogram
# BUG FIX: the original passed ylab = "인원수, break =5" — the intended
# breaks = 5 argument had been swallowed into the axis-label string.
hist(A_salary, xlab = "A사 salary", ylab = "인원수", breaks = 5)
hist(B_salary, xlab = "B사 salary", ylab = "인원수", breaks = 5)

## 3.4 Frequency table
# Numeric data: bin with cut() first, then tabulate
cut_value <- cut(A_salary, breaks = 5)
freq <- table(cut_value)
freq
# Categorical data: table() tabulates directly
A_gender <- as.factor(c('남', '남', '남', '남', '남', '남', '남', '남', '남', '여', '여', '여', '여', '여'))
B_gender <- as.factor(c('남', '남', '남', '남', '여', '여', '여', '여', '여', '여', '여', '남', '여', '여'))
# BUG FIX: the original used `<-` inside data.frame() (which mangles the
# column names to e.g. "gender....A_gender" and leaks globals) and assigned
# B_gender to B's salary column instead of B_salary.
A <- data.frame(gender = A_gender, salary = A_salary)
B <- data.frame(gender = B_gender, salary = B_salary)
freqA <- table(A$gender)
freqA
freqB <- table(B$gender)
freqB
# Relative frequency tables, from the gender counts computed above
prop.table(freqA)
prop.table(freqB)

## 3.5 Bar chart
barplot(freqA, names = c("남", "여"), col = c("skyblue", "pink"), ylim = c(0, 10))
title(main = "A사")
barplot(freqB, names = c("남", "여"), col = c("skyblue", "pink"), ylim = c(0, 10))
title(main = "B사")

## 3.6 Pie chart
pie(x = freqA, col = c("skyblue", "pink"), main = "A사")
pie(x = freqB, col = c("skyblue", "pink"), main = "B사")

### 4 Relationships between variables ----
## 4.1 Scatter plot
A_salary <- c(25, 28, 50, 60, 30, 35, 40, 70, 40, 70, 40, 100, 30, 30)  # salary
A_hireyears <- c(1, 1, 5, 6, 3, 3, 4, 7, 4, 7, 4, 10, 3, 3)             # years employed
A <- data.frame(salary = A_salary, hireyears = A_hireyears)             # fixed `<-` misuse here too
plot(A$hireyears, A$salary, xlab = "근무년수", ylab = "연봉(백만원단위)")
# pairs(): scatter-plot matrix showing several variables at once
pairs(iris[, 1:4], main = "iris data")

## 4.2 Correlation coefficient
cor(A$hireyears, A$salary)

## 4.3 Correlation matrix
cor(iris[, 1:4])

## 4.4 Correlation-matrix heat map
heatmap(cor(iris[, 1:4]))
|
5e49b6a0593e4261dc1239521fa2793677619005
|
58c13145c7f371e6f9e9e82c03396f8b5589ef1b
|
/Sim_a.R
|
74d45a370e2bff1bcadc608a852c81610d48fa00
|
[] |
no_license
|
TIANHUI8/Dissertation_Simulations
|
2fdba84221353fcde2cb50a037d38e26d2b74715
|
6ce291d0c62fdf41c17c9d9f08dd876497cc8597
|
refs/heads/master
| 2020-03-22T11:44:55.449328
| 2018-07-12T16:57:11
| 2018-07-12T16:57:11
| 139,993,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,130
|
r
|
Sim_a.R
|
library(glmnet)
library(BGLR)
# Simulation design -------------------------------------------------------
sims <- 4                       # number of replications
p <- 1000                       # number of markers
n <- c(10, 45, 100)             # sample sizes considered
lenn <- length(n)
h <- c(200, 100, 0)             # numbers of misimputed markers considered
lenh <- length(h)

# MCMC settings for the Bayesian method
nIter <- 12000
burnIn <- 2000

# True effects: six causal markers, the remaining p - 6 are null
beta.true <- c(0.25, 0.5, 0.75, 1, 2, 5, rep(0, p - 6))

# Step-up p-value thresholds: 0.05 * k / p for k = 1..p
p.cutoff <- seq_len(p) * 0.05 / p

# Posterior-effect cutoff for the Bayesian method
# (an earlier run used 0.01563455)
beta.cutoff <- 0.03086782

# Latent-normal correlations for the causal-marker pairs
r <- c(0.8, 0.4)
lenr <- length(r)

# Result containers: one row per replication, one column per design cell
# (3 sample sizes x 3 misimputation levels x 3 x 3 x 3 scenarios = 243)
fp1 <- fp2 <- fp3 <- matrix(0, sims, 243)
tp1 <- tp2 <- tp3 <- matrix(0, sims, 243)
fdr1 <- fdr2 <- fdr3 <- matrix(0, sims, 243)
fpr1 <- fpr2 <- fpr3 <- matrix(0, sims, 243)
tpr1 <- tpr2 <- tpr3 <- matrix(0, sims, 243)
mse1 <- mse2 <- mse3 <- matrix(0, sims, 243)
nmv1 <- nmv2 <- nmv3 <- matrix(0, sims, 243)
s2 <- s3 <- matrix(0, sims, 243)

# Marker allele frequencies, drawn uniformly on (0, 1)
q <- runif(p, min = 0, max = 1)
# Main simulation loop ----------------------------------------------------
# Design grid (243 cells per replication):
#   sample size (3) x misimputation level (3) x marker correlation (3)
#   x mean structure (3) x genetic model (3).

# Map a standard-normal draw to genotype codes {0, 1, 2} using the
# -0.67 / 0.67 tertile cut points (z < -0.67 -> 0, |z| <= 0.67 -> 1, else 2).
trichotomize <- function(z) {
  ifelse(z < -0.67, 0, ifelse(z <= 0.67, 1, 2))
}

# Draw a pair of genotype columns whose latent normals have correlation r.
# RNG draws occur in the same order as the original unrolled code.
correlated_pair <- function(nobs, r) {
  z1 <- rnorm(nobs, 0, 1)
  z2 <- r * z1 + rnorm(nobs, 0, sd = sqrt(1 - r^2))
  cbind(trichotomize(z1), trichotomize(z2))
}

for (s in seq_len(sims)) {
  count <- 1
  for (i in seq_len(lenn)) {              # sample size n[i]
    for (j in seq_len(lenh)) {            # number of misimputed markers h[j]
      for (l in 1:3) {                    # correlation structure of causal markers
        if (l == 3) {
          # Independent binomial genotypes for all p markers.
          X <- t(matrix(rbinom(n[i] * p, 2, rep(q, n[i])), ncol = n[i]))
        } else {
          # First six markers: three correlated latent-normal pairs (corr r[l]);
          # the rest are independent binomial genotypes.
          pair12 <- correlated_pair(n[i], r[l])
          pair34 <- correlated_pair(n[i], r[l])
          pair56 <- correlated_pair(n[i], r[l])
          Z <- t(matrix(rbinom(n[i] * (p - 6), 2, rep(q[-(1:6)], n[i])), ncol = n[i]))
          X <- cbind(pair12, pair34, pair56, Z)
        }
        for (t in 1:3) {                  # population mean structure
          if (t == 3) {
            mu <- rep(0, n[i])
          } else if (t == 1) {
            mu <- c(rep(5, floor(n[i]/3)), rep(1, floor(n[i]/3)), rep(5, n[i] - 2*floor(n[i]/3)))
          } else {
            mu <- c(rep(4, floor(n[i]/3)), rep(3, floor(n[i]/3)), rep(2, n[i] - 2*floor(n[i]/3)))
          }
          for (m in 1:3) {                # genetic model for the phenotype
            if (m == 3) {
              # additive model
              Y <- mu + 3 + X %*% beta.true + rnorm(n[i], 0, 1)
            } else if (m == 2) {
              # additive + two pairwise interactions + a quadratic term
              Y <- mu + 3 + X %*% beta.true + X[,1]*X[,6] + X[,2]*X[,5] + 2*X[,3]^2 + rnorm(n[i], 0, 1)
            } else {
              # epistatic on/off model: additive effects gated by parity of markers 1-3
              Y <- ((X[,1] + X[,2] + X[,3]) %% 2) * (X %*% beta.true) + rnorm(n[i], 0, 1)
            }

            # Misimpute h[j] randomly chosen non-causal markers: half the
            # subjects get +1, the rest +2, then reduce all genotypes mod 3.
            # Iterating in ascending column order matches the RNG stream of
            # the original (which scanned c = 1..p and tested membership).
            idx <- sample(7:p, h[j], replace = FALSE)
            for (c in sort(idx)) {
              id_c <- sample(1:n[i], floor(n[i]/2), replace = FALSE)
              X[id_c, c] <- X[id_c, c] + 1
              X[-id_c, c] <- X[-id_c, c] + 2
            }
            X <- X %% 3

            ## --- Method 1: single-marker regression with step-up p cutoffs ---
            pValues <- numeric(p)           # preallocated (originally grown with c())
            beta.hat1 <- numeric(p)
            for (a in seq_len(p)) {
              if (length(unique(X[, a])) == 1) {
                # monomorphic marker: no slope can be estimated
                pValues[a] <- 1
                beta.hat1[a] <- 0
              } else {
                fm <- lm(Y ~ X[, a])
                pValues[a] <- summary(fm)$coef[2, 4]
                beta.hat1[a] <- summary(fm)$coef[2, 1]
              }
            }
            index <- order(pValues)
            p.sort <- sort(pValues)
            # BUG FIX: the original substituted the scalar 0 when no marker
            # passed the cutoff, so sum(Positive <= 6) spuriously reported one
            # true positive; integer(0) gives the correct zero counts, and the
            # max(., 1) denominator matches the convention of methods 2 and 3.
            Positive <- index[which(p.sort < p.cutoff)]
            fp1[s, count] <- sum(Positive > 6)
            tp1[s, count] <- sum(Positive <= 6)
            fdr1[s, count] <- sum(Positive > 6) / max(length(Positive), 1)
            fpr1[s, count] <- sum(Positive > 6) / (p - 6)
            tpr1[s, count] <- sum(Positive <= 6) / 6
            mse1[s, count] <- var(beta.hat1) + sum((beta.hat1 - beta.true)^2)
            marker1 <- Positive

            ## --- Method 2: LASSO (glmnet, lambda.1se from cross-validation) ---
            cv.fit <- cv.glmnet(X, Y, alpha = 1)
            fit.lasso <- glmnet(X, Y, family = "gaussian", alpha = 1, lambda = cv.fit$lambda.1se)
            beta.hat2 <- coef(fit.lasso)[-1]      # drop the intercept
            fp2[s, count] <- sum(beta.hat2[7:p] != 0)
            tp2[s, count] <- sum(beta.hat2[1:6] != 0)
            s2[s, count] <- sum(beta.hat2 != 0)
            fdr2[s, count] <- sum(beta.hat2[7:p] != 0) / max(s2[s, count], 1)
            tpr2[s, count] <- sum(beta.hat2[1:6] != 0) / 6
            fpr2[s, count] <- sum(beta.hat2[7:p] != 0) / (p - 6)
            mse2[s, count] <- var(beta.hat2) + sum((beta.hat2 - beta.true)^2)
            id.l <- order(abs(beta.hat2), decreasing = TRUE)
            # NOTE(review): when s2 is 0, 1:0 yields c(1, 0) and marker2 picks
            # marker 1 anyway — preserved for comparability; confirm intended.
            marker2 <- id.l[1:s2[s, count]]

            ## --- Method 3: Bayesian LASSO via BGLR ---
            # NOTE(review): comments elsewhere call this "BayesA", but the model
            # requested is the Bayesian LASSO ('BL') — confirm which was intended.
            ETA <- list(MRK = list(X = X, model = 'BL'))
            fmBA <- BGLR(y = Y, ETA = ETA, nIter = nIter, burnIn = burnIn, saveAt = 'BL_')
            beta.hat3 <- fmBA$ETA[[1]]$b
            # BUG FIX: the original referenced the undefined name `beta3.cutoff`
            # in the fp3/tp3 lines, which would abort the whole run at the first
            # design cell; `beta.cutoff` (defined in the setup) is what is used
            # consistently in the remaining lines.
            fp3[s, count] <- sum(abs(beta.hat3)[7:p] > beta.cutoff)
            tp3[s, count] <- sum(abs(beta.hat3)[1:6] > beta.cutoff)
            s3[s, count] <- sum(abs(beta.hat3) > beta.cutoff)
            fdr3[s, count] <- sum(abs(beta.hat3)[7:p] > beta.cutoff) / max(s3[s, count], 1)
            tpr3[s, count] <- sum(abs(beta.hat3)[1:6] > beta.cutoff) / 6
            fpr3[s, count] <- sum(abs(beta.hat3)[7:p] > beta.cutoff) / (p - 6)
            mse3[s, count] <- var(beta.hat3) + sum((beta.hat3 - beta.true)^2)
            id.b <- order(abs(beta.hat3), decreasing = TRUE)
            marker3 <- id.b[1:s3[s, count]]

            # Pairwise overlap of the marker sets selected by the three methods.
            nmv1[s, count] <- sum(marker1 %in% marker2)
            nmv2[s, count] <- sum(marker1 %in% marker3)
            nmv3[s, count] <- sum(marker2 %in% marker3)
            count <- count + 1
          }
        }
      }
    }
  }
}
# write.csv(M, file = "design.csv")

# Stack each metric's per-method matrices (rows come from the single-marker
# scan, the LASSO fit, and the Bayesian-LASSO fit, in that order) and save
# every stacked matrix to its own .RData file. Object names are preserved
# because save() stores objects under their variable name.

# False-discovery-rate matrices
result1 <- rbind(fdr1, fdr2, fdr3)
save(result1, file = "fdr.RData")

# False-positive-rate matrices
result2 <- rbind(fpr1, fpr2, fpr3)
save(result2, file = "fpr.RData")

# True-positive-rate matrices
result3 <- rbind(tpr1, tpr2, tpr3)
save(result3, file = "tpr.RData")

# Estimated-coefficient MSE matrices
result4 <- rbind(mse1, mse2, mse3)
save(result4, file = "mse.RData")

# Pairwise marker-overlap counts between methods
result5 <- rbind(nmv1, nmv2, nmv3)
save(result5, file = "nmv.RData")

# Raw false-positive counts
result6 <- rbind(fp1, fp2, fp3)
save(result6, file = "fp.RData")

# Raw true-positive counts
result7 <- rbind(tp1, tp2, tp3)
save(result7, file = "tp.RData")
|
3b3e18e04a69b4428cdf1c61c1fc95295d6d0244
|
fd634124a7a77b53e46adb939fe5469d44c59d2c
|
/man/DB.connections.Rd
|
78dc82fca298bf7dc63c199b1f316e333b6392c5
|
[] |
no_license
|
blueraleigh/db
|
d315141fc091a62326e5d3938ec803bc217f24e4
|
4cba3e6dfe646b37da1ebaaa0ff2293331cd1262
|
refs/heads/main
| 2023-09-02T18:17:53.955924
| 2021-11-16T00:37:03
| 2021-11-16T00:37:03
| 347,817,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 518
|
rd
|
DB.connections.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\docType{data}
\name{DB.connections}
\alias{DB.connections}
\title{An environment to store database connections}
\format{
An object of class \code{environment} of length 0.
}
\usage{
DB.connections
}
\description{
An environment to store database connections
}
\details{
Database connections are registered under their
file name. Temporary on-disk and in-memory databases are
never registered in this environment.
}
\keyword{datasets}
|
e0237fc562bd6a7d1fca993e8000cdb7aa9a92fa
|
810fc2ef81f54eba261ae92acb8ad2eb9d7655ba
|
/00-clean-data-old.R
|
7097de731c96a0c2f8bff2895402d2e4d1afe817
|
[] |
no_license
|
sriramesh/Analysis-of-Extreme-Political-Violence-in-Sudan-2005-2020
|
ded91823096a63054fb52f3dd45569bc8a25dba1
|
337399cd7942d562b759da691cf4d7d77effa6b0
|
refs/heads/main
| 2023-04-30T01:34:38.941654
| 2023-04-12T17:30:56
| 2023-04-12T17:30:56
| 318,114,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
00-clean-data-old.R
|
# Keep character columns as characters when data frames are created.
# (Use TRUE/FALSE, never the reassignable T/F shortcuts.)
options(stringsAsFactors = FALSE)
# acled <- read.csv("1900-01-01-2020-05-06-Sri_Lanka.csv", sep = ";") # Sri Lanka

# Build one HTML popup label per ACLED event row, pulling that row's
# source, event type, event date, and fatality count.
# seq_len() (rather than seq()/1:nrow) yields an empty sequence when the
# data frame has zero rows instead of misbehaving.
# NOTE(review): the second '<p>' below looks like it was meant to be the
# closing '</p>' tag — confirm before changing, since it alters the popup HTML.
labs <- lapply(seq_len(nrow(acled)), function(i) {
  paste0( '<p>', "Source: ", acled[i, "source"], '<p>',
          "Event Type: ", acled[i, "event_type"], '</p>',
          "Event Date: ", acled[i, "event_date"], '</p>',
          "Fatalities: ", acled[i, "fatalities"], '</p>' )
})
# Categorical palette: one "Dark2" hue per distinct event type.
pal <- colorFactor("Dark2", acled$event_type)

# Interactive map of the events: semi-transparent base tiles, one circle
# marker per row (position from longitude/latitude, radius scaled from the
# fatality count, fill colour from the event type, HTML popup labels built
# above), plus a legend keyed on event type.
acled_map <- leaflet(acled) %>%
  addTiles(options = tileOptions(opacity = .6)) %>%
  addCircleMarkers(
    lng = ~longitude,
    lat = ~latitude,
    fillOpacity = 1,
    fillColor = ~pal(event_type),
    radius = ~fatalities * .03,
    weight = 0.1,
    stroke = TRUE,
    label = lapply(labs, htmltools::HTML)
  ) %>%
  addLegend(pal = pal, values = ~event_type, title = "Event Type")

acled_map
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.