blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a093e02b2ce226dd6e00b7b10ec33b053c74d163
|
16dcba576362af261592e4a94bab06f23c9f9b6e
|
/analysis/scripts/viznetwork.R
|
ba3058fb297b103b8715db4dbdb5183a5284c93f
|
[] |
no_license
|
confunguido/prioritizing_interventions_basic_training
|
1bcaa53a20774a4a47d2ca73faa505f623c661b8
|
19911fb4f251bac67ee42c50ffb4e11137f28c6e
|
refs/heads/master
| 2023-06-26T05:49:01.596517
| 2021-07-12T14:32:02
| 2021-07-12T14:32:02
| 384,942,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,281
|
r
|
viznetwork.R
|
# Visualise one simulated campus-outbreak transmission network, with edges
# coloured by the relationship between infector and infectee.
# NOTE(review): `edges`, `date.symptoms`, `isolate.date`, `quarantine.date`,
# `recruit.cocoon`, `recruit.company` and `numrecruits` are assumed to come
# from FB_fit.RData loaded below -- confirm the .RData contents.
library(igraph)
load('FB_fit.RData')
############################################################################
# plot network graph
# index of the single simulation to visualise
x = 102
edges1 = edges[[x]]
date.symptoms1 = date.symptoms[x,]
isolate.date1 = isolate.date[x,]
quarantine.date1 = quarantine.date[x,]
# keep only realised transmission events (rows with a nonzero infector id)
campusCases = edges1[which(edges1[,1]!=0),]
# per-case status flags (computed here but not used in the plot below)
symptomatic = !is.na(date.symptoms1[campusCases])
isolated = !is.na(isolate.date1[campusCases])
quarantined = !is.na(quarantine.date1[campusCases])
# TRUE where infector (col 1) and infectee (col 2) share a cocoon / company
samecocoon = recruit.cocoon[campusCases[,1]]==recruit.cocoon[campusCases[,2]]
samecompany = recruit.company[campusCases[,1]]==recruit.company[campusCases[,2]]
# rows whose infector id is beyond the recruit range, i.e. a staff infector
staffinf = which(campusCases[,1]>numrecruits)
# edge colours: gray = other; later assignments overwrite earlier ones, so
# precedence is staff (green) > cocoon (blue) > company (red) > other (gray)
edgecol = rep('gray',length(campusCases[,1]))
edgecol[which(samecompany)] = 'red'
edgecol[which(samecocoon)] = 'blue'
edgecol[staffinf] = 'green'
contactGraph = graph_from_data_frame(campusCases)
plot(contactGraph, edge.arrow.size = 0, edge.color = edgecol,
vertex.size = 5,
vertex.label=NA, edge.width = 4, vertex.frame.color = NA,
vertex.color = 'black',
layout = layout_as_tree)
# printed for interactive inspection of cocoon membership along each edge
recruit.cocoon[campusCases[,1]]
recruit.cocoon[campusCases[,2]]
# fraction of transmission pairs sharing a cocoon / a company;
# length(campusCases)/2 is the row count of the two-column edge matrix
length(which(recruit.cocoon[campusCases[,1]]==recruit.cocoon[campusCases[,2]]))/(length(campusCases)/2)
length(which(recruit.company[campusCases[,1]]==recruit.company[campusCases[,2]]))/(length(campusCases)/2)
legend('topright', c('cocoon','company','staff','other'), col = c('blue','red','green','gray'), lty = 1, lwd = 4)
############################################################################
# stacked bar chart for contact breakdown
# For each of 500 simulations, compute the fraction of transmission pairs
# that share a cocoon, share a company, have a staff infector, or neither.
samecocoon = vector()
samecompany = vector()
staffinf = vector()
other = vector()
for (x in 1:500){
#print(x)
edges1 = edges[[x]]
campusCases = edges1[which(edges1[,1]!=0),]
# more than one pair: campusCases stays a two-column matrix
if (length(campusCases)/2 > 1){
samecocoon[x] = length(which(
recruit.cocoon[campusCases[,1]]==recruit.cocoon[campusCases[,2]]))/(length(campusCases)/2)
samecompany[x] = length(which(
recruit.company[campusCases[,1]]==recruit.company[campusCases[,2]]))/(length(campusCases)/2)
staffinf[x] = length(which(campusCases[,1]>numrecruits))/(length(campusCases)/2)
other[x] = 1-samecocoon[x]-samecompany[x]-staffinf[x]
# exactly one pair: single-row subsetting drops the matrix to a length-2
# vector, so elements are addressed as campusCases[1] / campusCases[2]
} else if (length(campusCases)/2== 1){
samecocoon[x] = length(which(recruit.cocoon[campusCases[1]]==recruit.cocoon[campusCases[2]]))/(length(campusCases)/2)
samecompany[x] = length(which(recruit.company[campusCases[1]]==recruit.company[campusCases[2]]))/(length(campusCases)/2)
staffinf[x] = length(which(campusCases[1]>numrecruits))/(length(campusCases)/2)
other[x] = 1-samecocoon[x]-samecompany[x]-staffinf[x]
}
}
idx = c(11,52,100,202,450) # selection of simulations to plot
# rows = categories (cocoon, company, staff, other), cols = the 5 simulations
dat = matrix(c(samecocoon[idx],samecompany[idx],staffinf[idx],other[idx]),ncol = 5,byrow = T)
colnames(dat) = c('sim1','sim2','sim3','sim4','sim5')
# widen the right margin so the legend fits outside the plot region
par(mar=c(5, 4, 4, 10), xpd=TRUE)
barplot(dat, col = c('blue','red','green','gray'))
legend('topright', inset=c(-0.3,0), c('cocoon','company','staff','other'),
col = c('blue','red','green','gray'),lty = 1, lwd = 4)
|
892828ba662b4347bc99c623384c88eb979c4328
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pivottabler/vignettes/v02-datagroups.R
|
e9ef9b07104c9ce9f30ef496e1904542af1ec3c3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,013
|
r
|
v02-datagroups.R
|
# Extracted (purl'd) vignette code demonstrating pivottabler data groups.
# `bhmtrains` is the example dataset shipped with the pivottabler package.
## ---- message=FALSE, warning=FALSE---------------------------------------
# Baseline pivot: column groups are generated from the data, so only
# combinations of TrainCategory x PowerType present in the data appear.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Same pivot, but generate every TrainCategory x PowerType combination,
# including ones with no data.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType", onlyCombinationsThatExist=FALSE)
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Restrict PowerType columns to an explicit list instead of deriving
# the values from the data.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType", fromData=FALSE, explicitListOfValues=list("DMU", "EMU"))
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# As above, with visual totals so displayed totals match the visible columns.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType", fromData=FALSE, explicitListOfValues=list("DMU", "EMU"), visualTotals=TRUE)
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Explicit row groups: a vector element combines several TOC values into
# a single (unnamed) combined group.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC", fromData=FALSE, explicitListOfValues=list(
"London Midland", "CrossCountry", c("Arriva Trains Wales", "Virgin Trains")))
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# As above, but the combined group is given the caption "Other".
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC", fromData=FALSE, explicitListOfValues=list(
"London Midland", "CrossCountry", "Other"=c("Arriva Trains Wales", "Virgin Trains")))
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# derive the date of each train (from the arrival/dep times),
# then the month of each train from the date of each train
library(dplyr)
library(lubridate)
trains <- mutate(bhmtrains,
GbttDate=if_else(is.na(GbttArrival), GbttDeparture, GbttArrival),
GbttMonth=make_date(year=year(GbttDate), month=month(GbttDate), day=1))
# Pivot with the derived month as a column group (default date rendering).
library(pivottabler)
pt <- PivotTable$new()
pt$addData(trains)
pt$addColumnDataGroups("GbttMonth")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# derive the date of each train (from the arrival/dep times),
# then the month of each train from the date of each train
library(dplyr)
library(lubridate)
trains <- mutate(bhmtrains,
GbttDate=if_else(is.na(GbttArrival), GbttDeparture, GbttArrival),
GbttMonth=make_date(year=year(GbttDate), month=month(GbttDate), day=1))
# Same pivot, formatting the month captions via a format string ("%B %Y").
library(pivottabler)
pt <- PivotTable$new()
pt$addData(trains)
pt$addColumnDataGroups("GbttMonth", dataFormat=list(format="%B %Y"))
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# derive the date of each train (from the arrival/dep times), then the month of each train from the date of each train
library(dplyr)
library(lubridate)
trains <- mutate(bhmtrains,
GbttDate=if_else(is.na(GbttArrival), GbttDeparture, GbttArrival),
GbttMonth=make_date(year=year(GbttDate), month=month(GbttDate), day=1))
# define a custom formatting function
# (same output as the format string above, but via a user-supplied function)
formatDate <- function(x) {
base::format(x, format="%B %Y")
}
library(pivottabler)
pt <- PivotTable$new()
pt$addData(trains)
pt$addColumnDataGroups("GbttMonth", dataFormat=formatDate)
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Reference pivot with default (ascending) data group ordering.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Sort the TOC row groups descending by value at creation time.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC", dataSortOrder="desc")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Re-sort the row groups after the calculation is defined, ordering by the
# calculated totals rather than by the group values.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC", dataSortOrder="desc")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$sortRowDataGroups(levelNumber=1, orderBy="calculation", sortOrder="desc")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Sort the second-level column groups (PowerType) by the calculated totals.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
pt$sortColumnDataGroups(levelNumber=2, orderBy="calculation", sortOrder="desc")
pt$renderPivot()
## ---- message=FALSE, warning=FALSE---------------------------------------
# Demonstrates applying a calculation-based sort and then undoing it.
library(pivottabler)
pt <- PivotTable$new()
pt$addData(bhmtrains)
pt$addColumnDataGroups("TrainCategory")
pt$addColumnDataGroups("PowerType")
pt$addRowDataGroups("TOC")
pt$defineCalculation(calculationName="TotalTrains", summariseExpression="n()")
# the following line sorts the data based on the totals
pt$sortColumnDataGroups(levelNumber=2, orderBy="calculation", sortOrder="desc")
# the following line resets the sort order back, i.e. removes the sort applied in the line above
pt$sortColumnDataGroups(levelNumber=2, orderBy="value", sortOrder="asc")
pt$renderPivot()
|
1a6dfd8d4852e7b3e50d895633ee6fcff509a9fc
|
2dfc0a0f7339c62f03498b17fbd4666d30d829c7
|
/inst/doc/ChemoSpec.R
|
a4e3872b8d809a9587fe940370c143898c50aae7
|
[] |
no_license
|
msgama/ChemoSpec
|
a8780f47c62f59ca31e7a9ced733ea910b78b34c
|
ae7e81956608e46df0e09ac5613efac55a105d26
|
refs/heads/master
| 2020-07-12T05:00:09.288588
| 2019-07-25T18:00:02
| 2019-07-25T18:00:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,914
|
r
|
ChemoSpec.R
|
# Extracted (purl'd) code from the ChemoSpec vignette: workflow for
# exploratory analysis of IR spectra (the packaged SrE.IR data set).
## ----SetUp, echo = FALSE, eval = TRUE, results = "hide"----
# R options & configuration:
set.seed(9)
# NOTE(review): rm(list = ls()) is a vignette-setup idiom; it clears the
# workspace and would be unsafe if this script were sourced interactively.
rm(list = ls())
suppressMessages(library("knitr"))
suppressMessages(library("ChemoSpec"))
suppressMessages(library("ChemoSpecUtils"))
suppressMessages(library("mclust"))
suppressMessages(library("RColorBrewer"))
options(width = 30) # for pinp specifically (narrow cols)
desc <- packageDescription("ChemoSpec")
# Stuff specifically for knitr:
#opts_chunk$set(eval = FALSE)
## ----Chunk1, results = "hide", eval = FALSE----
# source("My_First_ChemoSpec.R")
## ----Chunk2, results = "hide", eval = FALSE----
# Example (not run): building a Spectra object from raw instrument files.
# ssp <- files2SpectraObject(
# gr.crit = c("sspA", "sspB"),
# gr.cols = c("red", "blue"),
# freq.unit = "ppm",
# int.unit = "peak intensity",
# descrip = "Subspecies Study",
# out.file = "subsp")
## ----Chunk3, results = "hide", eval = FALSE----
# Example (not run): reloading a previously saved Spectra object.
# SubspeciesNMR <- loadObject("subsp.RData")
## ----Chunk5-----------------
data(SrE.IR) # makes the data available
sumSpectra(SrE.IR)
## ----Chunk8, fig.cap = "\\label{plot}Sample plot."----
# We'll make a fancy title here
# and re-use in other plots
myt <- expression(
bolditalic(Serenoa)~
bolditalic(repens)~
bold(Extract~IR~Spectra))
# Plot four selected spectra, vertically offset for readability.
plotSpectra(SrE.IR,
main = myt,
which = c(1, 2, 14, 16),
yrange = c(0, 1.6),
offset = 0.4,
lab.pos = 2200)
## ----Chunk10, fig.cap = "\\label{subplot}Detail of the carbonyl region."----
# Same spectra zoomed to 1650-1800 cm^-1 (carbonyl region).
plotSpectra(SrE.IR,
main = myt,
which = c(1, 2, 14, 16),
yrange = c(0, 0.6),
offset = 0.1,
lab.pos = 1775,
xlim = c(1650, 1800))
## ----Chunk9-----------------
# if there are only a few spectra
# show all of the names
SrE.IR$names
# if there are a lot of spectra,
# grep for the desired names
grep("OO", SrE.IR$names)
## ----Chunk10b, fig.cap = "\\label{baseline}Correcting baseline drift."----
# Baseline-correct every spectrum; retC = TRUE returns the corrected object.
SrE2.IR <- baselineSpectra(SrE.IR,
int = FALSE,
method = "modpolyfit",
retC = TRUE)
## ----Chunk18----------------
# Reduce resolution by averaging every 4 data points into one bin.
tmp <- binSpectra(SrE.IR, bin.ratio = 4)
sumSpectra(tmp)
## ----Chunk11----------------
# Remove a named sample and confirm it is gone.
noTD <- removeSample(SrE2.IR,
rem.sam = c("TD_adSrE"))
sumSpectra(noTD)
grep("TD_adSrE", noTD$names)
## ----Chunk12----------------
SrE <- grep("SrE", SrE2.IR$names)
# show the name(s) that contain "SrE"
SrE2.IR$names[SrE]
SrE # gives the corresponding indices
## ----Chunk14, fig.cap = "\\label{surv}Checking for regions of no interest.", fig.dim = c(7,7), out.width = "\\linewidth", out.height = "\\linewidth"----
# Survey spectral variability (inter-quartile range) across the full range.
surveySpectra(SrE2.IR,
method = "iqr",
main = myt,
by.gr = FALSE)
## ----Chunk14d, fig.cap = "\\label{surv2}Checking for regions of no interest."----
surveySpectra2(SrE2.IR,
method = "iqr",
main = myt)
## ----Chunk14a, fig.cap = "\\label{survA}Detail of carbonyl region.", fig.dim = c(7,7), out.width = "\\linewidth", out.height = "\\linewidth"----
surveySpectra(SrE2.IR,
method = "iqr",
main = "Detail of Carbonyl Region",
by.gr = FALSE,
xlim = c(1650, 1800))
## ----Chunk14b, fig.cap = "\\label{survB}Detail of carbonyl region by group.", fig.dim = c(7,7), out.width = "\\linewidth", out.height = "\\linewidth"----
surveySpectra(SrE2.IR,
method = "iqr",
main = "Detail of Carbonyl Region",
by.gr = TRUE,
xlim = c(1650, 1800))
## ----Chunk14c, fig.cap = "\\label{survC}Inspection of an uninteresting spectral region.", fig.dim = c(7,7), out.width = "\\linewidth", out.height = "\\linewidth"----
# The 1800-2500 cm^-1 region shows little signal and is removed next.
surveySpectra(SrE2.IR,
method = "iqr",
main = "Detail of Empty Region",
by.gr = FALSE,
xlim = c(1800, 2500),
ylim = c(0.0, 0.05))
## ----Chunk15----------------
# Drop the uninformative frequency range identified above.
SrE3.IR <- removeFreq(SrE2.IR,
rem.freq = SrE2.IR$freq > 1800 &
SrE2.IR$freq < 2500)
sumSpectra(SrE3.IR)
## ----Chunk7, fig.cap = "\\label{gaps}Identifying gaps in a data set."----
# Verify where removeFreq left a gap in the frequency axis.
check4Gaps(SrE3.IR$freq, SrE3.IR$data[1,])
## ----Chunk19, fig.cap = "\\label{hca}Hierarchical cluster analysis."----
HCA <- hcaSpectra(SrE3.IR, main = myt)
## ----Chunk10a, fig.cap = "\\label{classPCA}Classical PCA scores."----
# Classical (non-robust) PCA on the cleaned spectra, unscaled.
c_res <- c_pcaSpectra(SrE3.IR,
choice = "noscale")
plotScores(SrE3.IR, c_res,
main = myt,
pcs = c(1,2),
ellipse = "rob",
tol = 0.01)
## ----Chunk21, fig.cap = "\\label{robPCA}Robust PCA scores."----
# Robust PCA for comparison with the classical result above.
r_res <- r_pcaSpectra(SrE3.IR,
choice = "noscale")
plotScores(SrE3.IR, r_res,
main = myt,
pcs = c(1,2),
ellipse = "rob",
tol = 0.01)
## ----Chunk22, fig.cap = "\\label{OD}Diagnostics: orthogonal distances."----
diagnostics <- pcaDiag(SrE3.IR, c_res,
pcs = 2,
plot = "OD")
## ----Chunk23, fig.cap = "\\label{SD}Diagnostics: score distances."----
diagnostics <- pcaDiag(SrE3.IR, c_res,
pcs = 2,
plot = "SD")
## ----Chunk24, fig.cap = "\\label{scree}Scree plot."----
plotScree(c_res, main = myt)
## ----Chunk24a, fig.cap = "\\label{scree2}Alternate style scree plot."----
plotScree(c_res, style = "alt", main = myt)
## ----Chunk25, fig.cap = "\\label{boot}Bootstrap analysis for no. of principal components."----
# Cross-validation to choose how many PCs to retain.
out <- cv_pcaSpectra(SrE3.IR,
pcs = 5)
## ----Chunk26, results = "hide", eval = FALSE----
# plotScoresRGL(SrE3.IR, c_res,
# main = "S. repens IR Spectra",
# leg.pos = "A",
# t.pos = "B") # not run - it's interactive!
## ----Chunk27, fig.cap = "\\label{s3D}Plotting scores in 3D using plotScores3D.", fig.dim = c(7,7), out.width = "\\linewidth", out.height = "\\linewidth"----
plotScores3D(SrE3.IR, c_res,
main = myt,
ellipse = FALSE)
## ----Chunk29, fig.cap = "\\label{load}Loading plot.", fig.dim = c(7,7), out.width = "\\linewidth", out.height = "\\linewidth"----
plotLoadings(SrE3.IR, c_res,
main = myt,
loads = c(1, 2),
ref = 1)
## ----Chunk30, fig.cap = "\\label{load2}Plotting one loading vs. another."----
res <- plot2Loadings(SrE3.IR, c_res,
main = myt,
loads = c(1, 2),
tol = 0.002)
## ----Chunk30a, fig.cap = "\\label{splot}s-Plot to identify influential frequencies."----
spt <- sPlotSpectra(SrE3.IR, c_res,
main = myt,
pc = 1,
tol = 0.001)
## ----Chunk30b, fig.cap = "\\label{splot2}s-Plot detail."----
spt <- sPlotSpectra(SrE3.IR, c_res,
main = "Detail of s-Plot",
pc = 1,
tol = 0.05,
xlim = c(-0.04, -0.01),
ylim = c(-1.05, -0.9))
## ----Chunk31, results = "hide", eval = FALSE----
# hcaScores(SrE3.IR, c_res,
# scores = c(1:5),
# main = myt)
## ----Chunk35, fig.cap = "\\label{mclust1}mclust chooses an optimal model.", results = "hide"----
# Model-based clustering of the PCA scores via mclust.
model <- mclustSpectra(SrE3.IR, c_res,
plot = "BIC",
main = myt)
## ----Chunk36, fig.cap = "\\label{mclust2}mclust's thoughts on the matter.", results = "hide"----
model <- mclustSpectra(SrE3.IR, c_res,
plot = "proj",
main = myt)
## ----Chunk37, fig.cap = "\\label{mclust3}Comparing mclust results to the TRUTH.", results = "hide"----
# Compare cluster assignments against the known group labels.
model <- mclustSpectra(SrE3.IR, c_res,
plot = "errors",
main = myt,
truth = SrE3.IR$groups)
## ----Chunk33, results = "hide", eval = FALSE----
# # not run - it's interactive!
# mclust3dSpectra(SrE3.IR, c_res)
|
3ccb20f5ae6217bdac9c47c5fe436ba6a2d3736d
|
1e8f633ed2d20379e6f2fb52ccd2a86668d57b13
|
/analyses/analysis_script.r
|
9e91487ae12a408579cd06f403de0265867aedca
|
[
"MIT"
] |
permissive
|
JannisBush/Object-biases-in-visual-attention
|
618385c8ac08b6b66d32978ff98c9f202ffc3205
|
97292ebdd7c8cd57cc4ac3367f47edf0d4bedbeb
|
refs/heads/master
| 2020-03-18T14:58:31.353317
| 2018-07-12T14:11:37
| 2018-07-12T14:11:37
| 134,878,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,497
|
r
|
analysis_script.r
|
# Analysis of a cueing experiment: reaction times (RT) by cue validity,
# visual field, stimulus orientation, and block-cue-time (timeBCT).
library(tidyverse)
# NOTE(review): machine-specific working directory -- breaks on other
# machines; consider relative paths or here::here().
setwd("F:/SS18/Kurse/PsyLab/")
#setwd("/data/Share/SS18/Kurse/PsyLab")
#setwd("C:/Users/tillh/Desktop/Programming/R/Homework")
# read the data
d = readr::read_csv("results_final.csv") %>%
# only look at main trials
filter(block != "practice") %>%
# kick out all participants with less than 85% correct in the main trials
group_by(submission_id) %>% mutate(correctnessScore = mean(ifelse(correct == 'true', 1, 0))) %>%
filter(correctnessScore > 0.85) %>% ungroup() %>%
# only look at correct trials (correct==true) and kick out catch trials (target!=false)
filter(correct == "true" & target != "false") %>%
# change some columns to be in the right format
mutate(org = as.integer(org),
timeBCT = as.factor(timeBCT),
orgPrime = org,
target = as.integer(target),
targetPrime = target
) %>%
# get the main conditions valid_cue vs invalid_cue
# (valid = cue position equals target position)
mutate(conditionCue = factor(
case_when( orgPrime == targetPrime ~ "valid_cue",
TRUE ~ "invalid_cue"),
ordered = T, levels = c("valid_cue", "invalid_cue"))) %>%
# get the condition left vs right visual field
# (positions 0 and 4 are on the left -- presumably; confirm against the
# experiment's position coding)
mutate(conditionField = factor(
case_when((targetPrime == 0 | targetPrime == 4) ~ "left_field",
TRUE ~ "right_field"),
ordered = T, levels = c("left_field", "right_field"))) %>%
# get the condition horizontal vs vertical orientation
mutate(conditionOrientation = factor(
case_when(rotate == "false" ~ "vertical",
TRUE ~ "horizontal"),
ordered = T, levels = c("horizontal", "vertical")))
# remove outliers
d_clean = d %>% group_by(conditionCue, conditionField, conditionOrientation) %>%
# kick out fastest 2.5% for all three main conditions (conditionCue, conditionField, conditionOrientation)
# and kick out slowest 2.5% for all conditions
mutate(outlier = ifelse(log(RT) > quantile(log(RT), probs = 0.975), 1,
ifelse(log(RT) < quantile(log(RT), probs = 0.025), 1, 0))) %>% ungroup() %>%
filter(outlier == 0)
# summarize the RTs
dsummary = d_clean %>% group_by(conditionCue, conditionField, conditionOrientation, timeBCT) %>%
summarize(meanRT = mean(RT)) %>%
ungroup()
dsummary
# grand mean RT (grouping by cue is commented out)
meanCue = d_clean %>% #group_by(conditionCue) %>%
summarize(meanRT = mean(RT)) %>%
ungroup()
meanCue
# plot condition valid cue vs invalid cue
ggplot(d_clean, aes(y = log(RT), x = conditionCue)) + geom_violin()
ggplot(d_clean, aes(x = log(RT), color = conditionCue)) + geom_density()
# NOTE(review): the `happy` column is not created above -- presumably a
# questionnaire column from the CSV; confirm it exists or remove this plot.
ggplot(d_clean, aes(x = happy, color = happy)) + geom_density()
# plot condition left_field vs right_field
ggplot(d_clean, aes(y = log(RT), x = conditionField)) + geom_violin()
ggplot(d_clean, aes(x = log(RT), color = conditionField)) + geom_density()
# plot condition horizontal vs vertical orientation
ggplot(d_clean, aes(y = log(RT), x = conditionOrientation)) + geom_violin()
ggplot(d_clean, aes(x = log(RT), color = conditionOrientation)) + geom_density()
# plot condition timeBCT
ggplot(d_clean, aes(y = log(RT), x = timeBCT)) + geom_violin()
ggplot(d_clean, aes(x = log(RT), color = timeBCT)) + geom_density()
# do a linear model to predict log RT
# valid_cue vs invalid_cue, left vs right and horizontal vs vertical and timeBCT
modLM = lm(log(RT) ~ conditionCue + conditionField + conditionOrientation + timeBCT, data = d_clean)
summary(modLM)
# check if all combinations of conditions are normal distributed
qqnorm(modLM$residuals)
qqline(modLM$residuals)
######################SECOND ANALYSIS ONLY INVALID CUES###########################
# second analysis only for the invalid cues
d_invalid = d %>%
# kick out all valid_cues
filter(conditionCue == "invalid_cue") %>%
# divide invalid_cue in between_object (cued) and within_object (uncued)
# within_object: cue and target on the same rectangle (position distance
# pattern depends on whether the rectangles are rotated)
mutate(conditionRectangle = factor(
case_when((rotate == "false" & abs(orgPrime - targetPrime) > 1)
| (rotate == "true" & abs(orgPrime - targetPrime) == 1) ~ "within_object",
TRUE ~ "between_object"),
ordered = T, levels = c("within_object", "between_object"))) %>%
# divide invalid_cue in between_field (horizontal) and within_field (vertical)
mutate(conditionShift = factor(
case_when((rotate == "false" & conditionRectangle == "within_object")
| (rotate == "true" & conditionRectangle == "between_object") ~ "vertical_shift",
TRUE ~ "horizontal_shift"),
ordered = T, levels = c("vertical_shift", "horizontal_shift")))
# remove outliers
# (same 2.5%/97.5% log-RT trimming as in the first analysis, but grouped by
# the invalid-cue conditions)
d_invalid_clean = d_invalid %>% group_by(conditionField, conditionRectangle, conditionShift) %>%
# kick out fastest 2.5% for all three main conditions (conditionField, conditionRectangle, conditionShift)
# and kick out slowest 2.5% for all conditions
mutate(outlier = ifelse(log(RT) > quantile(log(RT), probs = 0.975), 1,
ifelse(log(RT) < quantile(log(RT), probs = 0.025), 1, 0))) %>% ungroup() %>%
filter(outlier == 0)
# summarize the RTs
d_invalid_summary = d_invalid_clean %>% group_by(conditionRectangle, conditionField, conditionShift) %>%
summarize(meanRT = mean(RT)) %>%
ungroup()
d_invalid_summary
# plot condition between_object vs within_object
ggplot(d_invalid_clean, aes(y = log(RT), x = conditionRectangle)) + geom_violin()
ggplot(d_invalid_clean, aes(x = log(RT), color = conditionRectangle)) + geom_density()
# plot condition left_field vs right_field
ggplot(d_invalid_clean, aes(y = log(RT), x = conditionField)) + geom_violin()
ggplot(d_invalid_clean, aes(x = log(RT), color = conditionField)) + geom_density()
# plot condition horizontal vs vertical shift
ggplot(d_invalid_clean, aes(y = log(RT), x = conditionShift)) + geom_violin()
ggplot(d_invalid_clean, aes(x = log(RT), color = conditionShift)) + geom_density()
# do a linear model to predict log RT
# between_object vs within_object, left_field vs right_field and horizontal_shift vs vertical_shift
modInvalidLM = lm(log(RT) ~ conditionRectangle + conditionField + conditionShift, data = d_invalid_clean)
summary(modInvalidLM)
# check if all combinations of conditions are normal distributed
qqnorm(modInvalidLM$residuals)
qqline(modInvalidLM$residuals)
######
# Additional exploratory stuff
# add analysis of blocks
# questions at start etc.
# maybe do a hierarchical model
# library(lme4)
# modInvalidLMER = lmer(log(RT) ~ conditionRectangle + conditionField + conditionShift + (1 | submission_id), data = d_invalid)
# summary(modInvalidLMER)
# add further plots and analyses
|
fec9b3045bfaf91676b8626a5ae9a095aec0ba28
|
4003354cbb4c85712659ed69897a583b8764367b
|
/man/standardise_names.Rd
|
837083108c932c855be387049f88f875550cf3c4
|
[] |
no_license
|
RandhirBilkhu/rarc
|
a11afedef24daec87c346186d738117168201026
|
6d9c2cd970e8c09ea70592348872c774102a15ae
|
refs/heads/master
| 2023-08-13T18:42:10.928732
| 2021-09-16T09:20:12
| 2021-09-16T09:20:12
| 407,087,673
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 495
|
rd
|
standardise_names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{standardise_names}
\alias{standardise_names}
\title{Standardise names}
\usage{
standardise_names(dt, name_col, clean_patt)
}
\arguments{
\item{dt}{data.table}
\item{name_col}{vector of names to standardise}
\item{clean_patt}{string of patterns to remove before applying clustering algorithm}
}
\value{
data.table with an additional column called std_Insured_Name
}
\description{
Standardise names
}
|
4ae8a3ebea2953c99a098569a279c8fb15a314b0
|
50fd8f6425e04985cb897b7b7f798d28df0dcdb1
|
/man/ilc-package.Rd
|
8ef0b2f3f0925eecf4322cc30351b7ad760efff7
|
[] |
no_license
|
valentinamiot/ilc
|
60462445c98c3ffc31bdb855dd808c687a6851f6
|
c32d1333359b952c9bf661464ed106a41befc9b4
|
refs/heads/master
| 2020-12-24T10:10:30.156649
| 2014-11-19T00:00:00
| 2014-11-19T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,538
|
rd
|
ilc-package.Rd
|
\name{ilc-package}
\alias{ilc-package}
\alias{ilc}
\docType{package}
\title{
Generalised Lee-Carter models using iterative fitting algorithms
}
\description{
The package implements a specialised iterative regression method for the analysis of age-period mortality based on a class of generalised Lee-Carter type modelling structure. Within the modelling framework of Renshaw and Haberman (2006), we use a Newton-Raphson iterative process to generate parameter estimates based on Poisson (or Gaussian) likelihood. In addition, we develop and implement here a stratified Lee-Carter model.
}
\details{
The package contains methods for the analysis of a class of six different types of log-linear models in the GLM framework with Poisson or Gaussian errors that includes the basic LC model too. Also, this package includes tools for the fitting and analysis of the stratified LC model using an additional covariate (other than age and period). There are also made available some general diagnostic tools to analyse the data and the graduation results.
}
\author{
Zoltan Butt, Steven Haberman and Han Lin Shang
Maintainer: Zoltan Butt <Z.Butt@city.ac.uk>
}
\references{
Lee, R. and Carter, L. (1992), ``Modelling and forecasting U.S. mortality'', \emph{Journal of the American Statistical Association} \bold{87}, 659-671.
Lee, L. (2000), ``The Lee-Carter method for forecasting mortality, with various extensions and applications'', \emph{North American Actuarial Journal} \bold{4}, 80-93.
Renshaw, A. E. and Haberman, S. (2003a), ``Lee-Carter mortality forecasting: a parallel generalised linear modelling approach for England and Wales mortality projections'', \emph{Journal of the Royal Statistical Society, Series C}, \bold{52}(1), 119-137.
Renshaw, A. E. and Haberman, S. (2003b), ``Lee-Carter mortality forecasting with age specific enhancement'', \emph{Insurance: Mathematics and Economics}, \bold{33}, 255-272.
Renshaw, A. E. and Haberman, S. (2006), ``A cohort-based extension to the Lee-Carter model for mortality reduction factors'', \emph{Insurance: Mathematics and Economics}, \bold{38}, 556-570.
Renshaw, A. E. and Haberman, S. (2008), ``On simulation-based approaches to risk measurement in mortality with specific reference to Poisson Lee-Carter modelling'', \emph{Insurance: Mathematics and Economics}, \bold{42}(2), 797-816.
Renshaw, A. E. and Haberman, S. (2009), ``On age-period-cohort parametric mortality rate projections'', \emph{Insurance: Mathematics and Economics}, \bold{45}(2), 255-270.
}
\keyword{package}
|
364e24f5e6c449003ead1f3d7d63060c1fb23120
|
66bd89427a891617a3d3ae46173dc0d4e9930788
|
/tests/testthat/test_report.R
|
1c73f670b0064085220fc86e3ad892cc99e2802c
|
[] |
no_license
|
StevenMMortimer/roas
|
8a664635f8a07f0adf47f9f54b23fa73bd79feff
|
301dca0cc6c8426d9abcda89f94b7390b6ecaafa
|
refs/heads/master
| 2021-06-18T18:24:04.089744
| 2017-04-28T20:29:16
| 2017-04-28T20:29:16
| 44,409,233
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 972
|
r
|
test_report.R
|
# testthat suite for the roas OAS reporting wrapper.
# NOTE(review): this hits a live OAS service using credentials read from
# roas_setup.rds, so it only runs where that fixture (and network access)
# is available.
context("OAS Report")
roas_setup <- readRDS("roas_setup.rds")
# credentials are passed via package options consumed by oas_build_credentials()
options(roas.account = roas_setup$account)
options(roas.username = roas_setup$username)
options(roas.password = roas_setup$password)
credentials <- oas_build_credentials()
site <- roas_setup$site
test_that("oas_report", {
# Run a fixed one-month Site Delivery / Executive Summary report and check
# the shape of the result, not its values (which depend on the account).
site_delivery <- oas_report(credentials=credentials,
report_type='Site Delivery',
report_name='Executive Summary',
id=site,
start_date='2015-09-01',
end_date='2015-09-30')
expected_names <- c('Site', 'Impressions', 'Clicks',
'CTR', 'ReportStart',
'ReportEnd')
expect_true(is.data.frame(site_delivery))
expect_equal(names(site_delivery), expected_names)
# report metadata is attached as attributes on the returned data frame
expect_true(all(c('TimethattheReportwasRun', 'StartDate', 'EndDate') %in% names(attributes(site_delivery))))
})
|
1700ff1fb5368faa80422adf5f5bd151be320407
|
3dbb408ab830a572260dd9c8f755d7ee00cdf89c
|
/day04/part2.R
|
ed2007e9773ff3acd32e6c59e69a592adb7c5ca0
|
[] |
no_license
|
ryanbthomas/adventofcode2020
|
32bf46be3b91479ccab69ae67d6cc95dbb2d6da6
|
2dfd227538c08e6d6fdf30c25b8ac5b6b72574c2
|
refs/heads/main
| 2023-04-09T08:50:51.176833
| 2021-04-23T19:36:58
| 2021-04-23T19:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,140
|
r
|
part2.R
|
# Advent of Code 2020, day 4 part 2: parse passport records and count the
# ones whose fields are all valid.
# NOTE(review): path says "day4" but this file lives under /day04/ -- confirm
# the directory name matches the repo layout.
input_file <- "day4/input/real"
# Required passport fields; "cid" is last on purpose -- validate_passport()
# below drops the final entry because cid is optional.
req_fields <- c(
"byr", # (Birth Year)
"iyr", # (Issue Year)
"eyr", # (Expiration Year)
"hgt", # (Height)
"hcl", # (Hair Color)
"ecl", # (Eye Color)
"pid", # (Passport ID)
"cid" # (Country ID)
)
library(purrr)
library(stringr)
library(tibble)
library(dplyr)
raw_input <- readLines(input_file)
# records are separated by blank lines; cumsum gives each line its record number
record_idx <- 1 + cumsum(raw_input == "")
# collapse each record's lines into a single space-separated string
records <- map_chr(
seq_len(max(record_idx)),
~ trimws(paste0(raw_input[record_idx == .x], collapse = " "))
)
# split "key:value" tokens: three-letter keys before ":", values after it
record_fields <- str_extract_all(records, pattern = "[a-z]{3}(?=:)")
record_values <- str_extract_all(records, pattern = "(?<=:)[^ ]+")
#record_tbl <- map(seq_along(record_values), ~ set_names(record_values[[.x]], record_fields[[.x]]))
# one row per record; fields absent from a record become NA columns
record_tbl <- map_dfr(seq_along(record_values),
~ tibble::as_tibble(
set_names(
as.list(record_values[[.x]]),
record_fields[[.x]]
)
)
)
validate_byr <- function(x) {
str_detect(x, "^(19[2-9][0-9]|200[0-2])$")
}
validate_iyr <- function(x) {
str_detect(x, "^(201[0-9]|2020)$")
}
validate_eyr <- function(x) {
str_detect(x, "^(202[0-9]|2030)$")
}
validate_hgt <- function(x) {
str_detect(x, "^(1[5-8][0-9]cm|19[0-3]cm|59in|6[0-9]in|7[0-6]in)$")
}
validate_hcl <- function(x) {
str_detect(x, "^#[0-9a-f]{6}$")
}
validate_ecl <- function(x) {
str_detect(x, "^(amb|blu|brn|gry|grn|hzl|oth)$")
}
validate_pid <- function(x) {
str_detect(x, "^[0-9]{9}$")
}
validate_passport <- function(x) {
check <- req_fields %in% x
all(check[-length(check)])
}
validate_cid <- function(x) {
TRUE
}
valid_passport_info <- record_tbl %>%
filter(validate_byr(byr),
validate_iyr(iyr),
validate_eyr(eyr),
validate_hgt(hgt),
validate_hcl(hcl),
validate_ecl(ecl),
validate_pid(pid)
)
num_valid_passports <- nrow(valid_passport_info)
usethis::ui_done("Valid Passports: {num_valid_passports}")
|
8cf5cb0ac98daae19f4464b9428abbd7fc51337f
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848353-test.R
|
41a805fa286e3f5da52a8a6453082225e0aef3f0
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 972
|
r
|
1615848353-test.R
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.67426818175e-308, 5.07589131392394e-116, 2.37468745902918e-249, 4.17274709511642e-255, -5.21627137864989e-108, -4.55414938104631e-200, -2.03359480463139e-82, 7.98607285654083e-294, 9.53216075507615e+121, -1.91273986076876e-272, 5.62067397049541e-104, 1.35426349889611e-309, 8.84662638470377e-160, 1.3468020202225e-20, -6.33872710878606e+128, 1.50720591788451e+180, 3.11534528805072e+163, -1.3181581074235e-163, 8.34284951095443e+270, -2.58497492572612e+45, -5.18732940836494e+222, -8.9070509982283e+295, -6.99993544073769e-281, -3.63875683405274e+101, 5.6464292943395e-141, -1.28159577326572e-227, 7.27044868124648e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
5d073b0f41ebd0602baf25d8c0af9b10aae89534
|
63e88fdead24a8e1a13ca667ed3e806b17fda334
|
/man/add_rowgroup.Rd
|
fc452e2ec7cc446c8b2a5fa6024728dbe9fabe54
|
[] |
no_license
|
eeenilsson/layout
|
d0b225e8b5ef63b420f4e388882ffe6dba559b9a
|
b5c643aa3571e7b2409d953365a1b3dffda65495
|
refs/heads/master
| 2022-09-24T06:21:05.145699
| 2022-09-19T14:08:02
| 2022-09-19T14:08:02
| 101,881,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,436
|
rd
|
add_rowgroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layout.r
\name{add_rowgroup}
\alias{add_rowgroup}
\title{Add row groups}
\usage{
add_rowgroup(object, object2, rgroup_names = "")
}
\arguments{
\item{object}{A layout object including a body containing the table.}
\item{object2}{A layout object including a body containing in its @body the row group to be added.}
\item{rgroup_names}{A character vector specifying the names (labels) to be used for the row groups.}
}
\value{
An S4 object with rows- and row group specifications added.
}
\description{
\code{add_rowgroup} Adds a row group to a layout object and assigns separate row group headers displayed when printing as html. The original object@body will be used as the first row group.
}
\examples{
example_layout <- add_rowgroup(example_layout,
example_layout_width,
rgroup_names = c("Sepal Length", "Sepal Width"))
}
\seealso{
\code{\link{prepare}} for making a layout object and \code{\link{layout_html}} for printing a html table from such an object.
Other table layout:
\code{\link{add_colgroup}()},
\code{\link{add_cols}()},
\code{\link{add_units}()},
\code{\link{firstrow_css}()},
\code{\link{layout_html}()},
\code{\link{order_by_vector}()},
\code{\link{order_layout}()},
\code{\link{prepare}()},
\code{\link{print_abbrev}()},
\code{\link{query_abbrev}()}
}
\concept{table layout}
|
bcd200bb9e76a59d375f6c5e2a8d607cb388faf7
|
b4ad0c0f735ba84e5c261398ad16ccf5ecd2ccfd
|
/Pre_CashFlow_RT.R
|
9c5971da318fdf769c47330e3b4553f5160f688b
|
[] |
no_license
|
zhiguo-wang/MCMC_2
|
50eeb30b6767b4e559486785fcaafe88abfe8b2f
|
a083a7b264b67e17881c86b251d2ed6315daf15b
|
refs/heads/master
| 2021-01-10T01:44:22.295666
| 2015-12-07T04:59:12
| 2015-12-07T04:59:12
| 44,928,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,841
|
r
|
Pre_CashFlow_RT.R
|
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Program Name: LIFP - Cash Flow - Retirement Plan (401k)
# Version: 1.1
#
# Created By: Justin Xu
# Modified By: Justin Xu
#
# Database Used: Investment Portfolio.csv
# Source Tables: \\Inputs\\Financial Products
# Target Tables: \\Outputs\\
#
# Purpose: To create cash flow matrix of 401k investment portfolio
#
# Version Date Author change
# | | |
# 1.1 | 10/16/15 | Justin | First Version
#
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# 2. Locate The Appropriate MCMC Matrix for The Individual
#
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# 4. Cash Flows Related to 401k Plan
#
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Cash Outflow: Unit Annual Investment is 1
cashOut_401k <- matrix(-1, nrow(pre_mcmc), ncol(pre_mcmc))
cashOut_401k[which(pre_mcmc != 1)] <- 0
## Cash Inflow:
### Low Risk
ad_pre_mcmc <- pre_mcmc
ad_pre_mcmc[which(ad_pre_mcmc == 3)] <- 0
raw_cashIn_401k_Low <- ad_pre_mcmc * yield_Low
raw_cashIn_401k_Low[which(raw_cashIn_401k_Low == 0)] <- 1
cashIn_401k_Low <- matrix(0, nrow(pre_mcmc), ncol(pre_mcmc))
for(i in 1 : ncol(pre_mcmc)) {
accumulatedValue <- 0
for (j in 1 : max(which(pre_mcmc[ , i] == 1))) {
accumulatedValue <- accumulatedValue +
prod(raw_cashIn_401k_Low[j : max(which(pre_mcmc[ , i] == 1)), i])
}
cashIn_401k_Low[max(which(pre_mcmc[ , i] == 1)) , i] <- accumulatedValue
}
### Moderate Risk
raw_cashIn_401k_Mid <- ad_pre_mcmc * yield_Mid
raw_cashIn_401k_Mid[which(raw_cashIn_401k_Mid == 0)] <- 1
cashIn_401k_Mid <- matrix(0, nrow(pre_mcmc), ncol(pre_mcmc))
for(i in 1 : ncol(pre_mcmc)) {
accumulatedValue <- 0
for (j in 1 : max(which(pre_mcmc[ , i] == 1))) {
accumulatedValue <- accumulatedValue +
prod(raw_cashIn_401k_Mid[j : max(which(pre_mcmc[ , i] == 1)), i])
}
cashIn_401k_Mid[max(which(pre_mcmc[ , i] == 1)) , i] <- accumulatedValue
}
### Aggressive Risk
raw_cashIn_401k_Agg <- ad_pre_mcmc * yield_Agg
raw_cashIn_401k_Agg[which(raw_cashIn_401k_Agg == 0)] <- 1
cashIn_401k_Agg <- matrix(0, nrow(pre_mcmc), ncol(pre_mcmc))
for(i in 1 : ncol(pre_mcmc)) {
accumulatedValue <- 0
for (j in 1 : max(which(pre_mcmc[ , i] == 1))) {
accumulatedValue <- accumulatedValue +
prod(raw_cashIn_401k_Agg[j : max(which(pre_mcmc[ , i] == 1)), i])
}
cashIn_401k_Agg[max(which(pre_mcmc[ , i] == 1)) , i] <- accumulatedValue
}
|
68034d3d79d9ebd4c82251d821dd64ed04ef79d7
|
85663bd7121504705a0de1837f21307b1dbd4e7d
|
/man/bioC_downloads.Rd
|
5f1d8d7615166b4f81922efef30331cb04470bc5
|
[] |
no_license
|
cran/bioC.logs
|
228b6a5499246067b3638e12272bf8382346cf1e
|
1867670988f473aa1a907b3ccb5fcdb11c7609cb
|
refs/heads/master
| 2023-01-15T12:47:20.129216
| 2023-01-06T21:00:02
| 2023-01-06T21:00:02
| 239,394,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,669
|
rd
|
bioC_downloads.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioC_logs.R
\name{bioC_downloads}
\alias{bioC_downloads}
\title{function to download logs from bioConductor stats}
\usage{
bioC_downloads(
pckg = NULL,
format = "bioC",
from = NULL,
to = NULL,
when = NULL,
verbose = TRUE
)
}
\arguments{
\item{pckg}{list of packages names}
\item{format}{two options: "bioC" (default) will report the downloads as reported by bioconductor, ie. "Year Month Nb_of_distinct_IPs Nb_of_downloads"; or, "CRAN" will report as CRAN logs does, ie. "Date count package_Name" (in cranlogs 'Nb_of_downloads' are referred as 'count')}
\item{from}{date in "MM-YYYY" format, specifying the initial date to be considered (optional argument)}
\item{to}{date in "MM-YYYY" format, specifying the final date to be considered (optional argument)}
\item{when}{optional argument, to specify pre-defined range dates; ie. 'ytd', 'year-to-date', 'last-year'}
\item{verbose}{boolean flag indicating whether to print information about the processes...}
}
\value{
a list containing a dataframe per package entered with columns as indicated by the format argument
}
\description{
function to download logs from bioConductor stats
}
\examples{
bioC_downloads(c("ABarray","a4Classif"))
bioC_downloads("edgeR", verbose=FALSE)
edgeR.logs <- bioC_downloads("edgeR", format="CRAN")
edgeR.logs <- bioC_downloads("edgeR", when='last-year', format='bioC')
edgeR.logs <- bioC_downloads("edgeR", to='03-2015', format='bioC')
edgeR.logs <- bioC_downloads("edgeR", from='03-2015', format='bioC')
edgeR.logs <- bioC_downloads("edgeR", from='03-2015',to='05-2016', format='bioC')
}
|
cc8a1427cfdf5a06a349daf7677239b877053ba5
|
0e94e1e4e71a941f921ccb8a5a17bde88b72796f
|
/extra/PRM Functions/RunCatchMSY.R
|
a3852c6fa32a1f654c1ad8d53e644d8543754b1d
|
[] |
no_license
|
OHI-Science/datalimited
|
c83d6245cc0745cbe7c03429584eb3c82d984715
|
46eced5a8274447e6715cb9a43df749a3aed3be7
|
refs/heads/master
| 2021-04-29T07:24:31.029164
| 2017-03-21T22:39:48
| 2017-03-21T22:39:48
| 77,954,118
| 0
| 0
| null | 2017-03-20T23:16:40
| 2017-01-03T21:17:22
|
R
|
UTF-8
|
R
| false
| false
| 6,585
|
r
|
RunCatchMSY.R
|
######################################
#Calculate MSY--------------------------------------------------
#This code runs CatchMSY on fisheries
######################################
RunCatchMSY<- function(Data,ErrorSize,sigR,Smooth,Display,BestValues,ManualFinalYear,n,NumCPUs,CatchMSYTrumps)
{
# Data<- GlobalStatus$Data
Data$RanCatchMSY<- FALSE
Data$HasRamMSY<- is.na(Data$MSY)==F
Data$HasRamFvFmsy<- is.na(Data$FvFmsy)==F
Data$HasRamBvBmsy<- is.na(Data$BvBmsy)==F & Data$Dbase=='RAM'
Data$BtoKRatio<- 1/((Data$phi+1)^(1/Data$phi))
MsyData<- Data
# MsyData$r<- NA
MsyData$g<- NA
MsyData$k<- NA
MsyData$MSYLogSd<- NA
# MsyData$rLogSd<- NA
MsyData$gLogSd<- NA
MsyData$KLogSd<- NA
MsyData$CatchMSYBvBmsy<- NA
MsyData$CatchMSYBvBmsy_LogSd<- NA
CommonError<- mean(MsyData$BvBmsySD,na.rm=T)
if (is.na(CommonError))
{
CommonError<- ErrorSize
}
# find mean range between final bio priors to pass to SnowCatchMSY_Matrix for stocks with finalbio>1
MeanRange<-MsyData[is.na(MsyData$BvBmsySD)==F & MsyData$Year==2012,c('IdOrig','BvBmsy','BvBmsySD','BtoKRatio')]
# MeanRange$BoverK<-pmin(1,MeanRange$BvBmsy/2)
MeanRange$BoverK<-pmin(1,MeanRange$BvBmsy*MeanRange$BtoKRatio)
MeanRange<-MeanRange[MeanRange$BoverK<0.95,]
# MeanRange$Bioerror<-MeanRange$BvBmsySD/2
MeanRange$Bioerror<-MeanRange$BvBmsySD*MeanRange$BtoKRatio
MeanRange$Bioerror[is.na(MeanRange$Bioerror)]<-CommonError
MeanRange$FbLow<-pmax(0,qnorm(0.25,MeanRange$BoverK,MeanRange$Bioerror))
MeanRange$FbHigh<-pmin(1,qnorm(0.75,MeanRange$BoverK,MeanRange$Bioerror))
MeanRange$BioRange<-MeanRange$FbHigh-MeanRange$FbLow
CommonRange<-mean(MeanRange$BioRange,na.rm=T) # Common range to apply to all stocks with B/K >=0.95
stock_id <- unique((Data[,IdVar][Data$HasRamMSY==F & Data$BvBmsy!=999 & is.infinite(Data$BvBmsy)==F]))
if (NumCPUs>1)
{
CMSYResults <- (mclapply(1:length(stock_id), MatrixSnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,CommonRange=CommonRange,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
# CMSYResults <- (mclapply(1:length(stock_id), SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
# CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
# sfInit( parallel=TRUE, cpus=NumCPUs,slaveOutfile="SnowfallMSY_ProgressWorkPlease.txt" )
#
# sfExport('Data','ErrorSize','CommonError','sigR','Smooth','Display','BestValues','ManualFinalYear','n','NumCPUs','CatchMSYTrumps','stock_id','IdVar')
#
# CMSYResults <- (sfClusterApplyLB(1:(length(stock_id)), SnowCatchMSY))
# sfStop()
}
if (NumCPUs==1)
{
pdf(file=paste(FigureFolder,'Catch-MSY Diagnostics.pdf',sep=''))
CMSYResults <- (mclapply(1:length(stock_id), MatrixSnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,CommonRange=CommonRange,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
# CMSYResults <- (mclapply(1:length(stock_id), SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
# CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
dev.off()
# pdf(file=paste(FigureFolder,'Catch-MSY Diagnostics Normal.pdf',sep=''))
#
#
# CMSYResults <- (mclapply(1, SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
# CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
#
# dev.off()
}
CmsyStore<- as.data.frame(matrix(NA,nrow=0,ncol=dim(CMSYResults[[1]]$CatchMSY)[2]))
PossibleParams <- lapply(seq(along = CMSYResults), function(i) CMSYResults[[i]]$PossibleParams)
EmptyParams <- lapply(seq(along = PossibleParams), function(i) sum(is.na(PossibleParams[[i]]))==0)
HasData<- ldply(EmptyParams)
PossibleParams<- PossibleParams[which(HasData==T)]
CmsyStore <- lapply(seq(along = CMSYResults), function(i) CMSYResults[[i]]$CatchMSY)
PossibleParams<- ldply(PossibleParams)
if (dim(PossibleParams)[1]>0 & sum(PossibleParams$Fail==0,na.rm=T)>=1)
{
PossibleParams<- PossibleParams[,c('IdOrig','g','phi','K','MSY','FinalFvFmsy','FinalBvBmsy')]
}
CmsyStore<- ldply(CmsyStore)
# pdf(file='Diagnostics/Initial Biomass Prior Diagnostic.pdf')
# for (l in 1:length(CMSYResults))
# {
# CmsyResults<- CMSYResults[[l]]
# if (CmsyResults$RanCatchMSY==T)
#
# {
# show(i)
# ParamSpace<- CmsyResults$PossibleParams
#
# BioData<- ParamSpace[,grepl('X',colnames(ParamSpace))]
#
# ParamSpace$FinalBvBmsy<- 2*(BioData[,dim(BioData)[2]]/ParamSpace$K)
#
# print(ggplot(data=ParamSpace,aes(StartBio,FinalBvBmsy))+geom_point()+geom_smooth(method='lm')+ylab('FinalBvBmsy')+xlab('StartBvBmsy'))
#
# print(ggplot(data=ParamSpace,aes(StartBio,MSY))+geom_point()+geom_smooth(method='lm')+xlab('StartBvBmsy')+ylab('MSY'))
#
# print(ggplot(data=ParamSpace,aes(StartBio,FinalFvFmsy))+geom_point()+geom_smooth(method='lm')+xlab('StartBvBmsy')+ylab('FinalFvFmsy'))
#
#
#
#
# # print(ggplot(data=ParamSpace,aes(StartBio))+geom_histogram(binwidth=.025))
# CmsyStore<- rbind(CmsyStore,CmsyResults$CatchMSY)
# }
# }
# dev.off()
ConCatDat<- paste(MsyData$IdOrig,MsyData$Year,sep='-')
ConCatCmsy<- paste(CmsyStore$IdOrig,CmsyStore$Year,sep='-')
Where<- ConCatDat %in% ConCatCmsy
MsyData[Where,]<- CmsyStore
return(list(MsyData=MsyData,PossibleParams=PossibleParams))
} #Close function
|
a19de84918118124f51c4159df3bf3ed474cecd5
|
8b760b801d253df7f0ac77aa2c4188cb37094604
|
/bash/rtags
|
182e654d650507b6ac2cbcc40c2a0cead1b26bae
|
[
"MIT"
] |
permissive
|
mertnuhoglu/stuff
|
5a5c9841136cda69d9cea6a754e2184e457ba31c
|
cb9be48bbc84a84d6fca2d0891cc1dccb86c0326
|
refs/heads/master
| 2021-01-21T14:08:29.680536
| 2020-05-17T15:07:07
| 2020-05-17T15:07:07
| 34,980,477
| 0
| 0
|
MIT
| 2020-05-17T15:07:50
| 2015-05-03T09:25:42
|
R
|
UTF-8
|
R
| false
| false
| 45
|
rtags
|
#!/usr/bin/env Rscript
rtags(ofile = 'tags')
|
|
7e1fee8a8e688afe484ccf177348857f6b1829de
|
c4a77d19da5d46766311c3b9eb68131bc696daf9
|
/man/ResistanceGA-package.Rd
|
c081604b2d5888fc65417611500b4c86f8861b7b
|
[] |
no_license
|
rmarrotte/ResistanceGA
|
ac57bff6c3e2bd05006e923f4af93eec9e1e18c1
|
6934cf25cf025baec0dccc3bf67311ca170c9808
|
refs/heads/master
| 2020-03-27T18:24:03.423010
| 2018-09-14T17:30:07
| 2018-09-14T17:30:07
| 146,920,028
| 1
| 0
| null | 2018-08-31T16:57:59
| 2018-08-31T16:57:59
| null |
UTF-8
|
R
| false
| true
| 2,421
|
rd
|
ResistanceGA-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ResistanceGA.R
\docType{package}
\name{ResistanceGA-package}
\alias{ResistanceGA-package}
\title{About this R package}
\description{
This package contains functions to optimize resistance surfaces using Genetic Algorithms. Continuous and categorical surfaces can be optimized, and multiple surfaces can be simultaneously optimized to create novel resistance surfaces.
}
\details{
\tabular{ll}{
Package: \tab ResistanceOptimization\cr
Type: \tab Package\cr
License: \tab >=GPL-2\cr
}
This package provides functions to prepare data and execute a number of functions to optimize continuous and categorical resistance surfaces using CIRCUITSCAPE and Genetic Algorithms within R. You must have CIRCUITSCAPE (4.0-Beta or higher) installed to run these functions. Output from functions in this package include: a summary table with AIC, AICc, conditional and marginal R2 values, and log-likelihood values for each optimized surface, parameters that optimized each of the top models, coefficients from the fitted mixed effects models, plots of the fitted response curves, diagnostic plots of model fit, and Circuitscape outputs for each of the optimized resistance surfaces. Resistance surfaces can also be optimized using least cost paths, which are implemented using the `gdistance` package.
*** Use of this package to run CIRCUITSCAPE is limited to Windows machines due its use of the Circuitscape .exe file. Anyone interested in adapting the code to accommodate command-line execution on other platforms is welcome to do so.
In order to use this package with CIRCUITSCAPE, you must have CIRCUITSCAPE v4.0 or greater installed.
Official release: \url{http://www.circuitscape.org/downloads}
}
\references{
Please cite:
Peterman, W.E., G.M. Connette, R.D. Semlitsch, and L.S. Eggert. 2014. Ecological resistance surfaces predict fine-scale genetic differentiation in a terrestrial woodland salamander. Molecular Ecology 23:2402--2413. \href{http://goo.gl/RJb6Go}{Peterman et al.}
Peterman, W. E. 2018. ResistanceGA: An R package for the optimization of resistance surfaces using genetic algorithms. Methods in Ecology and Evolution doi:10.1111/2041-210X.12984. \href{https://besjournals.onlinelibrary.wiley.com/doi/abs/10.1111/2041-210X.12984}{"MEE Publication"}
}
\author{
Bill Peterman \email{bill.peterman@gmail.com}
}
|
02569296178ce18cb15bb8a0b9ac83d1969b62dd
|
ad235e29d57ff1fd2f1efe822989a24de3491740
|
/Code/resample.r
|
e37420e52f3136e4579a56a892ee9efcbe957ac1
|
[] |
no_license
|
asgr/samimaxi
|
12410ea8a2f934f4a84b6907230f471a2bdfea2f
|
1fb8c3944ad83f2980005a1a4469438de3e5ef8e
|
refs/heads/master
| 2021-05-02T10:33:00.841753
| 2019-07-31T02:25:27
| 2019-07-31T02:25:27
| 13,374,828
| 0
| 2
| null | 2019-07-31T02:25:29
| 2013-10-07T04:01:45
|
R
|
UTF-8
|
R
| false
| false
| 189
|
r
|
resample.r
|
resample=function (x, size, ...){
if (length(x)<size){size=length(x)}
if (length(x) <= 1) {
if (!missing(size) && size == 0)
x[FALSE]
else x
} else sample(x, size, ...)
}
|
7cb521e029eb03dd6efe4553f65e842085b2063a
|
4a00886d627412c19bfa4c6e664dc44740a3d675
|
/man/themeXYblank.Rd
|
db02acf10d44659d381aa0f14c4312b7e1e61945
|
[] |
no_license
|
mireia-bioinfo/plotRegulome
|
a668abe92445594bbdba54014b08388cfea7378d
|
21a46e971c4f249dd84073faa437350a8b17d290
|
refs/heads/master
| 2021-06-25T12:34:50.322389
| 2020-12-30T16:30:54
| 2020-12-30T16:30:54
| 175,781,508
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 496
|
rd
|
themeXYblank.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/themesBlank.R
\name{themeXYblank}
\alias{themeXYblank}
\title{Blank X and Y axis ggplot2 theme}
\usage{
themeXYblank(title = FALSE, ...)
}
\arguments{
\item{title}{Logical indicating if the title of the axis should be included. Default = FALSE.}
\item{...}{Additional parameters to pass to \link[ggplot2]{theme} function.}
}
\description{
When added to a ggplot, will set all elements in the x and y axis to blank.
}
|
eef84cef03d83360159cdfe6553a9839cd950329
|
c15f2824bc90ca148b77fde9a2a40d8bd7f5927d
|
/ui.R
|
e24f144443723e7981385ac1358939ba1695681d
|
[] |
no_license
|
marwanmuhamad/first-shiny-app
|
0499495444c670d943e0a29df9e1b32bdb8dcdf7
|
e587ce0376e080e6a54bfe156fb03fcfab173259
|
refs/heads/main
| 2023-07-26T10:19:29.315761
| 2021-09-06T14:40:54
| 2021-09-06T14:40:54
| 403,490,863
| 0
| 0
| null | 2021-09-06T07:09:29
| 2021-09-06T05:00:54
|
R
|
UTF-8
|
R
| false
| false
| 1,941
|
r
|
ui.R
|
library(tidyverse)
library(shiny)
library(shinythemes)
# ui ----------------------------------------------------------------------
shinyUI(fluidPage(theme = shinytheme(theme = "yeti"),
title = "Shiny App",
sidebarLayout(
sidebarPanel = sidebarPanel(width = 3,
helpText("Select Country to Preview Average Life Expectancy"),
selectInput("countryInput", "Show One", selected = country[1], choices = c(country[], "All")),
conditionalPanel(
condition = "input.tabset == 'graph'",
checkboxInput("showGraphInput", "Show Bar Graph"),
conditionalPanel(
condition = "input.showGraphInput == true",
radioButtons("radioInput", label = "Select Preview", choices = c("All", "Top 10", "Bottom 10"),
selected = NULL)
)
)
# checkboxInput("showGraphInput", "Show Bar Graph"),
# conditionalPanel(
# condition = "input.showGraphInput == true",
# radioButtons("radioInput", label = "Select Preview", choices = c("All", "Top 10", "Bottom 10"),
# selected = NULL)
# )
),
mainPanel = mainPanel(width = 9,
h3("Life Expectancy"),
hr(),
# h4("Average Life Expectancy"),
tabsetPanel(type = "pills", id = "tabset",
tabPanel(title = "Graph", icon = icon("bar-chart-o"), value = "graph",
br(),
textOutput(outputId = "meanCountry"),
hr(),
plotOutput(outputId = "lineGraph"),
hr(),
br(),
plotOutput(outputId = "meanLifeGraph"),
hr()
),
tabPanel(title = "Table", icon = icon("table"), value = "tablex",
br(),
dataTableOutput(outputId = "table")
)
)
)
)
)
)
|
285cbbbd875fd9896489668d1c1408d8d4cf509e
|
4306c2158515df6dd1d74197684f1b3030f1225f
|
/pulentiwi/app.R
|
0bd88024c86431a590f5df4f890d2c02a07fc7c3
|
[] |
no_license
|
jbkunst/usach-ingemat-intro-elementos-ds-201802
|
d7b47d0b3cd95303823197dc77f77f23967bf30d
|
70378cdc90901a9b4e1b51fb644d99a9a8034e83
|
refs/heads/master
| 2022-04-29T05:27:02.533727
| 2022-04-20T20:54:55
| 2022-04-20T20:54:55
| 142,888,540
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
app.R
|
library(shiny)
library(tidyverse)
library(lubridate)
library(rvest)
urlr <- "https://www.rankia.cl/blog/analisis-ipsa/3229498-que-empresas-forman-parte-ipsa-2018"
textos <- read_html(urlr) %>%
html_nodes("td") %>%
html_text()
textos
imp <- seq(1, length(textos), by = 2)
empresas <- textos[imp]
ui <- fluidPage(
sidebarLayout(
sidebarPanel(
selectInput("empresa", "Empresa", choices = empresas)
),
mainPanel(
plotOutput("distPlot")
)
)
)
server <- function(input, output) {
output$distPlot <- renderPlot({
# url <- "http://www.bolsadesantiago.com/DatosGraficosSebra/IPSA-days.txt"
url <- paste(
"http://www.bolsadesantiago.com/DatosGraficosSebra/",
input$empresa,
"-days.txt",
sep = ""
)
data <- read_csv(url)
data <- data %>%
mutate(fecha = ymd(`<DTYYYYMMDD>`))
ggplot(data) +
geom_line(aes(fecha, `<OPEN>`))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
ebf6292453789b80241940a9e6ba37207d508294
|
02fae1d676e16cf1db8bd556ca9491e5f40aacfd
|
/Code/Iteration_15.R
|
3b431ce0bf228e8dc37f8fb30c4ef36cd6528962
|
[] |
no_license
|
astronerma/Amazon
|
0581d99286f01d0d035d3647655ff06be6e3df3f
|
e8351935bc52fe94494265bd0bf476cc17f40da7
|
refs/heads/master
| 2016-09-05T22:17:20.508906
| 2013-07-31T18:54:16
| 2013-07-31T18:54:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,207
|
r
|
Iteration_15.R
|
# Info about this idea:
# http://www.kaggle.com/c/amazon-employee-access-challenge/forums/t/5060/using-response-for-new-features
setwd("/Users/aszostek/Projects/Kaggle/Amazon")
train <- read.csv(file="./Data/train.csv")
test <- read.csv(file="./Data/test.csv")
# Remove redundant column
train <- train[,c(-10)]
test <- test[,c(-10)]
source("../Utils/submission_utils.R")
iteration = 15
library("randomForest")
library("tree")
library("e1071")
library("hash")
library("verification")
library("caret")
# ---------------
# My new datasets
new_train <- as.data.frame(matrix(NA,nrow = nrow(train),ncol=17))
names(new_train) <- c("action","resource0","resource1","mgr0","mgr1","role1_0","role1_1","role2_0", "role2_1", "dept0","dept1","title0","title1","family0","family1","role0","role1")
new_train[[1]] <- train[[1]]
new_test <- as.data.frame(matrix(NA,nrow = nrow(test),ncol=17))
names(new_test) <- c("id","resource0","resource1","mgr0","mgr1","role1_0","role1_1","role2_0", "role2_1", "dept0","dept1","title0","title1","family0","family1","role0","role1")
new_test[[1]] <- test[[1]]
# -------------------------
# Now lets fill the training and test datasets
for (col in 2:9)
{
print(col)
c0 <- 2*col-2
c1 <- 2*col-1
uni <- unique(train[[col]])
uni_test <- unique(test[[col]])
diff <- setdiff(uni_test,uni)
slownik0 <- hash(uni,rep(0,length(uni)))
slownik1 <- hash(uni,rep(0,length(uni)))
# populate slownik
for (row in 1:nrow(train))
{
if (train[row,1] == 1)
{
slownik1[[ as.character(train[row,col]) ]] <- slownik1[[ as.character(train[row,col]) ]] + 1
}
else
{
slownik0[[ as.character(train[row,col]) ]] <- slownik0[[ as.character(train[row,col]) ]] + 1
}
}
# populate new_train
tmp <- as.data.frame(matrix(NA,nrow = length(uni),ncol=3))
for (row in 1:length(uni))
{
#print(row)
word <- uni[row]
s0 <- slownik0[[as.character(word)]]
s1 <- slownik1[[as.character(word)]]
tmp[row,1] <- word
tmp[row,2] <- s0/(s0+s1)
tmp[row,3] <- s1/(s0+s1)
}
new_train[[c0]] <- tmp[match(train[[col]],tmp[[1]]),2]
new_train[[c1]] <- tmp[match(train[[col]],tmp[[1]]),3]
new_test[[c0]] <- tmp[match(test[[col]],tmp[[1]],incomparables=diff,nomatch=NA),2]
new_test[[c1]] <- tmp[match(test[[col]],tmp[[1]],incomparables=diff,nomatch=NA),3]
}
system("say done")
# ------------------------
# Take log10 of the training ata
log_train<-new_train
log_train[,2:17]<-log10(new_train[,2:17]+1e-3)
log_train[[1]] <- as.factor(log_train[[1]])
# ----------------------------------------
# Replace missing values in test set
new_test <- as.data.frame(impute(new_test,"median"))
# ------------------------
# Take log10 of the test
log_test <- new_test
log_test[,2:17]<-log10(new_test[,2:17]+1e-3)
log_test[[1]] <- as.factor(log_test[[1]])
# -------------------------------
# GLM
t2 <- glm(action ~.,data=log_train[,c(1,2,3,4,5,12,14,17)],family="binomial")
summary(t2)
predict_train <- predict(t2,type="response")
#predict_train
roc.area(train[[1]], predict_train)$A
kfold.glm<-function(data,k)
{
n<-as.integer(nrow(data)/k)
err.vect<-rep(NA,k)
for (i in 1:k)
{
#print(i)
s1<-((i-1)*n+1)
s2<-(i*n)
#print(paste(s1,s2))
subset<-s1:s2
train<-data[-subset,]
test<-data[subset,]
# ----------
fit <- glm(action ~.,data=train[,c(1,2,3,4,5,12,14,17)],family="binomial")
prediction <- predict(fit,newdata=test,type="response")
# ----------
labels<-as.numeric(as.character(test[[1]]))
err <- roc.area(labels,prediction)$A
err.vect[i]<-err
}
return(err.vect)
}
kfold.glm<-function(data,k)
{
s<-sample(1:nrow(data),nrow(data),replace=FALSE)
n<-as.integer(nrow(data)/k)
err.vect<-rep(NA,k)
for (i in 1:k)
{
s1<-((i-1)*n+1)
s2<-(i*n)
subset<-s[s1:s2]
#print(subset[1:10])
train<-data[-subset,]
test<-data[subset,]
fit <- glm(action ~.,data=train,family="binomial")
prediction <- predict(fit,newdata=test,type="response")
labels<-as.numeric(as.character(test[[1]]))
err <- roc.area(labels,prediction)$A
err.vect[i]<-err
}
return(err.vect)
}
a<-kfold.glm(log_train[,c(1,2,3,4,5,12,14,17)],5)
a
# ------------------------
# Plots
plot(jitter(log_train[[2]]),jitter(log_train[[14]]),col=log_train[[1]])
# ----------------
# Predict test set
prediction_test <- predict(t2,newdata = log_test[,c(1,2,3,4,5,12,14,17)],type="response")
# -----------------------
# Submission file
id <- test[[1]]
prediction <- prediction_test
test_submission<-as.data.frame(matrix(data = NA, nrow = length(prediction),ncol=2))
test_submission[[1]] <- id
test_submission[[2]] <- prediction
names(test_submission)<-c("Id","Action")
# write file
submission_file_name = paste("./Submissions/submission",as.character(iteration),".csv",sep="")
submission_file_name
write.csv(test_submission,file=submission_file_name,row.names=FALSE,quote=FALSE)
diffsub(13,12,2,"Amazon")
# ---------------------
plot(log(new_train[[2]]+1e-7),log(new_train[[5]]+1e-7),col=new_train[[1]],pch=19)
|
89241cff4c4ffe1d7ee91009e385b76e0ff88a5c
|
2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89
|
/R/AffymetrixTabularFile.R
|
4e13c4fa8980756b2119dcb8b89ed23b928f02d9
|
[] |
no_license
|
HenrikBengtsson/aroma.affymetrix
|
a185d1ef3fb2d9ee233845c0ae04736542bb277d
|
b6bf76f3bb49474428d0bf5b627f5a17101fd2ed
|
refs/heads/master
| 2023-04-09T13:18:19.693935
| 2022-07-18T10:52:06
| 2022-07-18T10:52:06
| 20,847,056
| 9
| 4
| null | 2018-04-06T22:26:33
| 2014-06-15T03:10:59
|
R
|
UTF-8
|
R
| false
| false
| 1,478
|
r
|
AffymetrixTabularFile.R
|
# @author "HB"
setConstructorS3("AffymetrixTabularFile", function(...) {
extend(TabularTextFile(...), c("AffymetrixTabularFile",
uses("AromaPlatformInterface"), uses("FileCacheKeyInterface"))
)
})
setMethodS3("translateColumnNames", "AffymetrixTabularFile", function(this, names, ...) {
# Convert 'FOO_BAR_DOO' and 'FOO.BAR.DOO' to 'foo bar doo'?
if (any(regexpr("[_.]", names) != -1)) {
names <- tolower(gsub("[_.]", " ", names))
}
# Finally, convert 'Foo bar Doo' to 'fooBarDoo'
names <- toCamelCase(names)
names
}, protected=TRUE)
setMethodS3("findByChipType", "AffymetrixTabularFile", function(static, chipType, tags=NULL, pattern=NULL, ...) {
if (is.null(pattern)) {
name <- paste(c(chipType, tags), collapse=",")
pattern <- sprintf("^%s.*[.]...$", name)
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Search in annotationData/chipTypes/<chipType>/
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pathname <- findAnnotationDataByChipType(chipType, pattern, ...)
pathname
}, static=TRUE, protected=TRUE)
setMethodS3("byChipType", "AffymetrixTabularFile", function(static, chipType, tags=NULL, ...) {
# Search for the genome information file
pathname <- findByChipType(static, chipType, tags=tags, ...)
if (is.null(pathname))
throw("Failed to located Affymetrix tabular file: ", chipType)
newInstance(static, pathname, ...)
}, static=TRUE)
|
0cafba5fea3db26e4be81df3edf45ed280dacebd
|
62ad2cfdf0461bfdca74ab7249769b5b2e67595b
|
/TCGA PanCancer/ATP analysis/Correlation heatmap (filtered).R
|
1ca12e02985717c39af0811fafb5e8f894ed338f
|
[
"MIT"
] |
permissive
|
jihunni/Bioinformatics_project_2021_spring
|
d60ba9e2adae1ac68208c68aa97a485323d9d3e0
|
dbbd83ea5051346f333352fef7ffb1fc3f6e2dcc
|
refs/heads/main
| 2023-07-18T10:15:17.045411
| 2021-08-31T14:34:04
| 2021-08-31T14:34:04
| 391,852,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,514
|
r
|
Correlation heatmap (filtered).R
|
library(readr)
library(stringr)
library(tidyverse)
#correlation
library("Hmisc") #correlation matrix with p-value
library(corrplot) #visualizing correlation matrix
library(pheatmap)
library(grid)
library(RColorBrewer) #color annotation
library(beepr) #beep() : alarm function
# #load pan-cancer data (small)
# tcga_tpm = read_tsv(file="../data/tcga_RSEM_gene_tpm",n_max=30)
# save(tcga_tpm, file = "../data/tcga_RSEM_gene_tpm_Only30.rda")
#load pan-cancer data (all)
# Load the full pan-cancer expression matrix and cache it as .rda.
tcga_tpm = read_tsv(file="../data/tcga_RSEM_gene_tpm")
save(tcga_tpm, file = "../data/tcga_RSEM_gene_tpm.rda")
# --- The number of TCGA cases per cancer type ---
# (section header; the original bare "<...>" line was not valid R syntax)
#load tissueSourceSite code data
tissueSourceSite = read_tsv(file="../tcga_code_tables/tissueSourceSite.tsv")
colname = colnames(tcga_tpm)
head(colname) #we need to consider the first column
#extract TSS code
TSS = substr(colname, start = 6, stop = 7) #tcga tpm data
head(TSS)
#HCC data.frame
TSS_code_HCC_boolian = str_detect(tissueSourceSite$`Study Name`, 'Liver hepatocellular carcinoma')
TSS_cide_HCC = tissueSourceSite$`TSS Code`[TSS_code_HCC_boolian]
Index_HCC = c(1, which(TSS %in% TSS_cide_HCC)) #sample column sholud be included
tcga_tpm_HCC = tcga_tpm[Index_HCC]
save(tcga_tpm_HCC, TSS_cide_HCC, Index_HCC, file = 'HCC_dataFrame.rda')
rm(tissueSourceSite, colname, Index_HCC, TSS, TSS_cide_HCC, TSS_code_HCC_boolian)
#sub-dataframe which contains ATP-consuming group and ATP-producing group
tcga_gene_list = substr(tcga_tpm_HCC$sample, start = 1, stop = 15) #ensemble ID has unqiue 11 digital
#gene list from HME data
load('../../GEM data/geneList_consuming.rda')
load('../../GEM data/geneList_producing.rda')
HCC_consuming = tcga_gene_list %in% geneList_consuming
HCC_producing = tcga_gene_list %in% geneList_producing
unique(HCC_consuming)
unique(HCC_producing)
HCC_consuming = tcga_tpm_HCC[c(HCC_consuming),]
HCC_producing = tcga_tpm_HCC[c(HCC_producing),]
save(HCC_consuming, HCC_producing, file='../data/HCC_consuming_and_producing_dataFrame')
rm(geneList_consuming, geneList_producing, mean_HCC, tcga_gene_list)
# #HCC gene expression median value
# plot(x = c(1:100), y = tcga_tpm_HCC[10,1:100], ylim=c(-15,15)) #to observe the overall trend
# mean_HCC_consuming = apply(HCC_consuming[,2:length(colnames(HCC_consuming))], 1, median)
# mean_HCC_producing = apply(HCC_producing[,2:length(colnames(HCC_producing))], 1, median)
# #tcga_tpm_HCC_mean = mutate(tcga_tpm_HCC, apply(tcga_tpm_HCC[,2:length(colnames(tcga_tpm_HCC))], 1,median))
# mean_HCC_consuming = data.frame(HCC_consuming$sample, mean_HCC_consuming)
# colnames(mean_HCC_consuming) = c('gene', 'tpm')
# mean_HCC_producing = data.frame(HCC_producing$sample, mean_HCC_producing)
# colnames(mean_HCC_producing) = c('gene', 'tpm')
# correlation_HCC = cor(mean_HCC_consuming$tpm, mean_HCC_producing$tpm)
# save(mean_HCC_consuming, mean_HCC_producing, file='HCC_mean_consuming_and_producing_dataFrame.rda')
# --- Correlation analysis at the gene level ---
# (section header; the original bare "<...>" line was not valid R syntax)
save(HCC_consuming, HCC_producing, tcga_tpm_HCC, file='continue.rda')
#Correlation matrix
##consuming group
row.names(HCC_consuming) = HCC_consuming$sample #change raw name into geneID
HCC_consuming_t = t(HCC_consuming) #transpose the dataframe (output: matrix)
HCC_consuming_t = HCC_consuming_t[2:nrow(HCC_consuming_t), ] #remove the first raw containing gene ID
##producing group
row.names(HCC_producing) = HCC_producing$sample #change raw name into geneID
HCC_producing_t = t(HCC_producing) #transpose the dataframe (output: matrix)
HCC_producing_t = HCC_producing_t[2:nrow(HCC_producing_t), ] #remove the first raw containing gene ID
##merge
unique(rownames(HCC_consuming_t) == rownames(HCC_producing_t)) #check whether they have identical raw
merged_matrix = merge(HCC_consuming_t, HCC_producing_t, by = "row.names", all = TRUE) #dataframe??? #merge two matrix
rownames(merged_matrix) = merged_matrix$Row.names
merged_matrix = merged_matrix[,2:ncol(merged_matrix)] #remove rowname column
is.na(merged_matrix) #check the data has N/A
which(is.na(merged_matrix))
merged_matrix_numberic = data.frame(lapply(merged_matrix, as.numeric)) #convert char dataframe into numeric dataframe.
rownames(merged_matrix_numberic) = rownames(merged_matrix)
merged_matrix = merged_matrix_numberic
rm(merged_matrix_numberic)
correlation_matrix = cor(merged_matrix)
save(HCC_consuming_t, HCC_producing_t, merged_matrix, correlation_matrix, file='correlation_matrix.rda')
load('correlation_matrix.rda')
#correlation analysis
Rcorrelation_matrix = rcorr(as.matrix(merged_matrix))
HCC_coeff = Rcorrelation_matrix$r
HCC_p = Rcorrelation_matrix$P
#correlogram
corrplot(correlation_matrix) #A default correlation matrix plot (called a Correlogram)
#Heatmap
palette = colorRampPalette(c("green", "white", "red")) (20) #Heatmap color
heatmap(x = correlation_matrix,
col = palette,
symm = TRUE,
RowSideColors = c(
rep("red", ncol(HCC_consuming_t)),
rep("blue", ncol(HCC_producing_t))),
# ColSideColors = c(
# rep("red", ncol(HCC_consuming_t)),
# rep("blue", ncol(HCC_producing_t))),
main = 'HCC correlation'
)
# ex = heatmap(x = correlation_matrix[1:10,1:10],
# col = palette,
# symm = TRUE,
# #RowSideColors = c(rep("red", 5), rep("blue", 5)),
# main = 'HCC correlation'
# )
#color code
gene_color_code = data.frame(gene= rep(c("consuming", "producing"), c(ncol(HCC_consuming_t), ncol(HCC_producing_t))))
row.names(gene_color_code) = colnames(merged_matrix)
#pheatmap
# small_pheatmap_HCC = pheatmap(mat = correlation_matrix[1:10,1:10],
# color = colorRampPalette(c('#2471A3','white','#C0392B'))(50),
# dispaly_numbers = FALSE,
# border_color = 'white',
# show_rownames = T,
# show_colnames = F,
# annotation_row = gene_color_code,
# annotation_col = gene_color_code
#
# )
# Correlation heatmap over all genes (consuming + producing), with row/column
# annotation strips marking the two gene groups.
# Fixed typo: "dispaly_numbers" -> display_numbers.  pheatmap() accepts
# unknown arguments through `...`, so the misspelled argument was silently
# ignored.
pheatmap_HCC = pheatmap(mat = correlation_matrix,
                        color = colorRampPalette(c('#2471A3','white','#C0392B'))(50),
                        display_numbers = FALSE,
                        border_color = 'white',
                        show_rownames = T,
                        show_colnames = F,
                        annotation_row = gene_color_code,
                        annotation_col = gene_color_code
                        # labels_row
)
#pheatmap_HCC_consuming_vs_producing
#To prepare the color code for consuming group
gene_to_subsystem_consuming = colnames(HCC_consuming_t)
gene_to_subsystem_consuming = data.frame(gene = substr(gene_to_subsystem_consuming, start = 1, stop = 15))
unique(duplicated(gene_to_subsystem_consuming)) #no duplication
gene_to_subsystem_consuming = gene_to_subsystem_consuming %>%
left_join(y= gene_to_HMRdata_df, by = ('gene' = 'gene')) %>%
select(gene, SUBSYSTEM)
gene_to_subsystem_consuming = distinct(gene_to_subsystem_consuming)
#even though distinct() removes duplicated rows, it does not change the order of the gene list. Look at the example of left_join
## Issue: duplicated rows arise when one gene maps to multiple subsystems.
## Resolution: keep one row per gene and label multi-subsystem genes 'multiple'.
## (The bare "1. / 2. / 3." lines below were previously unquoted text, which
## is a syntax error in R; they are now comments.)
# Step 1: identify genes that occur more than once
dupliated_consuming_list = gene_to_subsystem_consuming[duplicated(gene_to_subsystem_consuming$gene),]
# Step 2: drop duplicates so each gene appears exactly once
gene_to_subsystem_consuming = gene_to_subsystem_consuming[!(duplicated(gene_to_subsystem_consuming$gene)),]
# Step 3: relabel genes that had several subsystems as 'multiple'
gene_to_subsystem_consuming[gene_to_subsystem_consuming$gene %in% dupliated_consuming_list$gene,]$SUBSYSTEM = 'multiple'
gene_to_subsystem_producing = colnames(HCC_producing_t)
gene_to_subsystem_producing = data.frame(gene = substr(gene_to_subsystem_producing, start = 1, stop = 15))
gene_to_subsystem_producing = gene_to_subsystem_producing %>%
left_join(y= gene_to_HMRdata_df, by = ('gene' = 'gene')) %>%
select(gene, SUBSYSTEM)
gene_to_subsystem_producing = distinct(gene_to_subsystem_producing)
## Same issue for the producing group: genes mapping to multiple subsystems
## produce duplicated rows; collapse them and relabel as 'multiple'.
## (The bare "1. / 2. / 3." lines were previously unquoted text, which is a
## syntax error in R; they are now comments.)
# Step 1: identify genes that occur more than once
dupliated_producing_list = gene_to_subsystem_producing[duplicated(gene_to_subsystem_producing$gene),]
# Step 2: drop duplicates so each gene appears exactly once
gene_to_subsystem_producing = gene_to_subsystem_producing[!(duplicated(gene_to_subsystem_producing$gene)),]
# Step 3: relabel genes that had several subsystems as 'multiple'
gene_to_subsystem_producing[gene_to_subsystem_producing$gene %in% dupliated_producing_list$gene,]$SUBSYSTEM = 'multiple'
save(HCC_consuming_t, HCC_producing_t, gene_to_HMRdata_df, gene_to_subsystem_consuming, gene_to_subsystem_producing, dupliated_consuming_list, dupliated_producing_list, file = 'HCC_color_code.rda')
#color code for subsystem
color_code_subsystem_consuming = data.frame(subsystem = gene_to_subsystem_consuming$SUBSYSTEM)
color_code_subsystem_producing = data.frame(subsystem = gene_to_subsystem_producing$SUBSYSTEM)
#final check!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
unique(substr(colnames(HCC_consuming_t), start = 1, stop = 15) == gene_to_subsystem_consuming$gene)
unique(substr(colnames(HCC_producing_t), start = 1, stop = 15) == gene_to_subsystem_producing$gene)
#GOOOOOOOOOOOOOOD
rownames(color_code_subsystem_consuming) = colnames(HCC_consuming_t)
rownames(color_code_subsystem_producing) = colnames(HCC_producing_t)
ex_colorcode_col = data.frame(subsystem = rep(1, ncol(HCC_consuming_t)))
ex_colorcode_row = data.frame(subsystem = rep(2, ncol(correlation_matrix) - ncol(HCC_consuming_t)))
#to escape from collision
colnames(color_code_subsystem_consuming) = 'subsystem_consuming'
colnames(color_code_subsystem_producing) = 'subsystem_producing'
# to create colors for each group
##(ATP consuming group)
newCols_consuming <- colorRampPalette(grDevices::rainbow(length(unique(color_code_subsystem_consuming$subsystem_consuming))))
annoCol_consuming <- newCols_consuming(length(unique(color_code_subsystem_consuming$subsystem_consuming)))
names(annoCol_consuming) <- unique(color_code_subsystem_consuming$subsystem_consuming)
##(ATP producing group)
newCols_producing <- colorRampPalette(grDevices::rainbow(length(unique(color_code_subsystem_producing$subsystem_producing))))
annoCol_producing <- newCols_producing(length(unique(color_code_subsystem_producing$subsystem_producing)))
names(annoCol_producing) <- unique(color_code_subsystem_producing$subsystem_producing)
annoCol <- list(subsystem_consuming = annoCol_consuming, subsystem_producing = annoCol_producing)
#draw heatmap
# Heatmap of ATP-consuming genes (rows) vs ATP-producing genes (columns),
# annotated by metabolic subsystem on both axes.
# Fixed typo: "dispaly_numbers" -> display_numbers (misspelled arguments are
# silently swallowed by pheatmap's `...`).  Also dropped the redundant
# as.numeric() around the column-index arithmetic.
pheatmap(mat = correlation_matrix[1:ncol(HCC_consuming_t), (ncol(HCC_consuming_t)+1):ncol(correlation_matrix)],
         color = colorRampPalette(c('#2471A3','white','#C0392B'))(50),
         display_numbers = FALSE,
         border_color = 'white',
         cutree_rows = 2,
         cutree_cols = 2,
         annotation_colors = annoCol,
         annotation_row = color_code_subsystem_consuming,
         annotation_col = color_code_subsystem_producing,
         show_rownames = F,
         show_colnames = F,
         main = "HCC (x: ATP producing_192, y: ATP consuming_1084)"
)
save(color_code_subsystem_consuming, color_code_subsystem_producing, annoCol_consuming, annoCol_producing, correlation_matrix, HCC_consuming_t, HCC_producing_t, file='continue.rda')
#this figure sholud be exported in big size due to labeling
# alarm()
# hclust_HCC = hclust(dist())
save(correlation_matrix, Rcorrelation_matrix, HCC_coeff, HCC_p, file='HCC_correlation.rda')
#plot(c(1,2), c(1,2)) #check RStuido deos work right now.
# --- Further analysis for oxidative phosphorylation ---
# (section header; the original bare "<...>" line was not valid R syntax;
# the "phospohrylation" typo is also fixed)
producing_oxidative_phosphorylation_df = gene_to_subsystem_producing %>%
  left_join(y=gene_to_HMRdata_df, by = c('gene' ='gene')) %>%
  select(gene, RXNID, EQUATION, SUBSYSTEM.x)
# Count how many genes map to each reaction ID
producing_oxidative_phosphorylation_table = table(producing_oxidative_phosphorylation_df$RXNID)
# Quick overview plot of per-reaction gene counts (values hard-coded from the table)
plot(x=factor(c('HMR_4358', 'HMR_4421', 'HMR_4572', 'HMR_4675', 'HMR_5295', 'HMR_5429', 'HMR_6916', 'HMR_7629', 'HMR_7799', 'HMR_8474', 'HMR_8892', 'HMR_9570')), y=c(4,2,9,9,5,5,178,9,10,5,3,1), ylim=c(0,200))
#filter on ATP consuming group : #gene >10 per subsystem
|
f605233530f740ee58b425193c4db712daec834f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/charlatan/examples/elements.Rd.R
|
9d7df05fe1f4ebb0be59fea5d76d0d0903d4800e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
elements.Rd.R
|
# Example script extracted from the charlatan package's elements.Rd help page:
# generate fake chemical element symbols and names.
library(charlatan)
### Name: elements
### Title: Get elements
### Aliases: elements ch_element_symbol ch_element_element
### ** Examples

# Fake element symbols: one value by default, or n values when given
ch_element_symbol()
ch_element_symbol(10)
ch_element_symbol(50)
# Fake element names, same calling convention
ch_element_element()
ch_element_element(10)
ch_element_element(50)
|
c04204f567a94b5f6771f978227abb2d9a09cd6e
|
fe6100b141c19152a7a38c6b3022034853d8848a
|
/Figures/Fig.7/Fig7-TissueEnrich.R
|
b96e3dd334dec8e7a35d6c8ac524238845f9472d
|
[] |
no_license
|
YiweiNiu/STR_2022
|
fc213aa724730d728825a7ca5fc568b43e6156e3
|
d8cdca0c157bb893978fdfe0c0c4eef50e6cca27
|
refs/heads/main
| 2023-04-27T15:02:22.480097
| 2023-04-15T10:17:35
| 2023-04-15T10:17:35
| 574,407,342
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,036
|
r
|
Fig7-TissueEnrich.R
|
# Fig. 7: tissue-specific expression enrichment of a unique gene list,
# computed with TissueEnrich and plotted as -log10 adjusted p-values per tissue.
library(TissueEnrich)
# Input: one gene symbol per line
genes <- "gene-unique.txt"
inputGenes<-scan(genes,character())
# Wrap the symbols in a GeneSet object as required by teEnrichment()
gs<-GeneSet(geneIds=inputGenes,organism="Homo Sapiens",geneIdType=SymbolIdentifier())
output<-teEnrichment(inputGenes = gs)
# First list element holds the per-tissue enrichment SummarizedExperiment
seEnrichmentOutput<-output[[1]]
# Flatten to a data frame: rows = tissues, columns from colData
enrichmentOutput<-setNames(data.frame(assay(seEnrichmentOutput),row.names = rowData(seEnrichmentOutput)[,1]), colData(seEnrichmentOutput)[,1])
enrichmentOutput$Tissue<-row.names(enrichmentOutput)
head(enrichmentOutput)
# Bar chart: tissues ordered by significance; legend suppressed, labels tilted
ggplot(enrichmentOutput,aes(x=reorder(Tissue,-Log10PValue),y=Log10PValue,label = Tissue.Specific.Genes,fill = Tissue))+
  geom_bar(stat = 'identity')+
  labs(x='', y = '-LOG10(P-Adjusted)')+
  theme_bw()+
  theme(legend.position="none")+
  theme(plot.title = element_text(hjust = 0.5,size = 20),axis.title = element_text(size=15))+
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1),panel.grid.major= element_blank(),panel.grid.minor = element_blank())
ggsave(filename="tissueenrich.pdf", width = 20, height = 12, units = "cm")
|
0d3718b50bbfe78189ce58d189ee2168bff4fd87
|
dea374ddade7f98bfd036f4d753ff0ca8d1cdeb0
|
/wilcoxon.R
|
d4b491c204dbc1622767bd228f5fc3c5e3abdf2e
|
[] |
no_license
|
bcervantesg/itesm-dcc
|
66f314ae8f4d73e9a9be9d1d10ad0e9fdab748ad
|
dafc928fa3fbcc4bff7cd9b5d66a6647fc9755e9
|
refs/heads/master
| 2021-08-30T01:20:55.336152
| 2017-12-15T14:20:55
| 2017-12-15T14:20:55
| 112,964,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
wilcoxon.R
|
# Paired one-sided Wilcoxon signed-rank comparison of two classifier result
# sets (A vs B) over 120 databases, repeated for six diversity measures.
databasesall = read.csv("~/databases/databases120.csv")
# Pre-built database filters; only `all` (every database) is used below
binary= databasesall$classes ==2
loet10all = databasesall$atributes <=10
b11a100 = ((databasesall$atributes >10) & (databasesall$atributes <=100))
all = rep(TRUE,120)
bool = all
setwd("~/ProcessedOutput/")
# NOTE(review): trailing tags (#RF / #INC) vs the idA/idB labels below look
# inconsistent -- confirm which result file belongs to which method.
A = read.csv("PRFW10/120/Exp3aFW10RP100AcC.csv") #RF
B = read.csv("UniformTree/Exp3aFW100ACC.csv") #INC
idA = "RP100"
idB = "RFUT100"
# Paired one-sided test per measure; "***" marks p < 0.05
for (measure in c("Q","QP","H","HP","G","GP")){
  print(paste0('----',measure,'----'))
  wL = wilcox.test(A[[paste0(idA,measure)]][bool],B[[paste0(idB,measure)]][bool],paired = TRUE, alternative = "less")
  sig = ""
  if (wL$p.value < 0.05){ sig = "*** "}
  print(paste0(wL$p.value,sig))
}
# Fraction of databases where A <= B for each measure (and its complement)
for (measure in c("Q","QP","H","HP","G","GP")){
  print(measure)
  x = sum(A[[paste0(idA,measure)]][bool]-B[[paste0(idB,measure)]][bool] <= 0)
  print(x/sum(bool))
  print(1-x/sum(bool))
}
# Rank both methods' summed errors (100 - column sums) in one combined vector
rank(c(100-colSums(B[bool,-1]),100-colSums(A[bool,-1])))
|
68978c281dc51e4602b4d7ac6f524a380957c428
|
87065fbf9252d3157932a7de91c0064ec92d30c1
|
/R_attempts/Exercise_2_2.R
|
b982d7c528c48fb8584dc6949739dfe379263733
|
[] |
no_license
|
Skhurana136/ml_summerschool_102020
|
9a6ce02d68a0d32db798eb33b9a4a3060738dcad
|
87f4ee6315c169ca43fbb55924ecd9f4f3537a4b
|
refs/heads/master
| 2022-12-25T04:28:06.625269
| 2020-10-01T13:32:23
| 2020-10-01T13:32:23
| 300,288,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,594
|
r
|
Exercise_2_2.R
|
#Choose some of the classifiers already introduced in the lecture
#and visualize their decision boundaries for relevant hyperparameters.
#Use mlbench::mlbench.spirals to generate data and use plot learner prediction for visualization.
#To refresh your knowledge about mlr3 you can take a look at https://mlr3book.mlr-org.com/basics.html.
library(mlbench)
library(mlr3)
library(mlr3learners)
library(mlr3viz)
library(ggplot2)
library(gridExtra)
# Fixed seed so the simulated spirals (and hence the plots) are reproducible
set.seed(401)
# Candidate classifiers; predict_type = "prob" is required so
# plot_learner_prediction() can shade the class-probability regions.
learners <- list(
  # Softmax regression
  softmax_learner = lrn("classif.multinom",
    trace = FALSE,
    predict_type = "prob"
  ),
  # K-nearest neighbors
  knn_learner = lrn("classif.kknn",
    k = 50,
    predict_type = "prob"
  ),
  # Linear discriminant analysis
  lda_learner = lrn("classif.lda",
    predict_type = "prob"
  ),
  # Quadratic discriminant analysis
  qda_learner = lrn("classif.qda",
    predict_type = "prob"
  ),
  # Naive Bayes
  nb_learner = lrn("classif.naive_bayes",
    predict_type = "prob"
  )
)
# Simulate two intertwined spirals with Gaussian noise and inspect them
spirals <- data.frame(mlbench.spirals(n = 500, sd = 0.11))
ggplot(spirals, aes(x.1, x.2, color = classes)) + geom_point()
# Wrap the data as an mlr3 classification task
spirals_task <- TaskClassif$new(id = "spirals", backend = spirals, target = "classes")
# One decision-boundary plot per learner, arranged together in a grid
ggplot_list <- lapply(
  learners,
  function(learner) plot_learner_prediction(learner, spirals_task) +
    theme_minimal(base_size = 10) + guides(alpha = "none", shape = "none") +
    ggtitle(learner$id)
)
do.call(grid.arrange, ggplot_list)
|
c8675c36b611287c6e5d58da64b2ead80ce38ac8
|
2cc758c13a14e5044b08a7bafb6d9f923a83c773
|
/older/hus/Tabs_server/Tab11_SaveHusListInDropbox.R
|
cb0bda7980a76899d90dfac789f2621f89a3bb38
|
[] |
no_license
|
TBrach/shiny_apps
|
2cfdbbe11a1702251198d7186c16a87c8b42004d
|
c727095900d35d0c06107d7e3fdffffbd1d03ff2
|
refs/heads/master
| 2023-02-27T15:12:24.809444
| 2021-02-08T08:23:53
| 2021-02-08T08:23:53
| 326,055,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 600
|
r
|
Tab11_SaveHusListInDropbox.R
|
# - save Planner in your dropbox -
# Shiny observer: when the "saveDropbox" button is pressed, serialize the
# current hus list (rv$DFhus) to a temporary .rds file and upload it to the
# apps/hus folder in Dropbox; refuse with a message when the list is empty.
observeEvent(input$saveDropbox, {
  if(is.null(rv$DFhus)){
    rv$infoText = "I do not think you want to upload an empty hus list to your Dropbox"
    return(NULL)
  } else {
    Name <- "Hus"
    filename = paste(Name, ".rds", sep = "")
    # Write to a temp file first, then push it to Dropbox via rdrop2
    file_path <- file.path(tempdir(), filename)
    saveRDS(rv$DFhus, file_path)
    drop_upload(file_path, path = 'apps/hus')
    rv$infoText = "Uploaded hus list to Dropbox"
  }
})
# --
|
bf12ac77094b03b08cbba002bc607f11b34777c3
|
206ba0ae3465c8dc00a7a8e9ac4fa87150be5d47
|
/SexRatio.R
|
846ed0aab5ed703fe835ca9a134e3997dd5ecadf
|
[] |
no_license
|
IMASau/AbResearch
|
2169fead4c864fe450c05918cc5a8c3aed51d530
|
5f260058e2a103e15374cd843d1fdea2aa71f7cb
|
refs/heads/master
| 2023-08-17T12:25:37.148260
| 2023-08-08T04:50:16
| 2023-08-08T04:50:16
| 30,902,630
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
SexRatio.R
|
library(openxlsx)
# Count abalone by site / sample period / size class / sex, then spread the
# sexes into columns.
# Fixed: values_fill previously named a non-existent column ("freq"); the
# values_from column is "n", so missing site/period/size-by-sex combinations
# stayed NA instead of being filled with 0.
SexRatio <- samdata %>% group_by(Site_Name, SamplePeriod, SizeC,Sex) %>% summarise(n=n()) %>%
  pivot_wider(id_cols = c(Site_Name, SamplePeriod, SizeC), names_from = Sex, values_from = n, values_fill = list(n = 0))
wb <- loadWorkbook("CatchbyDiver.xlsx")
addWorksheet(wb, "blacklip")
addWorksheet(wb, "greenlip")
writeData(wb, sheet = "blacklip", ce.summary.bl, colNames = T)
writeData(wb, sheet = "greenlip", ce.summary.gl, colNames = T)
saveWorkbook(wb,"CatchbyDiver.xlsx",overwrite = T)
## Using openxlsx
wb <- createWorkbook("d:/SexRatio.xlsx")
addWorksheet(wb, "SexRatio")
writeData(wb, sheet = "SexRatio", SexRatio, colNames = T)
saveWorkbook(wb,"d:/SexRatio.xlsx",overwrite = T)
|
27bb8e81eb0bba559b1aa1f6a7dc835c5c3550fb
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/h2o.get_leaderboard.Rd
|
5d70c594db572f30263b9e25b5ef81a32f91fd7f
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,527
|
rd
|
h2o.get_leaderboard.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/automl.R
\name{h2o.get_leaderboard}
\alias{h2o.get_leaderboard}
\title{Retrieve the leaderboard from the AutoML instance.}
\usage{
h2o.get_leaderboard(object, extra_columns = NULL)
}
\arguments{
\item{object}{The object for which to return the leaderboard. Currently, only H2OAutoML instances are supported.}
\item{extra_columns}{A string or a list of strings specifying which optional columns should be added to the leaderboard. Defaults to \code{NULL}.
Currently supported extensions are:
\itemize{
\item{'ALL': adds all columns below.}
\item{'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).}
\item{'predict_time_per_row_ms': column providing the average prediction time by the model for a single row.}
\item{'algo': column providing the algorithm name for each model.}
}}
}
\value{
An H2OFrame representing the leaderboard.
}
\description{
Contrary to the default leaderboard attached to the automl instance, this one can return columns other than the metrics.
}
\examples{
\dontrun{
library(h2o)
h2o.init()
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.importFile(path = prostate_path, header = TRUE)
y <- "CAPSULE"
prostate[,y] <- as.factor(prostate[,y]) #convert to factor for classification
aml <- h2o.automl(y = y, training_frame = prostate, max_runtime_secs = 30)
lb <- h2o.get_leaderboard(aml)
head(lb)
}
}
|
facd3bc5174b33e2ad925193b043f31d4f9a06e9
|
b400b9f91e21da74aa590b73c54cbacfb418257f
|
/R/white_train.R
|
3848ab66d092a82acbdf3fa7b6fc3a22982b9ac3
|
[] |
no_license
|
group-wine/sommelieR
|
5162335e92f8f2e6f53ba91d4001b70b9ad19b81
|
d99a83553e40e75590df633576dcd8f45199badc
|
refs/heads/master
| 2020-05-02T21:09:28.641195
| 2019-04-28T19:02:10
| 2019-04-28T19:02:10
| 178,212,398
| 0
| 2
| null | 2019-03-28T17:37:13
| 2019-03-28T13:45:55
|
R
|
UTF-8
|
R
| false
| false
| 770
|
r
|
white_train.R
|
#' Training dataset of quality ratings for white wine
#'
#' A training dataset containing the quality ratings and chemical characteristics of white wine.
#'
#' @format A data frame with 12 variables:
#' \describe{
#' \item{fixed.acidity}{Fixed acidity.}
#' \item{volatile.acidity}{Volatile acidity}
#' \item{citric.acid}{Citric acid}
#' \item{residual.sugar}{Residual sugar}
#' \item{chlorides}{chlorides}
#' \item{free.sulfur.dioxide}{Free sulfur dioxide}
#' \item{total.sulfur.dioxide}{Total sulfur dioxide}
#' \item{density}{density}
#' \item{pH}{pH}
#' \item{sulphates}{sulphates}
#' \item{alcohol}{alcohol}
#' \item{quality}{quality}
#' }
#' @source \url{http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/}
"white_train"
|
afa85eb6682465842e691f03e2576902c0728597
|
77f5651577d90091e045bd21e1dd86ce4bfa4711
|
/run_analysis.R
|
02554ad4c2c5ff7b017d0dd76c3fcee0e1c8c9a5
|
[] |
no_license
|
AbuNurullah/getting_and_cleaning_data
|
a80a6ed83130ac1458388f99734fb0223d6ad64e
|
5490f1a8e038d4d38d427c71bae75c3a1162c803
|
refs/heads/master
| 2016-09-01T15:16:10.765801
| 2015-10-27T23:33:47
| 2015-10-27T23:33:47
| 44,032,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,991
|
r
|
run_analysis.R
|
#Course: "Getting and Cleaning Data"
# Project R code
#Abu M. Nurullah
# 10/21/2015 revision : 10/23/2015
#----------------------------------
# Section: 1 merging of the two datasets #
##########################################
require(reshape2)
require(plyr)
#download and unzip the project data file
setwd("C:/temp/")
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
destfile = "projectdata.zip" )
if(!file.exists("UCI HAR Dataset" )){
unzip("projectdata.zip")
}
#set path to the working data directory
#--------------------------------------------
path = paste(getwd(), "/UCI HAR Dataset/", sep = "")
setwd(path)
print("..reading training data sets....")
train.data = read.table("train/X_train.txt", sep = "")
train.label = read.table("train/y_train.txt", sep = " ")
train.subj = read.table("train/subject_train.txt", sep = " ")
train_df = cbind(train.data, "act_id" = train.label$V1, "subject_id" = train.subj$V1)
#read the test data similar to above
#--------------------------------------
print("..reading test data sets.... ")
test.data = read.table("test/X_test.txt", sep = "")
test.label = read.table("test/y_test.txt", sep = " ")
test.subj = read.table("test/subject_test.txt", sep = " ")
test_df = cbind(test.data, "act_id" = test.label$V1, "subject_id"= test.subj$V1)
# combine the two data frames into a singel data frame
print("...combining both train and test data frames.......")
df_merged = rbind(train_df, test_df)
###########################################################
# Section: 2. Extracts only the measurements on #
# the mean and standard deviation for each measurement. #
###########################################################
# extract the variable names from feature.txt
#---------------------------------------------
print("..reading feature labels...")
df_feature = read.table("features.txt", sep = "")
col_labels = c(as.vector(df_feature[,2]), "act_id", "subject_id")
#assign col-labels to combined data frame
colnames(df_merged) <- col_labels
#identifying positions of variables in the col_labels vector for mean() and std()
#---------------------------------------------------------------------------
v1 = agrep("-mean()-", col_labels)
v2 = agrep("-std()-", col_labels)
v = sort(c(v1, v2))
#creating dataset with only the mean and Std measurement variables
#-------------------------------------------------------------------
print("..creating dataset with selected variables.....")
df_sel = df_merged[, v]
df_sel = cbind(df_sel, "activity" = df_merged$"act_id", "subjectid"= df_merged$"subject_id")
#get the column lables of the selected set and assign tidy labels
selnames = names(df_sel)
selnames = gsub("-mean", "Mean", selnames)
selnames = gsub("-std", "Std", selnames)
selnames =gsub('[-()]', '', selnames)
names(df_sel) <- selnames
#################################################
# Section 3.Uses descriptive activity names #
# to name the activities in the data set #
#################################################
#read the activity label
#-------------------------
print("..assigning activity labels")
actlabel = read.table("activity_labels.txt", sep ="")
# converting the numeric activity values into a descriptive one
#---------------------------------------------------------------
df_sel$activity = factor(df_sel$activity, levels = actlabel[,1], labels = actlabel[,2])
df_sel$subjectid = as.factor(df_sel$subjectid)
#reshaping the df_sel data frame for concise summary
#----------------------------------------------------
print("reshaping data for concise presentation...")
df_melt = melt(df_sel, c("activity", "subjectid"))
df_final = dcast(df_melt, subjectid + activity ~variable, mean)
print("..writing final tidy data set...")
write.table(df_final, "tidyset.txt", row.names = FALSE, quote = FALSE)
# Finally clean up the intermediate objects created during the process.
# Fixed typos: the objects created above are "df_feature" and "selnames";
# the misspelled names "df_deature"/"sel_names" made rm() warn and left the
# real objects behind.
rm(df_merged, df_feature, df_sel, df_final, df_melt, selnames)
|
7763dbe5fc69b7e39b19ba3c9486bd21f29b721d
|
2080044625e70fbe2407d923d1b3727da9ced1b2
|
/FCaldata.R
|
4000914bbb453ad8815e56871b165dbde7e65df4
|
[] |
no_license
|
kurla-sreevalli/DS_Rcode
|
01a26929ed49f239fdf1b7d7eff50633ec857758
|
7d527816715954c993112ce1dcabd1a30fc26a1e
|
refs/heads/master
| 2023-01-29T07:40:32.493397
| 2020-12-08T11:04:02
| 2020-12-08T11:04:02
| 294,662,354
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,130
|
r
|
FCaldata.R
|
library(readxl)
ALset <- read_excel(file.choose())
View(ALset)
attach(ALset)
#plot(ALset$Passengers...000.,type = "o")
# So creating 12 dummy variables
library(dummies)
X<- data.frame(outer(rep(Quarter,length = 96), Quarter,"==") + 0 )# Creating dummies for 4 quarters
View(X)
colnames(X)<-Month # Assigning month names
View(X)
ALdata<-cbind(ALset,X)
View(ALdata)
colnames(ALdata)[2]<-"PASSENGERS"
colnames(ALdata)
ALdata["t"]<- 1:96
View(ALdata)
ALdata["log_passengers"]<-log(ALdata["PASSENGERS"])
ALdata["t_square"]<-ALdata["t"]*ALdata["t"]
attach(ALdata)
View(ALdata)
train<-ALdata[1:67,]
test<-ALdata[68:96,]
########################### LINEAR MODEL #############################
linear_model<-lm(PASSENGERS~t,data=train)
summary(linear_model)
linear_pred<-data.frame(predict(linear_model,interval='predict',newdata =test))
#linear_pred in console window
View(linear_pred)
rmse_linear<-sqrt(mean((test$PASSENGERS-linear_pred$fit)^2,na.rm = T))
rmse_linear # 47.56252
######################### Exponential #################################
expo_model<-lm(log_passengers~t,data=train)
summary(expo_model)
expo_pred<-data.frame(predict(expo_model,interval='predict',newdata=test))
rmse_expo<-sqrt(mean((test$PASSENGERS-exp(expo_pred$fit))^2,na.rm = T))
rmse_expo # 42.21818
######################### Quadratic ####################################
Quad_model<-lm(PASSENGERS~t+t_square,data=train)
summary(Quad_model)
Quad_pred<-data.frame(predict(Quad_model,interval='predict',newdata=test))
rmse_Quad<-sqrt(mean((test$PASSENGERS-Quad_pred$fit)^2,na.rm=T))
rmse_Quad # 43.09008
######################### Additive Seasonality #########################
sea_add_model<-lm(PASSENGERS~ X1+X2+X3+X4+X5+X6+X7+X8+X9+X10+X11+X12,data=train)
summary(sea_add_model)
sea_add_pred<-data.frame(predict(sea_add_model,newdata=test,interval='predict'))
rmse_sea_add<-sqrt(mean((test$PASSENGERS-sea_add_pred$fit)^2,na.rm = T))
rmse_sea_add # 129.9164
######################## Additive Seasonality with Linear #################
Add_sea_Linear_model<-lm(PASSENGERS~t+X1+X2+X3+X4+X5+X6+X7+X8+X9+X10+X11+X12,data=train)
summary(Add_sea_Linear_model)
Add_sea_Linear_pred<-data.frame(predict(Add_sea_Linear_model,interval='predict',newdata=test))
rmse_Add_sea_Linear<-sqrt(mean((test$PASSENGERS-Add_sea_Linear_pred$fit)^2,na.rm=T))
rmse_Add_sea_Linear # 45.5606
######################## Additive Seasonality with Quadratic #################
Add_sea_Quad_model<-lm(PASSENGERS~t+t_square+X1+X2+X3+X4+X5+X6+X7+X8+X9+X10+X11+X12,data=train)
summary(Add_sea_Quad_model)
Add_sea_Quad_pred<-data.frame(predict(Add_sea_Quad_model,interval='predict',newdata=test))
rmse_Add_sea_Quad<-sqrt(mean((test$PASSENGERS-Add_sea_Quad_pred$fit)^2,na.rm=T))
rmse_Add_sea_Quad # 44.41453
######################## Multiplicative Seasonality #########################
multi_sea_model<-lm(log_passengers~X1+X2+X3+X4+X5+X6+X7+X8+X9+X10+X11+X12,data = train)
summary(multi_sea_model)
multi_sea_pred<-data.frame(predict(multi_sea_model,newdata=test,interval='predict'))
rmse_multi_sea<-sqrt(mean((test$PASSENGERS-exp(multi_sea_pred$fit))^2,na.rm = T))
rmse_multi_sea # 134.6355
######################## Multiplicative Seasonality Linear trend ##########################
multi_add_sea_model<-lm(log_passengers~t+X1+X2+X3+X4+X5+X6+X7+X8+X9+X10+X11+X12,data = train)
summary(multi_add_sea_model)
multi_add_sea_pred<-data.frame(predict(multi_add_sea_model,newdata=test,interval='predict'))
rmse_multi_add_sea<-sqrt(mean((test$PASSENGERS-exp(multi_add_sea_pred$fit))^2,na.rm = T))
rmse_multi_add_sea # 42.48108
# Preparing table on model and it's RMSE values
table_rmse<-data.frame(c("rmse_linear","rmse_expo","rmse_Quad","rmse_sea_add","rmse_Add_sea_Quad","rmse_multi_sea","rmse_multi_add_sea"),c(rmse_linear,rmse_expo,rmse_Quad,rmse_sea_add,rmse_Add_sea_Quad,rmse_multi_sea,rmse_multi_add_sea))
View(table_rmse)
colnames(table_rmse)<-c("model","RMSE")
View(table_rmse)
# exponential seasonality has least RMSE value
new_model <- expo_model<-lm(log_passengers~t,data=train)
resid <- residuals(new_model)
resid[1:10]
windows()
acf(resid,lag.max = 10)
# By principal of parcimony we will consider lag - 1 as we have so
# many significant lags
# Building Autoregressive model on residuals consider lag-1
k <- arima(resid, order=c(1,0,0))
str(k)
View(data.frame(res=resid,newresid=k$residuals))
windows()
acf(k$residuals,lag.max = 15)
pred_res<- predict(arima(k$residuals,order=c(1,0,0)),n.ahead = 12)
str(pred_res)
pred_res$pred
acf(k$residuals)
# Persist the engineered data set.  write.csv() does not allow col.names
# (it warns "attempt to set 'col.names' ignored"), so keep the header row
# and only suppress row names.
write.csv(ALdata, file = "aldata.csv", row.names = FALSE)
getwd()
####################### Predicting new data #############################
library(readxl) #ignore if have saved the trekdata in csv format.
test_data<-read.csv(file.choose())
View(test_data)
pred_new<-data.frame(predict(new_model,newdata=test_data,interval = 'predict'))
View(pred_new)
#pred_new$fit <- pred_new$fit +pred_res$pred
#View(pred_new)
#running the whole model for predicted data set
|
2799b3c9d9a220f13c7481964f55da5437b6c415
|
f03d90856f67713cf21f72b4bca85e57b8f73919
|
/plot2.R
|
5a8b1a6567a150935cc3ccf6c2857a05c7b1c278
|
[] |
no_license
|
svangaal/ExData_Plotting1
|
bb290d2b17bdad5c74a1b12c9415aec06ef7c0df
|
ed6cbbd0dffdd457c771569d125b7496a06d8bb1
|
refs/heads/master
| 2020-12-07T06:29:25.741876
| 2016-08-21T03:10:37
| 2016-08-21T03:10:37
| 66,112,846
| 0
| 0
| null | 2016-08-19T21:15:43
| 2016-08-19T21:15:43
| null |
UTF-8
|
R
| false
| false
| 293
|
r
|
plot2.R
|
# Plot 2: line chart of global active power over time, written to a
# 480x480 PNG. Drawing the line directly with type = "l" replaces the
# original two-step plot(type = "n") + lines() construction; the rendered
# output is identical.
png("Plot2.png", width = 480, height = 480)
with(powerSubset,
     plot(DateTime, Global_active_power,
          type = "l",
          main = "",
          xlab = "",
          ylab = "Global Active Power (kilowatts)"))
dev.off()
|
6e0df5a634442d9a03c654f906ee66b2844bfd3f
|
1ebe6dc8c3124767c45536647b917f776ce3954d
|
/R/lm_diagnostics.R
|
bd14aa37a8bf9c04e61a965d10e7bb38c4d25e84
|
[] |
no_license
|
jenniferthompson/JTHelpers
|
bb25ffdd7bde3c5178ab389caaf0e2e1ed4c9a12
|
0ce6de6bca8603c4d237b668a64fa929344f3554
|
refs/heads/master
| 2021-01-13T11:50:16.819305
| 2017-02-22T21:12:04
| 2017-02-22T21:12:04
| 77,569,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
lm_diagnostics.R
|
#' Check assumptions of heteroscedasticity and normally distributed errors for a linear regression
#' model fit.
#'
#' Draws two diagnostic plots on the current graphics device: a
#' residuals-vs-fitted ("RP") plot with a horizontal reference line at zero,
#' followed by a normal Q-Q plot of the residuals.
#'
#' @param lmObj Model fit of class `lm`.
#' @param outcomeString String to identify outcome in X axis label. Defaults to "Outcome."
#' @param titleString String to add to plot titles. If missing, generic titles are used.
#'
#' @export
#'
#' @examples
#'
#' ## Linear regression model
#' mymod <- lm(Sepal.Width ~ Sepal.Length, data = iris)
#'
#' lm_diagnostics(mymod, outcomeString = 'Sepal.Width', titleString = 'Sepal.Width vs Sepal.Length')
#'
lm_diagnostics <- function(lmObj,
                           outcomeString = 'Outcome',
                           titleString){
  if(!(inherits(lmObj, 'lm'))){
    stop('lmObj must be a model fit of type lm', call. = FALSE)
  }

  if(missing(titleString)){
    qq.title <- 'Q-Q of residuals'
    rp.title <- 'RP plot'
  } else{
    qq.title <- paste('Q-Q of residuals,', titleString)
    rp.title <- paste('RP plot,', titleString)
  }

  ## fit.mult.impute objects work differently than non-imputed objects; use residuals and fitted
  ## values from the model object directly rather than resid() and fitted().
  ## Extracting both vectors once here removes the duplicated plotting code of
  ## the original two-branch version.
  if(inherits(lmObj, 'fit.mult.impute')){
    model.resid <- lmObj$residuals
    model.fitted <- lmObj$fitted.values
  } else{
    model.resid <- resid(lmObj)
    model.fitted <- fitted(lmObj)
  }

  plot(model.resid ~ model.fitted,
       xlab = paste('Predicted', outcomeString),
       ylab = paste('Model residual'),
       main = rp.title,
       col = 'turquoise4')
  abline(h = 0)
  qqnorm(model.resid, datax = TRUE, main = qq.title)
}
|
0b30787cff04ea9c6dd94dd65996187c958f4d3b
|
e47740fe0c4b2688f7b9e380ee9d903a68542af4
|
/man/label.endpoints.Rd
|
c7c7c64a3590ad524aea3b541fa25e0f1942322c
|
[] |
no_license
|
tdhock/directlabels
|
caa95eab62394181b0b3635bf705fa2d95cdf802
|
10e243b8671a2cff465955bd7e48f3c1d724d8a2
|
refs/heads/master
| 2022-06-29T00:56:27.733948
| 2022-06-17T05:01:20
| 2022-06-17T05:03:49
| 37,545,421
| 74
| 21
| null | 2021-07-15T04:49:53
| 2015-06-16T17:31:55
|
R
|
UTF-8
|
R
| false
| false
| 479
|
rd
|
label.endpoints.Rd
|
\name{label.endpoints}
\alias{label.endpoints}
\title{label endpoints}
\description{Make a Positioning Method that labels a certain x value.}
\usage{label.endpoints(FUN,
HJUST)}
\arguments{
\item{FUN}{FUN(d$x) should return an index of which point to label. for
example you can use which.min or which.max.}
\item{HJUST}{hjust of the labels.}
}
\value{A Positioning Method like \code{\link{first.points}} or \code{\link{last.points}}.}
\author{Toby Dylan Hocking}
|
79594722233845a564dc3616a74f430557743d8e
|
85d6645766e27f8fbc0303c714835eeae66b4fbb
|
/man/dynamicddm.Rd
|
dbeef3d3fb06d2129c8d8d3902aae8541e5dc8c3
|
[
"MIT"
] |
permissive
|
AlexanderFengler/addmtoolbox
|
f0b1fcfd03cb8bd6574c327a31342de5c30f0e89
|
e60dec1761c03600a9823158e8e4f1b608ae2ea8
|
refs/heads/master
| 2016-09-05T21:16:52.479209
| 2015-03-13T21:16:09
| 2015-03-13T21:16:09
| 30,930,838
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
rd
|
dynamicddm.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{dynamicddm}
\alias{dynamicddm}
\title{Simulate DDM process (by condition, 2 items)
\code{dynamicddm()}}
\usage{
dynamicddm(parameters, decision = 0L, valuations = 0L, nr_attributes = 0L,
fixpos = 0L, fixdur = 0L, rt = 0L, stateStep = 0)
}
\arguments{
\item{decision}{integer which gives choice in current trial (1 or 2)}
\item{valuations}{vector that stores the item valuations for the provided trial}
\item{nr_attributes}{integer variable placeholder for interface consistency / see multiattribute versions for specification}
\item{fixpos}{vector placeholder for interface consistency / see attentional versions for specification}
\item{fixdur}{vector placeholder for interface consistency / see attentional versions for specification}
\item{rt}{reaction time of provided trial}
\item{stateStep}{numeric variable between [0,1] that indicates how finegrained the vertical grid of the model space shall be computed}
}
\value{
numeric variable storing likelihood value
}
\description{
Simulate DDM process by unique trial condition (2 items)
}
\author{
Alexander Fengler, \email{alexanderfengler@gmx.de}
}
|
09898999c1cb0faf54d9cae163a4bf8f09e8b8ac
|
418a0db93fb9562801df2d74b6390997e5fbcc24
|
/man/scrape.Rd
|
45cef773558218ecabb8df71b727c4dab10ba1b9
|
[
"MIT"
] |
permissive
|
stephematician/statsnbaR
|
078f0bc028b5a9f3009fb47b21ba7a16a8900e65
|
0d92d876c0255e16500db02f77f3bb43c4b77824
|
refs/heads/master
| 2021-01-10T03:25:07.417842
| 2018-01-02T09:20:46
| 2018-01-02T09:20:46
| 52,210,889
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 202
|
rd
|
scrape.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape.R
\name{scrape}
\alias{scrape}
\title{Main scrape function}
\description{
Main scrape function
}
\keyword{internal}
|
497267f79f738252df2b7401f808cc4f5505de81
|
f042fbdf31a2106bfbe298b32dc0aa551bd3ae84
|
/R/simulate_spatial.R
|
0ab3097c40cfafb2ea800498fc3392c42262f8c7
|
[] |
no_license
|
danielbonhaure/weather-generator
|
c76969967c3a60500a6d90d5931a88fb44570eba
|
6a207415fb53cca531b4c6be691ff2d7d221167d
|
refs/heads/gamwgen
| 2023-01-21T17:38:46.102213
| 2020-12-04T21:59:05
| 2020-12-04T21:59:05
| 286,565,700
| 0
| 0
| null | 2020-12-01T13:19:05
| 2020-08-10T19:50:16
|
R
|
UTF-8
|
R
| false
| false
| 61,881
|
r
|
simulate_spatial.R
|
## NOTE: execution modes (there are three)
## 1- locations to simulate coincide with the fitted points (fit$stations); not every fitted point needs to be simulated, but every point to simulate must have been used in the fit
##    1.1- Santi's noise
##    1.2- new noise (spatially correlated)
## 2- locations to simulate do not coincide with the fitted points, but are not a regular grid either (done)
## 3- locations to simulate form a regular grid (done)
#' @title Simulations control configuration
#' @description Provides fine control of different parameters that will be used to create new weather series.
#' @param nsim number of response vectors to simulate. Defaults to 1.
#' @param seed an object specifying if and how the random number generator should be initialized (‘seeded’).#'
#' Either NULL or an integer that will be used in a call to set.seed before simulating the response vectors.
#' If set, the value is saved as the "seed" attribute of the returned value. The default, NULL will not change the random generator state.
#' @param avbl_cores number of cores available for parallel execution.
#' @param bbox_offset offset added around the bounding box of the fitted stations (in the units of the fitting CRS, presumably meters — confirm against the fit).
#' @param sim_loc_as_grid whether the simulation locations are treated as a regular grid when building the simulation raster.
#' @param use_spatially_correlated_noise if TRUE (default), noise comes from spatially correlated random fields; if FALSE, uncorrelated variants are used (only valid when the simulated points were used in the fit).
#' @param use_temporary_files_to_save_ram whether intermediate results are written to temporary files to reduce memory usage.
#' @param remove_temp_files_used_to_save_ram whether those temporary files are removed afterwards.
#' @param manage_parallelization_externally if TRUE, the caller is responsible for registering a foreach backend.
#' @return A named list of control parameters, including the selected noise-generating functions.
#' @export
spatial_simulation_control <- function(nsim = 1,
                                       seed = NULL,
                                       avbl_cores = 2,
                                       bbox_offset = 100000,
                                       sim_loc_as_grid = TRUE,
                                       use_spatially_correlated_noise = TRUE,
                                       use_temporary_files_to_save_ram = TRUE,
                                       remove_temp_files_used_to_save_ram = TRUE,
                                       manage_parallelization_externally = FALSE) {

  # Select both noise generators in a single if/else instead of assigning the
  # correlated variants and conditionally overwriting them.
  if (use_spatially_correlated_noise) {
    prcp_noise_generating_function <- gamwgen:::random_field_noise_prcp
    temperature_noise_generating_function <- gamwgen:::random_field_noise_temperature
  } else {
    prcp_noise_generating_function <- gamwgen:::not_spatially_correlated_random_field_noise_prcp
    temperature_noise_generating_function <- gamwgen:::not_spatially_correlated_random_field_noise_temperature
  }

  list(nsim = nsim, seed = seed, avbl_cores = avbl_cores,
       bbox_offset = bbox_offset, sim_loc_as_grid = sim_loc_as_grid,
       use_spatially_correlated_noise = use_spatially_correlated_noise,
       use_temporary_files_to_save_ram = use_temporary_files_to_save_ram,
       remove_temp_files_used_to_save_ram = remove_temp_files_used_to_save_ram,
       manage_parallelization_externally = manage_parallelization_externally,
       prcp_noise_generating_function = prcp_noise_generating_function,
       temperature_noise_generating_function = temperature_noise_generating_function)
}
#' @title Simulates new weather trajectories in stations
#' @description Simulates new weather trajectories.
#' @param model A gamwgen model.
#' @param simulation_locations a sf object with the points at which weather should be simulated.
#' If not set, the locations used to fit the model will be used.
#' @param start_date a start date in text format (will be converted using as.Date) or a date object.
#' @param end_date an end date in text format (will be converted using as.Date) or a date object.
#' @param control a gamwgen simulation control list.
#' @import dplyr
#' @import foreach
#' @export
spatial_simulation <- function(model, simulation_locations, start_date, end_date,
control = gamwgen:::spatial_simulation_control(),
output_folder = getwd(), output_filename = "sim_results.csv",
seasonal_covariates = NULL, verbose = F) {
## Español: Objeto que será devuelto
## English: Object to be returned
gen_climate <- list()
###############################################################
suppressPackageStartupMessages(library("dplyr"))
suppressPackageStartupMessages(library("foreach"))
base::invisible(utils::capture.output(suppressPackageStartupMessages(library("RandomFields"))))
###############################################################
# Español: Comprobar que el objeto ajustado sea de la clase correcta
# English: Check that the fitted object is of the right class
if(class(model) != 'gamwgen')
stop(glue::glue('Received a model of class {class(model)} and a model of class "gamwgen" was expected.'))
# Español: Comprobar que las locación a simular existan
# English: Check that the locations to be simulated exists
if (is.null(simulation_locations))
stop("The parameter simulation_locations can't be null!")
# Español: Comporbar que el objeto de entrada
# English: Check that the input objects are of the right class
gamwgen:::check.simulation.input.data(simulation_locations, seasonal_covariates)
###############################################################
# Español: Comprobar que las locaciones a simular estén proyectadas en el mismo sistema de
# coordenadas que el de los datos ajustados. De otra manera, se convierte
# English: Check that the locations to be simulated are projected in the same coordinate
# system as the fitted data. Otherwise, convert it
if(sf::st_crs(simulation_locations) != sf::st_crs(model$crs_used_to_fit)) {
simulation_locations <- simulation_locations %>%
sf::st_transform(sf::st_crs(model$crs_used_to_fit))
warning('The crs used to fit and the crs of simulation_locations are not equals. ',
'Se transforma simulation_locations al crs {del ajuste}')
}
## Español: Creacion de un área de influencia que debe abarcar a las locaciones a simular
## English: Create a bounding box to check whether the simulation location are engulfed by it
sl_bbox <- sf::st_bbox(model$stations)
sl_bbox_offset <- sf::st_bbox(
obj = c(xmin = sl_bbox[['xmin']] - control$bbox_offset,
ymin = sl_bbox[['ymin']] - control$bbox_offset,
xmax = sl_bbox[['xmax']] + control$bbox_offset,
ymax = sl_bbox[['ymax']] + control$bbox_offset),
crs = sf::st_crs(model$crs_used_to_fit))
polygon_offset <- sf::st_as_sfc(sl_bbox_offset)
sf::st_crs(polygon_offset) <- sf::st_crs(model$crs_used_to_fit)
## Español: Las locaciones a simular deben estar dentro del área
## English: Simulation locations should be inside the bounding box
if(!all(sf::st_contains(polygon_offset, simulation_locations, sparse = F)))
stop('Alguno de los puntos ajustados se encuentra fuera del bounding box ',
'creado a partir de los puntos a simular y con un offset de 10 km.')
## Español: El ruido espacialmente no correlacionado sólo puede ser usado si las estacion
## meteorológicas a simular fueron incluidas en el ajuste. Los residuos de estas estaciones
## son necesarios para modelar las distribuciones multivariadas.
## Non spatially correlated noise can only be used if the weather stations
## to be simulated where used in the fitting process. The residuals of those stations are
## needed to model the multivariate distributions.
if(!control$use_spatially_correlated_noise)
if(any(lapply(sf::st_equals(simulation_locations, model$stations), length) != 1))
stop('Los ruidos NO correlacionados espacialmente solo pueden ser usados cuando ',
'los puntos a ser simulados son los mismos que fueron utilizados en el ajuste!')
###############################################################
# Español: Comprobar que las locaciones a simular sean un objeto sf válido
# English: Check that the input location to be simulated is a valid sf object
if(!all(sf::st_is_valid(simulation_locations)))
stop('simulation_locations is not a valid sf object.')
###############################################################
# Español: Comporbar la consistencia entre las fechas de comienzo y fin del período de simulación
# English: Check consistency between start and end date of the simulation period
if(end_date <= start_date)
stop('End date should be greater than start date')
###############################################################
# Español: Comprobar que el numero de realización sea mayor o igual a uno
# English: Check that the number of realization is equal to or larger than one
if(control$nsim < 1)
stop('Number of simulations should be one or greater than one')
###############################################################
# Español: Comprobar que el número de núcleos usado sea válido
# English: Check that the number of cores to be used is valid
if(is.na(control$avbl_cores) || is.null(control$avbl_cores))
stop('The control parameter avbl_cores must be at least 1.')
###############################################################
if(!foreach::getDoParRegistered() && control$manage_parallelization_externally)
stop('The control parameter manage_parallelization_externally was set as True, but',
'neither sequential nor parallel backend was registered for the foreach package.')
###############################################################
## Control de uso correcto de covariables
# Esquema de uso de covariables!!
# cov ajuste | cov simulación
# interna | interna
# interna | externo
# externa | externa -> en ambos casos seasonal_covariates es el mismo tibble
# externa | externa -> diferentes (considerar siempre diferentes y listo)
# externa | interna (no corresponde)
# generalmente el ajuste es con covariables internas
## Español: Si seasonal_climate no es NULL, el ajuste del modelo debió haber sido hecho usando covariables
## English: If seasonal_climate is not NULL, model fit should have been done using covariates.
if(!is.null(seasonal_covariates) & is.null(model$seasonal_covariates))
stop('El ajuste fue hecho sin covariables, por lo tanto, la simulación ',
'también debe hacerse sin covariables y no es valido utilizar el ',
'parámetro seasonal_covariates!!')
## If model fit was done using an external set of covariates, the simulation
## should be done with an external set of variates as well.
if(is.null(seasonal_covariates) & !is.null(model$seasonal_covariates))
stop('El ajuste se hizo utilizando un archivo de covariables (parametro ',
'seasonal_covariates), por lo tanto, la simulación también debe hacerse ',
'con un archivo de covariables (parametro seasonal_covariates).')
## OBS:
# Ocurre lo siguiente: si ajuste se hace sin covariables, la simulación también debe
# hacerse sin covariables, y si el ajuste se hace con covariables, la simulación
# también debe hacerse con convariables!!
###############################################################
## Español: Se comprueba la presencia de series de covariables estacionales tan largas como el período de simulación
## Este control solo se realizará si llevará a cabo si la simulación usará covariables, de otra manera será salteado
## English: Check the presence of seasonal covariables time series as long as the simulation period.
## This control is only performed if the simulation will use covariables otherwise, it will be skipped.
if(!is.null(seasonal_covariates)) {
if(gamwgen:::repeat_seasonal_covariates(seasonal_covariates)) {
sim_dates_control <- tidyr::crossing(year = base::seq.int(lubridate::year(start_date), lubridate::year(end_date)),
season = as.integer(c(1, 2, 3, 4)))
seasonal_cov_ctrl <- seasonal_covariates %>% dplyr::select(year, season) %>% dplyr::distinct()
} else {
sim_dates_control <- tidyr::crossing(seasonal_covariates %>% dplyr::distinct(station_id),
year = base::seq.int(lubridate::year(start_date), lubridate::year(end_date)),
season = as.integer(c(1, 2, 3, 4)))
seasonal_cov_ctrl <- seasonal_covariates %>% dplyr::select(station_id, year, season) %>% dplyr::distinct()
}
if (!all(do.call(paste0, sim_dates_control) %in% do.call(paste0, seasonal_cov_ctrl)))
stop("Simulation years aren't in seasonal_covariates!")
}
###############################################################
############################
## INITIAL CONFIGURATIONS ##
############################
# Español: Configuración del paquete RandomFields para producir los resultados esperados
# English: Configuration of the RandomFields package in order to produce the expected results
RandomFields::RFoptions(printlevel = 0, spConform = FALSE)
# Para ...
if(!is.null(control$seed))
set.seed(control$seed)
# Español: Creaión de semillas para la simulción para así poder replicar los resultados
# English: Create simulation seeds to replicate results in different simulations
realizations_seeds <- NULL
if(!is.null(control$seed)) {
realizations_seeds <- list()
cant_dias_sim <- as.numeric(end_date - start_date) + 1
for (r in seq(1, control$nsim, 1)) {
realizations_seeds[[r]] <- list(general = ceiling(runif(min = 1, max = 10000000, n = control$nsim)), # uno por realizacion
prcp_occ = ceiling(runif(min = 1, max = 10000000, n = cant_dias_sim)), # uno por día por realizacion
prcp_amt = ceiling(runif(min = 1, max = 10000000, n = cant_dias_sim)), # uno por día por realizacion
temp_dry = ceiling(runif(min = 1, max = 10000000, n = cant_dias_sim)), # uno por día por realizacion
temp_wet = ceiling(runif(min = 1, max = 10000000, n = cant_dias_sim)), # uno por día por realizacion
retries = ceiling(runif(min = 1, max = 10000000, n = cant_dias_sim))) # uno por día por realizacion
}
}
####################################
## Parallelization initialization ##
####################################
## Español: Vairbale que indica si es necesario remover
## la confguración de la paralelización
## Variable that indicate if it's necessary to remove
## the parallelization configuration
remove_parallelization_conf <- F
## Register a sequential or a parallel backend for the foreach package
if(!control$manage_parallelization_externally) {
if (control$avbl_cores == 1) {
foreach::registerDoSEQ()
} else {
remove_parallelization_conf <- T
# Create cluster
cluster <- snow::makeCluster(type = "SOCK",
spec = rep('localhost', length.out = control$avbl_cores),
outfile = ifelse(verbose, "", snow::defaultClusterOptions$outfile))
# Register cluster as backend for the %dopar% function
doSNOW::registerDoSNOW(cluster)
}
}
## Register the number of workers to decide
## how manage progress bar in child processes
nworkers <- foreach::getDoParWorkers()
##################################
## PREPARACIÓN DE LA SIMULACIÓN ##
##################################
############################################
## Español: Carpeta de destino y nombre del archivo
## English: Process output_folder and output_filename
output_folder <- sub('/$', '', output_folder)
if (!fs::dir_exists(output_folder))
fs::dir_create(output_folder)
output_filename <- sub('\\.([^.]*)$', '', output_filename)
############################################
## Español: Borrar archivos temporales de corridas previas
## English: Delete temporary files of previous runs
files_pattern <- glue::glue("{output_filename}_realization_[0-9]+\\.rds")
file.remove(list.files(output_folder, pattern = files_pattern, full.names = T))
####################################
## Español: Generar fechas de simulación
## English: Generate simulation dates
simulation_dates <-
tibble::tibble(date = seq.Date(from = as.Date(start_date),
to = as.Date(end_date),
by = "days")) %>%
dplyr::mutate(year = lubridate::year(date),
month = lubridate::month(date),
season = lubridate::quarter(date, fiscal_start = 12))
## Español: Número de días a simular
## English: Numbers of days to be simulated
ndates <- nrow(simulation_dates)
##################################
## Español: Matriz de locaciones a ser simulados
## English: Matrix with the locations to be simulated
simulation_points <- simulation_locations %>%
sf::st_transform(sf::st_crs(model$crs_used_to_fit)) %>%
dplyr::mutate(point_id = dplyr::row_number(),
longitude = sf::st_coordinates(geometry)[,'X'],
latitude = sf::st_coordinates(geometry)[,'Y'])
##################################
## Español: Raster con las locaciones a simular
## English: Raster with the locations to be simulated.
if (control$sim_loc_as_grid) {
pnts_dist_matrix <- gamwgen:::make_distance_matrix(simulation_locations)
min_dist_btw_pnts <- floor(min(pnts_dist_matrix[upper.tri(pnts_dist_matrix)]))
raster_resolution <- c(min_dist_btw_pnts, min_dist_btw_pnts)
simulation_raster <- raster::rasterFromXYZ(
xyz = sf::st_coordinates(simulation_points),
res = raster_resolution,
crs = sf::st_crs(simulation_points))
}
if(!control$sim_loc_as_grid) {
stns_dist_matrix <- gamwgen:::make_distance_matrix(model$stations)
min_dist_btw_stns <- floor(min(stns_dist_matrix[upper.tri(stns_dist_matrix)]))
raster_resolution <- c(min_dist_btw_stns, min_dist_btw_stns)
simulation_raster <- raster::raster(
xmn = sl_bbox_offset[['xmin']],
xmx = sl_bbox_offset[['xmax']],
ymn = sl_bbox_offset[['ymin']],
ymx = sl_bbox_offset[['ymax']],
resolution = raster_resolution,
crs = sf::st_crs(simulation_points))
}
################################################################
## Español: Se obtiene los valores al día previo del comienzo de la simulación
## English: Obtaining values for the day before the simulation start day
start_date_prev_day_climatology <-
gamwgen:::get_start_climatology(model, simulation_points, start_date, control)
######################################################################################
# Verifiar si esto es necesario
## Si no se recibe un seasonal_covariates externo, se utiliza el generado en el ajuste
# Si por ahí fallan los controles, con esto se garantiza que: si se usaron covariables
# al momento de realizar el ajuste, también se utilize covariables en la simulación, y
# lo mismo para el caso inverso, es decir, cuando no se usó covariables ene l ajuste!!
# if(is.null(seasonal_covariates))
# seasonal_covariates <- model$seasonal_covariates
#################################################################
## Se pueden excluir los registros de años que no serán simulados
if (!is.null(seasonal_covariates))
seasonal_covariates <- seasonal_covariates %>%
dplyr::filter(year %in% unique(simulation_dates$year))
#############################################################
## Obtención de covariables, si van a ser utilizadas, sino no
if (!is.null(seasonal_covariates))
seasonal_covariates <-
gamwgen:::get_covariates(model, simulation_points, seasonal_covariates, simulation_dates, control)
#########################################
## Español: Parámetros globales para la generación de ruido
## English: Global paramteres for noise generators
if(control$use_spatially_correlated_noise)
gen_noise_params <- gamwgen:::generate_month_params(
residuals = model$models_residuals,
observed_climate = model$models_data,
stations = model$stations)
if(!control$use_spatially_correlated_noise)
gen_noise_params <- gamwgen:::generate_residuals_statistics(
models_residuals = model$models_residuals)
############################################
## Español: Matriz de clasificacion de días lluviosos
## English: Clasification matrix for wet days
clasification_matrix <- matrix(c(-Inf, 0, 0, 0, Inf, 1),
ncol = 3,
byrow = TRUE)
#######################################################################################
## Español: Se crea una matriz de simulación, esta va a contener todos los datos necesarios para
## la simulación de cada día a simular
## English: A simulation matrix is created, it will have all the necessary data for the
## simulation of each day to simulate
simulation_matrix <- simulation_points %>%
dplyr::select(tidyselect::any_of(c("station_id", "point_id")), longitude, latitude)
################################################################################################
## Español: Se agregan covariables estacionales si fueron usadas al ajustar el modelo (Sólo los años en simulation_dates)
## English: Add seasonal covariates if they were used when model fitting (only years in simulation_dates)
if (!is.null(seasonal_covariates))
simulation_matrix <- simulation_matrix %>% sf::st_join(seasonal_covariates) %>%
dplyr::mutate(ST1 = dplyr::if_else(season == 1, seasonal_prcp, 0),
ST2 = dplyr::if_else(season == 2, seasonal_prcp, 0),
ST3 = dplyr::if_else(season == 3, seasonal_prcp, 0),
ST4 = dplyr::if_else(season == 4, seasonal_prcp, 0),
SN1 = dplyr::if_else(season == 1, seasonal_tmin, 0),
SN2 = dplyr::if_else(season == 2, seasonal_tmin, 0),
SN3 = dplyr::if_else(season == 3, seasonal_tmin, 0),
SN4 = dplyr::if_else(season == 4, seasonal_tmin, 0),
SX1 = dplyr::if_else(season == 1, seasonal_tmax, 0),
SX2 = dplyr::if_else(season == 2, seasonal_tmax, 0),
SX3 = dplyr::if_else(season == 3, seasonal_tmax, 0),
SX4 = dplyr::if_else(season == 4, seasonal_tmax, 0))
#########################################
## Español: Umbrales para reintentar la simulación. Es decir, si los valores simulados están por encima/debajo del
## rango mínimo/máximo, la simulación para ese día se repetirá
## English: Thresholds for performing retries. i.e.: if simulated values are above/below the
## max/min range, the simulation for that day will be repeated.
temperature_range_thresholds <-
gamwgen:::get_temperature_thresholds(model$stations, simulation_points,
model$statistics_threshold, control)
##########################################
## AQUI EMPIEZA REALMENTE LA SIMULACIÓN ##
##########################################
#################################
## Español: Control de tiempo de ejecución
## English: Check the execution time
t.sim <- proc.time()
#######################################################
## Set the progress bar to know how long this will take
pb <- progress::progress_bar$new(
format = paste0(ifelse(nworkers == 1, " realization:", " finished realizations:"),
" :r / ", control$nsim, ifelse(nworkers == 1, paste0(" | day: :d / ", ndates), ""),
" | progress: :bar :percent (in :elapsed) | eta: :eta"),
total = ifelse(nworkers == 1, control$nsim*ndates, control$nsim),
clear = FALSE, width= 90, show_after = 0)
## For print something until first realization finish
if(nworkers > 1 && !verbose)
pb$tick(0, tokens = list(r = 0))
## For manage the progress bar in parallel executions
progress_pb <- function(r) {
pb$tick(1, tokens = list(r = r))
}
######
## Español: Comienzo de la simulación
## English: Simulation start
nsim_gen_clim <- foreach::foreach(r = 1:control$nsim,
.combine = dplyr::bind_rows,
.export = c('output_folder', 'output_filename'),
.packages = c('dplyr'),
.options.snow = list(progress = progress_pb),
.verbose=verbose) %dopar% {
######################################################################
# Español: Para que las funciones de RandomFields devuelvan lo esperado!! ----
# English: Configuration of the RandomFields package in order to produce the expected results
RandomFields::RFoptions(printlevel = 0, spConform = FALSE)
##################################################
## Español: Para cuando necesitamos repetir resultados
## English: In case results need to be repeated
set.seed(realizations_seeds[[r]]$general[[r]])
################################################################################
## Cuando se ejecuta el código en paralelo, simulation_matrix no es un sf válido
if(nworkers > 1)
simulation_matrix <- simulation_matrix %>%
sf::st_as_sf(coords = c('longitude', 'latitude'), crs = sf::st_crs(simulation_points))
#################################################################################
## Español: Creacion de los puntos de simulacion para el dia i (eq. daily covariates) ----
## English: Creation of simulation points por the i-th day
#################################################################################
simulation_matrix.d <- simulation_matrix %>%
# Español: Si se usan covariables, simulation_matrix tiene las covariables
# para cada season de cada year en simulation_dates! Por lo tanto,
# se debe filtrar por season y year para acelerar el sf::st_join!
# English: If covariates are used, simulation_matrix should have the covariates
# for each season of every year in simuladyion_dates. So,
# it should be filtered by season and year to speed up the process
{if (is.null(seasonal_covariates)) dplyr::filter(.)
else dplyr::filter(., year == simulation_dates$year[1], season == simulation_dates$season[1])} %>%
# Español: Luego se agrega la climatología inicial, es decir, la del día previo al primer día
# a simular. Las columnas incorporadas por esta acción son: prcp_occ, tmax y tmin,
# por lo tanto, deben ser renombradas a: prcp_occ_prev, tmax_prev y tmin_prev
# English: Daily climatology is added, i.e.: the climatology of the previous day of the first
# day of the simulation period. The variables added are: prcp_occ, tmax and tmin,
# therefore, they shoulb be renamed: prcp_occ_prev, tmax_prev and tmin_prev
sf::st_join(start_date_prev_day_climatology) %>%
dplyr::rename(prcp_occ_prev = prcp_occ, tmax_prev = tmax, tmin_prev = tmin) %>%
# Español: Se debe agregar variables complementarias: type_day_prev y prcp_amt_prev
# English: Complementary variables are added: type_day_prev and prcp_amt_prev
dplyr::mutate(type_day_prev = factor(prcp_occ_prev, levels = c(1, 0), labels = c('Wet', 'Dry')),
prcp_amt_prev = NA_real_) %>% # no se tiene la amplitud de prcp para el día previo al 1er día a simular!
# Español: Se agregan date, time y doy del primer día a simular
# English: More variables are added: date, time and doy of the first day
dplyr::mutate(date = simulation_dates$date[1],
time = as.numeric(date)/1000,
doy = lubridate::yday(date),
month = lubridate::month(date)) %>%
# Español: Se crean columnas para almacenar los resultados de la simulación
# English: Empty columns are created to store the results
dplyr::mutate(prcp_occ = NA_integer_,
tmax = NA_real_,
tmin = NA_real_,
type_day = NA_character_,
prcp_amt = NA_real_) %>%
# Español: para control de paralelización
# English: To manage paralelization
dplyr::mutate(nsim = r)
#######################################
## Español: Tiempos a tomar por cada realización
## English: Time for each realization
tiempos <- tibble::tibble(tiempo.gen_clim = list(),
tiempo.save_rds = list())
tiempos <- tiempos %>% dplyr::add_row()
#################################
## Español: Control de tiempo de ejecución
## English: Control time of the execution
t.daily_gen_clim <- proc.time()
#####################################################################################
## Antes se usaba un foreach para paralelizar esto, pero no se puede ser paralelizado
## porque simulation_matrix.d no toma los valores correctos al paralelizar!!
## Ver version anterior para más detalles (commit: 1898e5a)
daily_gen_clim <- purrr::map_dfr(1:ndates, function(d) {
##############################################################
## Español: Índice temporal para cada mes de la simulación/realización
## English: Temporal index for each month of the simulation/realization
current_month <- simulation_dates$month[d]
###########################################
## Precipitation occurrence (prcp_occ) ----
###########################################
# Español: Simulación de la ocurrencia de lluvia
# English: Simulation of precipitation occurrence
SIMocc <- mgcv::predict.bam(model$fitted_models$prcp_occ_fit,
newdata = simulation_matrix.d,
#cluster = cluster, # empeora el tiempo para grillas grandes
newdata.guaranteed = TRUE) # una optimizacion
# Español: Raster con el componente "climático"
# English: Raster with the climatic component
SIMocc_points_climate.d <- simulation_points %>%
dplyr::mutate(SIMocc = !!SIMocc) %>%
gamwgen:::sf2raster('SIMocc', simulation_raster)
# Español: Raster con el componente "meteorológico"
# English: raster with the meteorological component
SIMocc_points_noise.d <- control$prcp_noise_generating_function(
simulation_points = simulation_points, # Location
gen_noise_params = gen_noise_params, # Monthly parameters
month_number = current_month, # Month
selector = 'prcp', # Variables
seed = realizations_seeds[[r]]$prcp_occ[[d]]) %>% # Seed
gamwgen:::sf2raster('prcp_residuals', simulation_raster) # Conversion to raster
# Español: Raster con los valores simulados
# English: Raster with the simulated values
SIMocc_points.d <- SIMocc_points_climate.d + SIMocc_points_noise.d
# Español: Raster con los valores simulados reclasificados a 0 y/o 1
# English: Raster with the reclasified simulated values to 0 and/or 1
SIMocc_points.d <- raster::reclassify(SIMocc_points.d, clasification_matrix)
# Español: Agregar valores de ocurrencia a la grilla de simulacion
# English: Add occurrence values to the simulation grid
simulation_matrix.d <- simulation_matrix.d %>%
dplyr::mutate(prcp_occ = raster::extract(SIMocc_points.d, simulation_points),
type_day = factor(prcp_occ, levels = c(1, 0),
labels = c('Wet', 'Dry')))
#########################################
## Temperature (both, tmax and tmin) ----
#########################################
# Español: Raster con el componente meterológico para días secos
# English: Raster with the meterological component for dry days
temperature_random_fields_dry <-
control$temperature_noise_generating_function(
simulation_points = simulation_points,
gen_noise_params = gen_noise_params,
month_number = current_month,
selector = c('tmax_dry', 'tmin_dry'),
seed = realizations_seeds[[r]]$temp_dry[[d]])
# Español: Procesamiento de residuos para dias secos
# English: Dry days residues processing
rasters_secos.d <- purrr::map(
.x = c("tmin", "tmax"),
.f = function(variable, objeto_sf) {
return (gamwgen:::sf2raster(objeto_sf, paste0(variable, '_residuals'), simulation_raster))
}, objeto_sf = temperature_random_fields_dry
)
names(rasters_secos.d) <- c("tmin", "tmax")
# Español: Raster con el componente meterológico para días lluviosos
# English: Raster with the meterological component for wet days
temperature_random_fields_wet <-
control$temperature_noise_generating_function(
simulation_points = simulation_points,
gen_noise_params = gen_noise_params,
month_number = current_month,
selector = c('tmax_wet', 'tmin_wet'),
seed = realizations_seeds[[r]]$temp_wet[[d]])
# Español: Procesamiento de residuos para dias humedos
# English: Wet days residues processing
rasters_humedos.d <- purrr::map(
.x = c("tmin", "tmax"),
.f = function(variable, objeto_sf) {
return (gamwgen:::sf2raster(objeto_sf, paste0(variable, '_residuals'), simulation_raster))
}, objeto_sf = temperature_random_fields_wet
)
names(rasters_humedos.d) <- c("tmin", "tmax")
# Español: Se generan dos rasters, uno para tmax y otro para tmin
# Cada raster tiene los residuos de las variables correspondientes
# considerando la ocurrencia de dia seco o humedo
# English: Two rasters are generated, uno for tmax and another one for tmin
# Each raster has the residues of the corresponding variables
# considering the occurrence of precipitation
SIMmax_points_noise.d <-
gamwgen:::ensamblar_raster_residuos(rasters_humedos.d$tmax, rasters_secos.d$tmax, SIMocc_points.d)
SIMmin_points_noise.d <-
gamwgen:::ensamblar_raster_residuos(rasters_humedos.d$tmin, rasters_secos.d$tmin, SIMocc_points.d)
##################################
## Maximum temperature (tmax) ----
##################################
# Español: Simulación del componente meteorológico
# de la temperatura máxima (SIMmax)
# English: Simulation of the climatic component of
# maximum temperature (SIMmax)
SIMmax <- mgcv::predict.bam(model$fitted_models$tmax_fit,
newdata = simulation_matrix.d,
#cluster = cluster, # no mejora mucho el tiempo
newdata.guaranteed = TRUE) # una optimizacion
# Español: Raster con el componente climático de tmax
# English: Raster with the climatic component of tmax
SIMmax_points_climate.d <- simulation_points %>%
dplyr::mutate(SIMmax = !!SIMmax) %>%
gamwgen:::sf2raster('SIMmax', simulation_raster)
# Español: Raster con los valores simulados
# English: Raster with the simulated values
SIMmax_points.d <- SIMmax_points_climate.d + SIMmax_points_noise.d
# Español: Agregar valores de temperatura mínima a los puntos de simulación
# English: Add simulated tmax values to the simulation points
simulation_matrix.d <- simulation_matrix.d %>%
dplyr::mutate(tmax = raster::extract(SIMmax_points.d, simulation_points))
##################################
## Minimum temperature (tmin) ----
##################################
# Español: Simulación del componente meteorológico
# de la temperatura mínima (SIMmin)
# English: Simulation of the climatic component of
# minimum temperature (SIMmin)
SIMmin <- mgcv::predict.bam(model$fitted_models$tmin_fit,
newdata = simulation_matrix.d,
#cluster = cluster, # no mejora mucho el tiempo
newdata.guaranteed = TRUE) # una optimizacion
# Español: Raster con el componente climático de tmin
# English: Raster with the climatic component of tmin
SIMmin_points_climate.d <- simulation_points %>%
dplyr::mutate(SIMmin = !!SIMmin) %>%
gamwgen:::sf2raster('SIMmin', simulation_raster)
# Español: Raster con los valores simulados
# English: Raster with the simulated values
SIMmin_points.d <- SIMmin_points_climate.d + SIMmin_points_noise.d
# Español: Agregar valores de temperatura mínima a los puntos de simulación
# English: Add simulated tmax values to the simulation points
simulation_matrix.d <- simulation_matrix.d %>%
dplyr::mutate(tmin = raster::extract(SIMmin_points.d, simulation_points))
#################################################
## Check Temperatures (both, tmax and tmin) ----
#################################################
# Español: Los valores de temperaturas máximas y mínimas simulados serán válidos si el
# rango (tmax- tmin) diario cae dentro de los umbrales estimados a partir
# de los datos originales. Si lo valores de temperatura simulados está fuera
# de los umbrales, i.e.: por encima del rango máximo o por debajo del mínimo,
# ese valor diario será vuelto a simular hasta que se satisfaga la condición.
# English:Simulated maximum and minium temperature values will be valid if the daily
# temperature range (tmax - tmin) falls between the thresholds estimated
# from the original data. If the simulated daily temperature is outside the
# thresholds, i.e.: above the maximum range or beneath the minimum range,
# the daily value will be resimulated until the condition is satisfied.
#################
## Español: Creación de rasters con umbrales para días secos
## English: Creation of raster with thresholds for dry days
# Maximum range
dry_max_range <- temperature_range_thresholds %>%
dplyr::filter(month == current_month & prcp_occ == 0) %>%
sf::st_as_sf(., coords = c('longitude', 'latitude')) %>%
gamwgen:::sf2raster('max.range', simulation_raster)
# Minimum range
dry_min_range <- temperature_range_thresholds %>%
dplyr::filter(month == current_month & prcp_occ == 0) %>%
sf::st_as_sf(., coords = c('longitude', 'latitude')) %>%
gamwgen:::sf2raster('min.range', simulation_raster)
################
## Español: Creación de rasters con umbrales para días lluviosos
## English: Creation of raster with thresholds for wet days
# Maximum range
wet_max_range <- temperature_range_thresholds %>%
dplyr::filter(month == current_month & prcp_occ == 1) %>%
sf::st_as_sf(., coords = c('longitude', 'latitude')) %>%
gamwgen:::sf2raster('max.range', simulation_raster)
# Minimum range
# FIX: this threshold is for WET days, so filter on prcp_occ == 1.
# The previous filter used prcp_occ == 0, which duplicated the dry-day
# minimum-range threshold; wet_max_range just above correctly uses
# prcp_occ == 1, and this statement mirrors it.
wet_min_range <- temperature_range_thresholds %>%
  dplyr::filter(month == current_month & prcp_occ == 1) %>%
  sf::st_as_sf(., coords = c('longitude', 'latitude')) %>%
  gamwgen:::sf2raster('min.range', simulation_raster)
################
## Español: Se combinan ambos rasters considerando el tipo de día
## English: Both rasters are combined considering type of day
# Maximum range
maximum_daily_range_raster <-
gamwgen:::ensamblar_raster_residuos(dry_max_range, wet_max_range, SIMocc_points.d)
# Minimum range
minimum_daily_range_raster <-
gamwgen:::ensamblar_raster_residuos(dry_min_range, wet_min_range, SIMocc_points.d)
# Español: Calculo del rango diario simulado (tmax- tmin)
# English: Calculation of the daily simulated range (tmax - tmin)
daily_range_points.d <- base::abs(SIMmax_points.d - SIMmin_points.d)
# Español: Se realiza la comprobación. Si la temperatura simulada está por encima del rango mínimo observado y
# por debajo del rango máximo observado, las simulaciones son válidas. De otra manera, se repite la simulación
# English: Perform the test. If the simulate temperature range is above the minimum observed range and
# below the maximum observed range, the simulations are valid. Otherwise, re-simulate
daily_retries <- 0
while ( daily_retries < 100 && (any(raster::getValues(SIMmax_points.d < SIMmin_points.d), na.rm = T) ||
any(raster::getValues(daily_range_points.d > maximum_daily_range_raster), na.rm = T) ||
any(raster::getValues(daily_range_points.d < minimum_daily_range_raster), na.rm = T)) ) {
daily_retries <- daily_retries + 1
# Español: Raster con el componente meterológico para días secos
# English: Raster with the meterological component for dry days
temperature_random_fields_dry <-
control$temperature_noise_generating_function(
simulation_points = simulation_points,
gen_noise_params = gen_noise_params,
month_number = current_month,
selector = c('tmax_dry', 'tmin_dry'),
seed = if (is.null(control$seed)) NULL else realizations_seeds[[r]]$retries[[d]] + daily_retries)
#}, times = 10)
# Español: Procesamiento de residuos para dias secos
# English: Dry days residues processing
rasters_secos.d <- purrr::map(
.x = c("tmin", "tmax"),
.f = function(variable, objeto_sf) {
return (gamwgen:::sf2raster(objeto_sf, paste0(variable, '_residuals'), simulation_raster))
}, objeto_sf = temperature_random_fields_dry
)
names(rasters_secos.d) <- c("tmin", "tmax")
# Español: Raster con el componente meterológico para días lluviosos
# English: Raster with the meterological component for wet days
temperature_random_fields_wet <-
control$temperature_noise_generating_function(
simulation_points = simulation_points,
gen_noise_params = gen_noise_params,
month_number = current_month,
selector = c('tmax_wet', 'tmin_wet'),
seed = if (is.null(control$seed)) NULL else realizations_seeds[[r]]$retries[[d]] + daily_retries)
# Español: Procesamiento de residuos para dias humedos
# English: Wet days residues processing
rasters_humedos.d <- purrr::map(
.x = c("tmin", "tmax"),
.f = function(variable, objeto_sf) {
return (gamwgen:::sf2raster(objeto_sf, paste0(variable, '_residuals'), simulation_raster))
}, objeto_sf = temperature_random_fields_wet
)
names(rasters_humedos.d) <- c("tmin", "tmax")
# Español: Raster con los valores simulados
# English: Raster with the simulated values
SIMmax_points_noise.d <-
gamwgen:::ensamblar_raster_residuos(rasters_humedos.d$tmax, rasters_secos.d$tmax, SIMocc_points.d)
SIMmin_points_noise.d <-
gamwgen:::ensamblar_raster_residuos(rasters_humedos.d$tmin, rasters_secos.d$tmin, SIMocc_points.d)
# Español: Se genera un raster con nuevas temperaturas
# # English: A raster with the new temperatures is generated
# Maximum temperature
new_tmax <- SIMmax_points_climate.d + SIMmax_points_noise.d
# Minimum temperature
new_tmin <- SIMmin_points_climate.d + SIMmin_points_noise.d
# Español: Actualizar temperaturas simuladas
# English: Update simulated temperatures
SIMmax_points.d <- new_tmax
SIMmin_points.d <- new_tmin
# Español: Nuevo rango diario
# English: New daily range
daily_range_points.d <- SIMmax_points.d - SIMmin_points.d
#################################################
## Progress Bar (for non parallel execution) ----
if(nworkers == 1) # for report retries!!
pb$tick(0, tokens = list(r = r, d = d, t = daily_retries))
}
##########################
## Report retries problems
if(daily_retries >= 100)
warning("Failed to simulate random noise that doesn't violate the constraint of max. temp. > min. temp.")
###############################################################
## Español: Actualización de simulation_matrix.d con los nuevos valores de tmax y tmin
## English: Update simulation_matrix.d with new values for tmax and tmin
if (daily_retries > 0) {
# Population of the simulation matrix with the simulated maximum temperature data
simulation_matrix.d <- simulation_matrix.d %>%
dplyr::mutate(tmax = raster::extract(SIMmax_points.d, simulation_points))
# Population of the simulation matrix with the simulated minimum temperature data
simulation_matrix.d <- simulation_matrix.d %>%
dplyr::mutate(tmin = raster::extract(SIMmin_points.d, simulation_points))
}
########################################
## Precipitation amounts (prcp_amt) ----
########################################
# Español: Filtrar el modelo a usar por el mes en curso
# English: Filter the model to use by current month
prcp_amt_fit <- model$fitted_models$prcp_amt_fit[[current_month]]
# Español: Estimación del parametro de forma
# English: Estimation of the shape parameter
alphaamt <- MASS::gamma.shape(prcp_amt_fit)$alpha
# Español: Estimación de los parametros de escala
# English: Estimation of the scale parameter
betaamt <- base::exp(mgcv::predict.bam(prcp_amt_fit,
newdata = simulation_matrix.d,
#cluster = cluster, # no mejora mucho el tiempo
newdata.guaranteed = TRUE))/alphaamt
# Español: Raster con los valores de "ruido"
# English: raster with the noise values
SIMamt_points_noise.d <- control$prcp_noise_generating_function(
simulation_points = simulation_points,
gen_noise_params = gen_noise_params,
month_number = current_month,
selector = 'prcp',
seed = realizations_seeds[[r]]$prcp_amt[[d]]) %>%
gamwgen:::sf2raster('prcp_residuals', simulation_raster)
# Español: Simulacion de montos
# English: Amounts simulation
SIMamt <- stats::qgamma(stats::pnorm(raster::extract(SIMamt_points_noise.d, simulation_points)),
shape = rep(alphaamt, length(betaamt)), scale = betaamt)
# Español: Raster con el componente meterológico
# English: Raster with the meteorological component
SIMamt_points_climate.d <- simulation_points %>%
dplyr::mutate(SIMamt = !!SIMamt) %>%
gamwgen:::sf2raster('SIMamt', simulation_raster)
# Español: Enmascarar pixeles sin ocurrencia de lluvia
# English: Mask pixels without precipitation occurrence
SIMamt_points.d <- SIMamt_points_climate.d * SIMocc_points.d
# Español: Agregar valores de los montos de prcp a los puntos de simulación
# English: Add the amounts of precipitation to simulation points
simulation_matrix.d <- simulation_matrix.d %>%
dplyr::mutate(prcp_amt = raster::extract(SIMamt_points.d, simulation_points))
#########################################################################
## Preparar simulation_matrix.d para la simulación del siguiente día ----
#########################################################################
# Español: Se prepara la matriz de datos para la simulación del día i + 1
# English: Prepartion of the data matriz for the simulation of the i + 1 day
current_sim_matrix <- simulation_matrix.d %>%
sf::st_drop_geometry() %>% tibble::as_tibble()
current_sim_results <- current_sim_matrix %>%
dplyr::select(tidyselect::any_of(c("station_id", "point_id")), prcp_occ, tmax, tmin, prcp_amt, type_day)
# OJO: se usa el operador <<- para utilizar los resultados el siguiente día
simulation_matrix.d <<- simulation_matrix %>%
# Si se usan covariables, simulation_matrix tiene las covariables
# para cada season de cada year en simulation_dates! Por lo tanto,
# se debe filtrar por season y year para acelerar el sf::st_join!
{if (is.null(seasonal_covariates)) dplyr::filter(.)
else dplyr::filter(., year == simulation_dates$year[d+1], season == simulation_dates$season[d+1])} %>%
# Español: Ya no se agrega la climatología inicial (la del día previo al primer día a simular),
# sino que, como climatología previa se usan los resultados del día en curso
# English: Start climatology is no longer added bu replaced by the simulated values of the previous day
dplyr::inner_join(current_sim_results, by =
if (all(c("station_id", "point_id") %in% colnames(current_sim_results))) {
c("station_id", "point_id")
} else if("station_id" %in% colnames(current_sim_results)) {
c("station_id")
} else if("point_id" %in% colnames(current_sim_results)) {
c("point_id")
} ) %>%
# Español: Se hacen las actualizaciones necesarias para que simulation_matrix.d
# pueda ser utilizada en la siguiente iteración, es decir para el siguiente día a simular
# English: Necessary updates are performed to simulation_matriz.d so it can be used
# in the next iteration, i.e.: for the next day
dplyr::mutate(prcp_occ_prev = prcp_occ,
tmax_prev = tmax,
tmin_prev = tmin,
type_day_prev = type_day,
prcp_amt_prev = prcp_amt,
date = simulation_dates$date[d+1],
time = as.numeric(date)/1000,
doy = lubridate::yday(date),
month = lubridate::month(date),
prcp_occ = NA_integer_,
tmax = NA_real_,
tmin = NA_real_,
type_day = NA_character_,
prcp_amt = NA_real_,
nsim = r)
#################################################
## Progress Bar (for non parallel execution) ----
if(nworkers == 1)
pb$tick(1, tokens = list(r = r, d = d))
###########################
## Español: Devolver resultados ----
## English: Return results
return (current_sim_matrix %>% dplyr::mutate(retries = daily_retries) %>%
dplyr::select(nsim, tidyselect::any_of(c("station_id", "point_id")), longitude, latitude,
date, prcp_occ, prcp_occ_prev, tmax, tmax_prev, tmin, tmin_prev,
type_day, type_day_prev, prcp_amt, prcp_amt_prev, retries))
})
##############################################
## Tomar tiempo de generación del clima diario
tiempos <- dplyr::mutate(tiempos, tiempo.gen_clim = list(proc.time() - t.daily_gen_clim))
###################################################################
## Español: Se guarda en disco el tibble con los rasters de la realización
## English: It written on disk the tibble with rasters of the realization
rds_path <- glue::glue("{output_folder}/{output_filename}_realization_{r}.rds")
if(control$use_temporary_files_to_save_ram) {
t.saveRDS <- proc.time()
base::saveRDS(daily_gen_clim, rds_path)
tiempos <- dplyr::mutate(tiempos, tiempo.save_rds = list(proc.time() - t.saveRDS))
}
######################
## Español: Liberar memoria RAM
## English: Free memory RAM
if(control$use_temporary_files_to_save_ram) {
rm(daily_gen_clim)
invisible(gc())
}
#################
## Español: Retorno final
## English: Return results
return (tibble::tibble(nsim = r,
nsim_gen_climate = ifelse(control$use_temporary_files_to_save_ram, list(rds_path), list(daily_gen_clim))
) %>% dplyr::bind_cols(tiempos))
}
##############################################
## Guardar realizacion en archivo de salida ##
##############################################
####################################################
## Español: Tomar tiempos de generación del archivo de salida
## English: Take generation time of the output file
ctrl_output_file <- purrr::map2_dfr(
nsim_gen_clim %>% dplyr::pull(nsim),
nsim_gen_clim %>% dplyr::pull(nsim_gen_climate),
function(r, daily_gen_clim) {
#######################################
## Tiempos a tomar por cada realización
tiempos <- tibble::tibble(tiempo.read_rds = list(),
tiempo.gen_file = list())
tiempos <- tiempos %>% dplyr::add_row()
######################################
## Leer archivos con rasters generados
if(control$use_temporary_files_to_save_ram) {
t.read_rds <- proc.time()
rds_path <- daily_gen_clim
daily_gen_clim <- base::readRDS(rds_path)
tiempos <- dplyr::mutate(tiempos, tiempo.read_rds = list(proc.time() - t.read_rds))
}
#############################
## Español: Generar archivos de salida
## English: Generate output file
t.gen_file <- proc.time()
gamwgen:::GuardarRealizacionEnCSV(filename = glue::glue("{output_folder}/{output_filename}.csv"),
numero_realizacion = r, tibble_with_data = daily_gen_clim,
avbl_cores = control$avbl_cores)
tiempos <- dplyr::mutate(tiempos, tiempo.gen_file = list(proc.time() - t.gen_file))
######################
## Español: Liberar memoria RAM
## English: Free memory RAM
if(control$use_temporary_files_to_save_ram) {
rm(daily_gen_clim)
invisible(gc())
}
#################
## Español: Retorno final
## English: Last return
return (tibble::tibble(nsim = r) %>% dplyr::bind_cols(tiempos))
})
#################################
## Español: Control de tiempo de ejecución
## English: Control execution time
tiempo.sim <- proc.time() - t.sim
##############################
## Preparar datos de salida ##
##############################
# Español: Se guardan los resultados en el objeto de salida
# English: Save results in the output file
gen_climate[['nsim']] <- control$nsim # Number of simulations
gen_climate[['seed']] <- control$seed # Initial seed
gen_climate[['realizations_seeds']] <- realizations_seeds # Realization seed
gen_climate[['simulation_points']] <- simulation_points # Simulation locations
gen_climate[['output_file_with_results']] <- glue::glue("{output_folder}/{output_filename}.csv") # Output file name
gen_climate[['output_file_fomart']] <- "CSV" # Output file format
fitted_stations <- model$stations; climate <- model$climate # Observed meteorological data
fsc_filename <- glue::glue("{output_folder}/fitted_stations_and_climate.RData")
save(fitted_stations, climate, file = fsc_filename)
gen_climate[['rdata_file_with_fitted_stations_and_climate']] <- fsc_filename
rm(fsc_filename); invisible(gc())
names(nsim_gen_clim$tiempo.gen_clim) <- paste0("sim_", nsim_gen_clim$nsim)
gen_climate[['exec_times']][["gen_clim_time"]] <- nsim_gen_clim$tiempo.gen_clim
if(control$use_temporary_files_to_save_ram) {
names(nsim_gen_clim$tiempo.save_rds) <- paste0("sim_", nsim_gen_clim$nsim)
gen_climate[['exec_times']][["rds_save_time"]] <- nsim_gen_clim$tiempo.save_rds
}
if(control$use_temporary_files_to_save_ram) {
names(ctrl_output_file$tiempo.read_rds) <- paste0("sim_", ctrl_output_file$nsim)
gen_climate[['exec_times']][["rds_read_time"]] <- ctrl_output_file$tiempo.read_rds
}
names(ctrl_output_file$tiempo.gen_file) <- paste0("sim_", ctrl_output_file$nsim)
gen_climate[['exec_times']][["gen_output_time"]] <- ctrl_output_file$tiempo.gen_file
gen_climate[['exec_times']][["exec_total_time"]] <- tiempo.sim
class(gen_climate) <- c(class(gen_climate), 'gamwgen.climate')
#########################
## FINALIZAR EJECUCIÓN ##
#########################
## Cerrar progress bar
pb$terminate()
## Remove parallelization conf, if necessary
if(remove_parallelization_conf) {
foreach::registerDoSEQ()
snow::stopCluster(cluster)
}
## Español: Se borran los archivos temporarios
## English: Remove temporary files
if(control$use_temporary_files_to_save_ram && control$remove_temp_files_used_to_save_ram)
purrr::walk( nsim_gen_clim %>% dplyr::pull(nsim_gen_climate),
function(filename) { file.remove(filename); invisible(gc()) } )
## Español: Devolver resultado
## English: Return result
gen_climate
}
|
4616c16c9c409178e1603f72ab4a708b2ee818ff
|
8455fc20fed9641f65ed8a5b2e065c7e8075e730
|
/man/compatability.Rd
|
df126f0685ece31e4d06f0e6a6d8517f9082dbf5
|
[] |
no_license
|
andreas50/uts
|
0cfb629448886bcee992e6ae8ab453d15fd366ff
|
f7cea0d2ba074d332a4eb9b5498451fe0bc9a94f
|
refs/heads/master
| 2021-07-24T13:41:29.982215
| 2021-04-05T14:41:04
| 2021-04-05T14:41:04
| 35,902,127
| 16
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 933
|
rd
|
compatability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ts_compatability.R
\name{compatability}
\alias{compatability}
\alias{as.ts.uts}
\alias{cycle.uts}
\alias{frequency.uts}
\title{Compatibility with ts class}
\usage{
\method{as.ts}{uts}(x, ...)
\method{cycle}{uts}(x, ...)
\method{frequency}{uts}(x, ...)
}
\arguments{
\item{x}{a \code{"uts"} object.}
\item{\dots}{further arguments passed to or from methods.}
}
\value{
\code{cycle()} and \code{frequency()} give an error message, because \code{"uts"} objects, by definition, do not have a fixed number of observations in a given time interval.
}
\description{
These methods exist solely to ensure that methods intended for \code{"ts"} objects in base \R are not accidentally applied to \code{"uts"} objects.
}
\examples{
\dontrun{as.ts(ex_uts())}
\dontrun{cycle(ex_uts())}
\dontrun{frequency(ex_uts())}
}
\seealso{
\code{\link{ts}}
}
\keyword{internal}
|
caffb08748834e8d814084c7999679f5b84a9525
|
379452dad7d01072f5cd143be9d5227f372dc727
|
/plot_figureS6.R
|
4fb2022e417f96810f983dbb5f8573772c0b4ffc
|
[] |
no_license
|
BennyStrobes/gtex_v8_rare_variant_figure_generation
|
51f4d6ec0e11cd4f956a7e8d158a075603697ae5
|
43eff32438fc47fbeed982f8b04a76b6f4aab42c
|
refs/heads/master
| 2023-01-01T18:13:15.092394
| 2020-10-26T21:14:44
| 2020-10-26T21:14:44
| 265,935,957
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,178
|
r
|
plot_figureS6.R
|
library(ggplot2)
library(cowplot)
theme_set(theme_cowplot())
# Shared ggplot2 theme for all GTEx v8 figures: plain 8pt plot/body text,
# 7pt axis and legend text, no panel grid, blank panel background, and
# black axis lines. Returns a ggplot2 theme object to be added to a plot.
gtex_v8_figure_theme <- function() {
  figure_theme <- theme(
    plot.title = element_text(face = "plain", size = 8, hjust = 0.5),
    text = element_text(size = 8),
    axis.text = element_text(size = 7),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_blank(),
    axis.line = element_line(colour = "black"),
    legend.text = element_text(size = 7),
    legend.title = element_text(size = 8)
  )
  figure_theme
}
# Build one scatter panel comparing two sets of p-values on a -log10 scale.
#
# Args:
#   x_axis_pvalues: numeric vector of p-values plotted on the x axis.
#   y_axis_pvalues: numeric vector of p-values plotted on the y axis
#     (parallel to x_axis_pvalues).
#   color_vector: per-point values mapped to colour (fraction of reads
#     coming from one junction), parallel to the p-value vectors.
#   x_axis_label, y_axis_label: strings interpolated into the axis titles.
#
# Returns: a ggplot object.
make_figS6_panel <- function(x_axis_pvalues, y_axis_pvalues, color_vector, x_axis_label, y_axis_label) {
  # Randomly downsample to ~1% of the points to keep the panel renderable.
  # seq_along() is safe for zero-length input (1:length(x) would yield
  # c(1, 0)), and FALSE replaces the reassignable shorthand F.
  random_indices <- sample(seq_along(x_axis_pvalues),
                           size = length(x_axis_pvalues) / 100.0,
                           replace = FALSE)
  x_axis_pvalues <- x_axis_pvalues[random_indices]
  y_axis_pvalues <- y_axis_pvalues[random_indices]
  color_vector <- color_vector[random_indices]
  # -log10 transform with a 1e-6 pseudocount so p = 0 stays finite.
  # (Internal column-name typo "stadard_pvalue" fixed to "standard_pvalue";
  # the name is local to this data frame and aes() call, so the rename is
  # self-contained.)
  df <- data.frame(standard_pvalue = -log10(x_axis_pvalues + 1e-6),
                   alt_pvalue = -log10(y_axis_pvalues + 1e-6),
                   fraction = color_vector)
  p <- ggplot(df, aes(x = standard_pvalue, y = alt_pvalue, colour = fraction)) +
    geom_point(size = .0001) +
    gtex_v8_figure_theme() +
    theme(legend.position = "bottom") +
    labs(x = paste0("-log10(p-value) [ ", x_axis_label, " ]"),
         y = paste0("-log10(p-value) [ ", y_axis_label, " ]"),
         colour = "Fraction of reads\nfrom one junction")
  p
}
# Input data ----
figS6_input_file <- "processed_input_data/figureS6/figS6_input_data.txt"

# Load in data ----
figS6_df <- read.table(figS6_input_file, header = TRUE)

# Individual panels ----
# Each panel compares the standard prior at 20K reads against one
# alternative configuration (fewer reads, more reads, or no prior).
panel_a <- make_figS6_panel(figS6_df$standard_prior_20K_reads,
                            figS6_df$standard_prior_10K_reads,
                            figS6_df$fraction,
                            "20000 reads", "10000 reads")
panel_b <- make_figS6_panel(figS6_df$standard_prior_20K_reads,
                            figS6_df$standard_prior_100K_reads,
                            figS6_df$fraction,
                            "20000 reads", "100000 reads")
panel_c <- make_figS6_panel(figS6_df$standard_prior_20K_reads,
                            figS6_df$no_prior_20K_reads,
                            figS6_df$fraction,
                            "Standard prior", "No prior")

# Combined figure ----
figS6 <- plot_grid(panel_a, panel_b, panel_c, ncol = 2, labels = c("A", "B", "C"))

# Save to output file ----
output_file <- "generated_figures/figureS6.pdf"
ggsave(figS6, file = output_file, width = 7.2, height = 7, units = "in")
|
236ae66fdaa6572770fce8d5779f71c15aae8d39
|
89f471a1facf26cba075e79ad778f58c1d03a175
|
/man/create_stick_plot.Rd
|
90674fe86ccf287b182c0ebeb9ed7139e5211489
|
[
"MIT"
] |
permissive
|
deandevl/RplotterPkg
|
a90d229946639235949483f595b9ee8c5eeab101
|
5a70e51eeb45d84685e4fddc9a9f7bd9e68f089a
|
refs/heads/main
| 2023-05-12T06:55:22.537757
| 2023-05-01T09:47:17
| 2023-05-01T09:47:17
| 230,162,174
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,303
|
rd
|
create_stick_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_stick_plot.R
\name{create_stick_plot}
\alias{create_stick_plot}
\title{Function wraps a ggplot2 geom_linerange to produce a "stick" plot.}
\usage{
create_stick_plot(
df,
base_val = 0,
aes_x = NULL,
aes_y = NULL,
aes_color = NULL,
title = NULL,
subtitle = NULL,
caption = NULL,
center_titles = FALSE,
x_title = NULL,
y_title = NULL,
hide_x_tics = FALSE,
hide_y_tics = FALSE,
rot_x_tic_angle = 0,
rot_y_tic_label = FALSE,
x_limits = NULL,
x_major_breaks = waiver(),
x_minor_breaks = waiver(),
x_labels = waiver(),
x_major_date_breaks = waiver(),
x_date_labels = waiver(),
y_limits = NULL,
y_major_breaks = waiver(),
y_minor_breaks = waiver(),
y_labels = waiver(),
axis_text_size = 11,
line_color = "black",
line_width = 0.8,
line_type = "solid",
line_alpha = 1,
show_major_grids = TRUE,
show_minor_grids = TRUE,
panel_color = "white",
panel_border_color = "black",
show_legend = TRUE,
legend_pos = "right",
legend_key_width = 1.5,
legend_key_height = 1.5,
legend_key_backgrd = "white",
bold_y = NULL,
bold_y_color = "black",
silent_NA_warning = FALSE,
png_file_path = NULL,
png_width_height = c(480, 480)
)
}
\arguments{
\item{df}{The target data frame from which the "stick" lines are drawn.}
\item{base_val}{A numeric that sets the base value from which the "stick" originates.
The default value is 0.}
\item{aes_x}{Sets the x axis variable name from \code{df}. Can be a numeric/Date/POSIXct variable and is required.}
\item{aes_y}{Sets the y axis variable name from \code{df} and controls the height of
individual "sticks". The argument is required}
\item{aes_color}{Sets the variable name from \code{df} for the aesthetic mapping for color.}
\item{title}{A string that sets the plot title.}
\item{subtitle}{A string that sets the plot subtitle.}
\item{caption}{A string that sets the plot caption}
\item{center_titles}{A logical which if \code{TRUE} centers both the \code{title} and \code{subtitle}.}
\item{x_title}{A string that sets the x axis title. If NULL (the default) then the x axis title does not appear.}
\item{y_title}{A string that sets the y axis title. If NULL (the default) then the y axis title does not appear.}
\item{hide_x_tics}{A logical that controls the appearance of the x axis tics.}
\item{hide_y_tics}{A logical that controls the appearance of the y axis tics.}
\item{rot_x_tic_angle}{A numeric that sets the angle of rotation for the x tic labels. When x tic labels are long,
a value of 40 for this argument usually works well.}
\item{rot_y_tic_label}{A logical which if TRUE rotates the y tic labels 90 degrees for enhanced readability.}
\item{x_limits}{Depending on the class of \code{aes_x}, a numeric/Date/POSIXct 2 element vector that sets the minimum
and maximum for the x axis. Use NA to refer to the existing minimum and maximum.}
\item{x_major_breaks}{Depending on the class of \code{aes_x}, a numeric/Date/POSIXct vector or function that
defines the exact major tic locations along the x axis.}
\item{x_minor_breaks}{Depending on the class of \code{aes_x}, a numeric/Date/POSIXct vector or function that defines
the exact minor tic locations along the x axis.}
\item{x_labels}{A character vector with the same length as \code{x_major_breaks}, that labels the major tics.}
\item{x_major_date_breaks}{If the class of \code{aes_x} is Date/POSIXct, a string containing the number and date
unit for major breaks. \code{"1 year"}, \code{"4 sec"}, \code{"3 month"}, \code{"2 week"}.}
\item{x_date_labels}{If the class of \code{aes_x} is Date/POSIXct, a string containing the format codes, the
strftime format, for the date. Examples: \code{\%Y-\%m}, \code{\%Y/\%b/\%d}, \code{\%H-\%M-\%S}}
\item{y_limits}{A numeric 2 element vector that sets the minimum and maximum for the y axis.
Use NA to refer to the existing minimum and maximum.}
\item{y_major_breaks}{A numeric vector or function that defines the exact major tic locations along the y axis.}
\item{y_minor_breaks}{A numeric vector or function that defines the exact minor tic locations along the y axis.}
\item{y_labels}{A character vector with the same length as \code{y_major_breaks}, that labels the major tics.}
\item{axis_text_size}{A numeric that sets the font size along the axis'. Default is 11.}
\item{line_color}{A string that sets the color attribute of the lines.}
\item{line_width}{A numeric value that sets the width of lines.}
\item{line_type}{A string that sets the linetype. The default is "solid".}
\item{line_alpha}{A numeric value that sets the degree of color alpha attribute for the lines.}
\item{show_major_grids}{A logical that controls the appearance of major grids.}
\item{show_minor_grids}{A logical that controls the appearance of minor grids.}
\item{panel_color}{A string in hexadecimal or color name that sets the plot panel's color.
The default is "white".}
\item{panel_border_color}{A string in hexadecimal or color name that sets the plot panel's border color.
The default is "black".}
\item{show_legend}{A logical that controls the appearance of the legend.}
\item{legend_pos}{A string that sets the legend position. Acceptable values are
"top", "bottom", "left", "right".}
\item{legend_key_width}{A numeric that sets the legend width in cm.}
\item{legend_key_height}{A numeric that sets the legend height in cm.}
\item{legend_key_backgrd}{A string that sets the legend's background color.}
\item{bold_y}{A numeric that plots a bold horizontal line at this y value.}
\item{bold_y_color}{A string that sets the bold horizontal line color. Default is "black".}
\item{silent_NA_warning}{A logical that controls the appearance of a console warning when NA's
are removed.}
\item{png_file_path}{A character string with the directory and file name to produce
a png image of the plot.}
\item{png_width_height}{A numeric vector that sets the width and height of the png image in pixels. The
default is c(480,480). There are 37.8 pixels in a centimeter.}
}
\value{
A plot object
}
\description{
Function returns a plot object showing vertical/horizontal lines that run from a base value to
a measurement value. Options are provided for scaling.
}
\author{
Rick Dean
}
|
a09ac4804f68a792eeb4c90151ae36a83c9214ec
|
fed93c5054545d927f3695b51f3a8c9dafb90086
|
/R/tagtools/R/interp2length.R
|
9b6f60c1ef4367035963e942e6c0683c53aa53cb
|
[] |
no_license
|
spluque/TagTools
|
34629e360afd3170aa167437cccfd72001b2c69c
|
5f150109114cbbdf551cbf8a02e335006613d332
|
refs/heads/master
| 2021-12-07T10:54:11.656760
| 2021-10-14T20:36:29
| 2021-10-14T20:36:29
| 233,162,704
| 0
| 0
| null | 2020-01-11T02:11:30
| 2020-01-11T02:11:29
| null |
UTF-8
|
R
| false
| false
| 3,831
|
r
|
interp2length.R
|
#' Interpolate regularly sampled data to increase its sampling rate and match its length to another variable.
#'
#' This function is used to reduce the time span of data by cropping out any data that falls before and after two time cues.
#'
#' @param X A sensor list, vector, or matrix. If x is or contains matrix, each column is treated as an independent signal.
#' @param Z is a sensor structure, vector or matrix whose sampling rate and length is to be matched.
#' @param fs_in is the sampling rate in Hz of the data in X. This is only needed if X is not a sensor structure.
#' @param fs_out is the required new sampling rate in Hz. This is only needed if Z is not given.
#' @param n_out is an optional length for the output data. If n_out is not given, the output data length will be the input data length * fs_out/fs_in.
#' @return Y is a sensor structure, vector or matrix of interpolated data with the same number of columns as X.
#' @examples
#' plott(X = list(harbor_seal$P), fsx = 5)
#' # get an idea of what the data looks like
#' P_dec <- decdc(harbor_seal$P, 5)
#'
#' # note: you would not really want to decimate and then linearly interpolate.
#' # only doing so here to create an example from existing datasets
#' # that have uniform sampling rates across sensors
#'
#' P_interp <- interp2length(X = P_dec, Z = harbor_seal$A)
#' plott(X = list(P_interp$data), fsx = 1)
#' # compare to original plot. should be pretty close
#' @export
interp2length <- function(X, Z, fs_in = NULL, fs_out = NULL, n_out = NULL) {
  # INPUT CHECKING ----------------------------
  if (missing(X) | missing(Z)) {
    stop("Inputs X and Z are required for interp2length().")
  }
  # X may be a sensor list (with $data and $sampling_rate elements) or a
  # bare vector/matrix; in the latter case the caller must supply fs_in.
  if (is.list(X)) {
    x <- X$data
    fs_in <- X$sampling_rate
  } else {
    if (missing(fs_in)){
      stop('Input fs_in is required if X is not a sensor data list.')
    }
    x <- X
  }
  # Coerce x to a matrix with one column per signal; a single-row matrix is
  # taken to be one signal stored as a row, so it is transposed.
  if (!is.matrix(x)) {
    x <- matrix(x, ncol = 1)
  }
  if (nrow(x) == 1) {
    x <- t(x)
  }
  # Same treatment for Z, whose sampling rate and length we want to match.
  if (is.list(Z)) {
    z <- Z$data
    fs_out <- Z$sampling_rate
  } else {
    if (missing(fs_out)){
      stop('input fs_out is required if Z is not a sensor data list.')
    }
    z <- Z
  }
  if (!is.matrix(z)) {
    z <- matrix(z, ncol = 1)
  }
  if (nrow(z) == 1) {
    z <- t(z)
  }
  # Default output length: match Z exactly.
  if (is.null(n_out)){
    n_out <- nrow(z)
  }
  # DO INTERPOLATION ---------------------------------
  if (fs_in == fs_out) {
    # if sampling rates are the same, no need to interpolate,
    # just make sure the length is right
    y <- check_size(x, n_out)
  } else {
    # if sampling rates are different:
    # linearly interpolate each column of x from its own time grid
    # (samples at 0, 1/fs_in, 2/fs_in, ...) onto the output time grid.
    y <- matrix(0, nrow = nrow(z), ncol = ncol(x))
    for (c in 1:ncol(x)) {
      y[ , c] <- stats::approx(x = c(0:(nrow(x)-1)) / fs_in,
                               y = x[, c],
                               xout = c(0:(nrow(z)-1)) / fs_out,
                               rule = 2 # return value at the closest data extreme when extrapolating (should be only a few samples)
      )$y
    }
    y <- check_size(y, n_out)
  }
  # FORMAT OUTPUT (TO SENSOR LIST IF NEEDED) ----------
  # If X was a sensor list, return an updated copy so its metadata is kept;
  # otherwise return the plain interpolated matrix.
  if (is.list(X)) {
    Y <- X
    Y$data <- y
    Y$sampling_rate <- fs_out
    Y$history <- paste(Y$history, ' interp2length from', fs_in, 'Hz to ', fs_out, 'Hz')
  } else {
    Y = y
  }
  return(Y)
}
# Pad or truncate a data matrix so it has exactly n_out rows.
#
# y     : matrix of data (one column per signal)
# n_out : required number of rows
# Returns y with exactly n_out rows: shorter input is padded by repeating
# its last row, longer input is truncated. A warning reports the mismatch.
check_size <- function(y, n_out) {
  if (nrow(y) < n_out) {
    warning(paste('Data size mismatch: data is shorter than expected by ', n_out - nrow(y), ' rows.'))
    # Repeat the final row to fill the missing rows. ncol must be given
    # explicitly: without it, matrix() infers the wrong shape whenever
    # more than one pad row is needed for multi-column data.
    y <- rbind(y,
               matrix(data = y[nrow(y), ],
                      nrow = n_out - nrow(y),
                      ncol = ncol(y),
                      byrow = TRUE))
  }
  if (nrow(y) > n_out) {
    # Report the true (positive) number of dropped rows; the original
    # computed n_out - nrow(y), which printed a negative count here.
    warning(paste('Data size mismatch: data is longer than expected by ', nrow(y) - n_out, ' rows.'))
    # drop = FALSE keeps the matrix shape when there is only one column,
    # since callers store the result back into a sensor list's $data.
    y <- y[1:n_out, , drop = FALSE]
  }
  return(y)
}
|
1368d40cd0c828e955d7856bec05509953ddef33
|
c23a1fce67d95efab4c3e5a59e76a09d41d946a2
|
/tests/testthat/test-utils.R
|
aa014009f658d7e0720587d2e4caae8bdf161d21
|
[] |
no_license
|
OJWatson/context
|
d4b56e1816f3f9c0f86595e6fa290e0ad0e46dae
|
d3e038f179417353ec6f2d74422ec5dff79a0566
|
refs/heads/master
| 2021-01-16T21:57:15.997940
| 2016-05-27T10:59:29
| 2016-05-27T10:59:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,766
|
r
|
test-utils.R
|
context("utils")
test_that("filter_warnings", {
f <- function(x) {
warning(x)
}
expect_warning(filter_warnings(f("warning"), character(0)), "warning")
expect_warning(filter_warnings(f("warning"), "pattern"), "warning")
expect_silent(filter_warnings(f("warning"), "warning"))
expect_silent(filter_warnings(f("warning"), c("pattern", "warning")))
expect_silent(filter_warnings(f("warning"), c("warning", "pattern")))
})
test_that("install.packages2", {
repos <- c(CRAN="http://cran.rstudio.com")
expect_warning(install.packages("asdfa", repos=repos))
## This is super annoying; really should fail:
expect_null(suppressWarnings(install.packages("asdfa", repos=repos)))
expect_error(suppressWarnings(install.packages2("asdfa", repos=repos)),
"is not available")
})
test_that("capture_log", {
filename <- tempfile()
expect_message(capture_log(message("foo"), NULL), "foo")
capture_log(message("foo"), filename)
expect_true(file.exists(filename))
## This is because of test_that's message muffling; that's
## notoriously version dependent unfortunately.
## expect_identical(readLines(filename), "foo")
## In comparison see
## local({
## filename <- tempfile()
## capture_log(message("foo"), filename)
## readLines(filename)
## })
f <- function() {
cat("foo\n")
1
}
expect_equal(capture_log(f(), filename), 1)
expect_identical(readLines(filename), "foo")
})
test_that("absolute paths", {
expect_true(is_absolute_path("/foo/bar"))
expect_true(is_absolute_path("//network/bar"))
expect_true(is_absolute_path("\\\\network/bar"))
expect_true(is_absolute_path("c:/foo/bar"))
expect_false(is_absolute_path("."))
expect_false(is_absolute_path("foo/bar"))
})
|
346c5319f8851dcf60fe1082e499244853ddf516
|
0301c0c718e4be5f3323a2e6776a8f4daa7b173e
|
/R/std.ramp.R
|
a491b6e7d60396d7b60d84788940f3ba8b09d6dc
|
[] |
no_license
|
suztolwinskiward/VSLiteR
|
c719b13ab5b242c3735fa2e99c5f24a178d2d7cd
|
b96bb4b668326e8b9cfded3ce23badc28b02059b
|
refs/heads/master
| 2021-06-04T07:24:13.451521
| 2015-09-16T14:34:11
| 2015-09-16T14:34:11
| 34,275,377
| 12
| 5
| null | 2018-09-14T00:38:27
| 2015-04-20T17:29:40
|
R
|
UTF-8
|
R
| false
| false
| 665
|
r
|
std.ramp.R
|
#' "Standard ramp" function for building growth response functions.
#'
#' \code{std.ramp} clamps the linear rescaling of \code{x} between the bounds
#' \code{x1} and \code{x2} to the interval [0, 1]: values below (above) the
#' lower (upper) bound are assigned a value of zero (one), and values in
#' between respond linearly.
#'
#' @param x The value(s) at which we want to evaluate the ramp function.
#'   May be a vector, matrix, or array; dimensions are preserved.
#' @param x1 The lower bound of the support of the nonzero part of the ramp.
#' @param x2 The lower bound of the range of the preimage of 1.
#'
#' @export
std.ramp <- function(x, x1, x2) {
  # pmin/pmax are vectorized and preserve dim attributes, so this yields the
  # same result as the previous nested apply() construction for matrices and
  # arrays, while also working for plain vectors (where dim(x) is NULL and
  # apply(..., 1:length(dim(x)), ...) failed).
  pmax(pmin((x - x1) / (x2 - x1), 1), 0)
}
|
c1622f9afc8bd785cfe69cbb7db086b1740aee4c
|
b613db234c506cd10f1ffd333097fa94a2c8215e
|
/ncaa_scraping/ncaa_scraping.R
|
74b6e88108c9892d3def40e5ea479ce7dc85aa98
|
[] |
no_license
|
BillPetti/baseball_research_notebook
|
b6d8e21b62296dbaf5d14806d4cd280b667fb2e7
|
935edea8a81dd8213d733864bba3f1918dc14a61
|
refs/heads/master
| 2021-01-17T18:18:18.743291
| 2018-02-17T20:15:34
| 2018-02-17T20:15:34
| 71,358,789
| 12
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,713
|
r
|
ncaa_scraping.R
|
#### Bill Petti
#### billpetti.github.io
#### Research Notebook
#### Acquiring Baseball Stats from the NCAA with R
#### Originally coded November 2016
# load required packages
if(!require(baseballr)) {
install_github("BillPetti/baseballr")
require(baseballr)
} # functions for baseball analysis
require(rvest) # for data scraping
require(xml2) # for data scraping
require(dplyr) # for data manipulation
# function to scrape conference_ids
# Scrape the NCAA baseball team-list page for one academic year/division and
# return a lookup table mapping conference names to numeric conference ids.
#
# year : academic year, e.g. 2015
# div  : NCAA division (1, 2, or 3)
# Returns a data.frame with columns year, division, conference, conference_id.
# NOTE(review): output depends on the live page structure at stats.ncaa.org;
# the link-parsing regexes below assume conference links call a JavaScript
# changeConference(<id>) handler -- verify if the site markup changes.
conference_table <- function(year, div) {
  url <- paste0("http://stats.ncaa.org/team/inst_team_list?academic_year=", year, "&conf_id=-1&division=", div, "&sport_code=MBA")
  read <- read_html(url)
  # Collect every anchor's href and its display text, pairwise.
  links <- html_nodes(read, "a") %>%
    html_attr("href")
  link_names <- html_nodes(read, "a") %>%
    html_text()
  table <- as.data.frame(cbind(link_names, links))
  names(table) <- c("conference", "links")
  table$conference <- as.character(table$conference)
  # Keep only the anchors that switch conference (their href contains the
  # changeConference(<id>) call holding the numeric id we want).
  links_conferences <- table %>%
    filter(grepl("changeConference", links))
  # Extract the id: strip everything up to "(" and after ")".
  conference_ids <- sub("\\).*", "", sub(".*\\(", "", links_conferences$links))
  conference_ids <- as.data.frame(conference_ids)
  names(conference_ids) <- "conference_id"
  table <- cbind(links_conferences, conference_ids)
  # Attach the query parameters and drop the raw link text; the first row is
  # removed (it is the "all conferences" entry, per the original code).
  table <- table %>%
    mutate(year = year, division = div, conference_id = as.numeric(as.character(conference_id))) %>%
    select(year, division, everything(), -links) %>% .[-1,]
  table
}
# loop table
year <- c(2013, 2014, 2015, 2016)
division <- c(1,2,3)
div_yr <- expand.grid(year, division)
# loop over values to create conference lookup table
conference_code_lu <- div_yr %>%
group_by(Var1, Var2) %>%
do(conference_table(.$Var1, .$Var2))
# team function
# Scrape the NCAA team-list page for one year/conference/division and return
# the schools in that conference together with their numeric school ids.
#
# year       : academic year, e.g. 2015
# conference : numeric conference id (see conference_table())
# div        : NCAA division (1, 2, or 3)
# Returns a data.frame with columns school, school_id, year, division,
# conference_id. Depends on the live markup at stats.ncaa.org.
teams <- function(year, conference, div) {
  url <- paste0("http://stats.ncaa.org/team/inst_team_list?academic_year=", year, "&conf_id=", conference, "&division=", div, "&sport_code=MBA")
  read <- read_html(url)
  # Collect every anchor's href and display text, pairwise.
  links <- html_nodes(read, "a") %>%
    html_attr("href")
  link_names <- html_nodes(read, "a") %>%
    html_text()
  table <- as.data.frame(cbind(link_names, links))
  table$links <- as.character(table$links)
  table$link_names <- as.character(table$link_names)
  # Keep only team pages: hrefs containing "team" but not the list page
  # itself ("inst_team") or schedule links.
  table <- table %>%
    filter(grepl("team", links)) %>%
    filter(!grepl("inst_team", links)) %>%
    filter(!grepl("schedule", links))
  # Reduce "/team/<id>/..." down to the bare school id.
  table$links <- gsub("/team/", "", table$links)
  table$links <- sub("/.*", "", table$links)
  table$year <- year
  table$division <- div
  table$conference_id <- conference
  names(table) <- c("school", "school_id", "year", "division", "conference_id")
  table
}
# loop over values in the conference_code_lu table
master_ncaa_team_lu <- conference_code_lu %>%
group_by(year, division, conference, conference_id) %>%
do(teams(.$year, .$conference_id, .$division)) %>%
ungroup() %>%
select(school, conference, everything()) %>%
mutate(school_id = as.numeric(school_id)) %>%
arrange(school)
# example uses of the baseballr::ncaa_scrape function
# 736 is the school_id for Vanderbilt
ncaa_scrape(736, 2015, "batting")
ncaa_scrape(736, 2015, "pitching")
# create looping table
year <- c(2014, 2015, 2016)
school_id <- 736
division <- 1
v_table <- expand.grid(school_id, year)
# loop over values to acquire last three years of pitching data for Vanderbilt
v_table %>%
group_by(Var1, Var2) %>%
do(ncaa_scrape(.$Var1, .$Var2, "pitching"))
# create looping table for Vanderbilt and Florida for the past three years
year <- c(2014, 2015, 2016)
school_id <- c(235, 736)
v_table <- expand.grid(school_id, year)
# loop over values to acquire last three years of pitching data for Vanderbilt and Florida
v_table %>%
group_by(Var1, Var2) %>%
do(ncaa_scrape(.$Var1, .$Var2, "pitching"))
|
92be820f20ca6e62d262e7a10ff63676883b893c
|
3fa0a9316249793f6c25be59084603b99c700961
|
/old/choropleth_pop_land_co2.R
|
7072b95a85146f05ea56c2580c4c0de3a0cbd456
|
[
"MIT"
] |
permissive
|
TimothyNguyen/Climate-Change
|
58820b1df20a81f5e9c0b0135a6887413486926b
|
daeec30acbee24d1b064d7add4f46a70323a8c23
|
refs/heads/master
| 2023-04-17T23:18:31.418922
| 2021-05-05T00:05:19
| 2021-05-05T00:05:19
| 342,430,285
| 0
| 0
|
MIT
| 2021-03-01T02:01:25
| 2021-02-26T01:40:52
|
HTML
|
UTF-8
|
R
| false
| false
| 6,372
|
r
|
choropleth_pop_land_co2.R
|
rm(list=ls())
# install.packages("choroplethrMaps")
library(shiny)
library(ggplot2)
library(plotly)
library(tidyr)
library(dplyr)
library(countrycode)
library(choroplethr)
library(readr)
library(lubridate)
gdp_per_cap <-
read.csv(
"./data/income_per_person_gdppercapita_ppp_inflation_adjusted.csv",
header = TRUE,
stringsAsFactors = FALSE,
check.names = FALSE
)
pop <-
read.csv(
"./data/population_total.csv",
header = TRUE,
stringsAsFactors = FALSE,
check.names = FALSE
)
yearly_co2 <-
read.csv(
"./data/yearly_co2_emissions_1000_tonnes.csv",
header = TRUE,
stringsAsFactors = FALSE,
check.names = FALSE
)
land_temp <-
read.csv(
"./data/GlobalLandTemperaturesByCountry.csv",
header = TRUE,
stringsAsFactors = FALSE,
check.names = FALSE
)
gdp_per_cap$continent <- countrycode(sourcevar = gdp_per_cap[, "country"],
origin = "country.name",
destination = "continent")
pop$continent <- countrycode(sourcevar = pop[, "country"],
origin = "country.name",
destination = "continent")
land_temp$continent <- countrycode(sourcevar = land_temp[, "country"],
origin = "country.name",
destination = "continent")
yearly_co2$continent <- countrycode(sourcevar = yearly_co2[, "country"],
origin = "country.name",
destination = "continent")
# Clean the data
land_temp <- land_temp %>% drop_na("continent")
drop <- c("AverageTemperatureUncertainty")
land_temp <- land_temp[!(names(land_temp) %in% drop)]
land_temp <- within(land_temp,
date <- ifelse(!is.na(as.Date(land_temp$dt, "%Y-%m-%d")),
as.character(as.Date(land_temp$dt, "%Y-%m-%d")),
as.character(as.Date(land_temp$dt, "%m/%d/%Y"))))
land_temp <- land_temp[!(names(land_temp) %in% drop)]
land_temp <- na.omit(land_temp)
land_df <- land_temp %>%
mutate(country, year = year(date)) %>%
group_by(country, year, continent)
drop <- c("dt")
land_df <- land_df[!(names(land_df) %in% drop)]
land_df <- aggregate(land_df$AverageTemperature,
by=list(year=land_df$year,
country=land_df$country,
continent=land_df$continent),
FUN=mean, na.action = na.omit)
land_df <- land_df %>%
mutate(AverageTemperature = x * 1.8 + 32)
drop <- c("x")
land_df <- land_df[!(names(land_df) %in% drop)]
names(land_df)[4] <- "AverageTemperature"
df_co2 <- yearly_co2%>%
pivot_longer(c('1850':'2012'), names_to = "year",
values_to = "co2_emissions") %>%
select(country, year, co2_emissions)
df_co2 <- na.omit(df_co2, cols=c("co2_emissions"))
df_gdp <- gdp_per_cap%>%
pivot_longer(c('1850':'2012'), names_to = "year", values_to = "gdpPercap") %>%
select(country, year, gdpPercap)
df_pop <- pop%>%
pivot_longer(c('1850':'2012'), names_to = "year", values_to = "pop") %>%
select(country, year, pop)
df_land <- filter(land_df, year >= 1850) %>% filter(year <= 2012)
df_land <- df_land %>% mutate(year = as.character(year))
first_graph <- left_join(df_pop, df_co2) %>%
merge(df_land)
first_graph <- na.omit(first_graph, cols=c("co2_emissions"))
first_graph$CODE <- countrycode(first_graph$country, origin = 'country.name', destination = 'genc3c')
con <- factor(c('Asia','Africa', 'Americas', 'Europe', 'Oceania'))
print(levels(con))
ui <- fluidPage(
sidebarLayout(
sidebarPanel(
helpText("Interactive plotting on Land Temperatures"),
sliderInput("year", "Year (Land Temperatures)",
min = range(as.numeric( first_graph$year))[1],
max = range(as.numeric( first_graph$year))[2],
value = range(as.numeric( first_graph$year))[1],
sep = "",
step = 1,
animate = animationOptions(interval = 500)
),
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("AverageTemperature", plotOutput("temp_plot")),
tabPanel("CO2 Emissions", plotOutput("co2_plot"))))
)
)
# Shiny server: renders two choropleth maps (average land temperature and
# CO2 emissions) for the year chosen by the UI slider. Both outputs filter
# the precomputed `first_graph` data frame and recode World Bank style
# country names to the region names used by choroplethr's country map.
server <- function(input, output) {
  output$temp_plot <- renderPlot({
    df <- first_graph %>%
      filter(year == input$year) %>%
      rename(region = country, value = AverageTemperature) %>%
      mutate(region = tolower(region)) %>%
      # "Korea, Dem. Rep." is North Korea and "Korea, Rep." is South Korea.
      # The original mapping had them swapped and misspelled the South Korea
      # key as "korea. rep.", so it never matched.
      mutate(region = recode(region,
                             "united states" = "united states of america",
                             "congo, dem. rep." = "democratic republic of the congo",
                             "congo, rep." = "republic of congo",
                             "korea, dem. rep." = "north korea",
                             "korea, rep." = "south korea",
                             "tanzania" = "united republic of tanzania",
                             "serbia" = "republic of serbia",
                             "slovak republic" = "slovakia",
                             "yemen, rep." = "yemen"))
    country_choropleth(df, num_colors=5) +
      scale_fill_brewer(palette="OrRd")
  })
  output$co2_plot <- renderPlot({
    df <- first_graph %>%
      filter(year == input$year) %>%
      rename(region = country, value = co2_emissions) %>%
      mutate(region = tolower(region)) %>%
      # Same recoding as above (see the Korea note).
      mutate(region = recode(region,
                             "united states" = "united states of america",
                             "congo, dem. rep." = "democratic republic of the congo",
                             "congo, rep." = "republic of congo",
                             "korea, dem. rep." = "north korea",
                             "korea, rep." = "south korea",
                             "tanzania" = "united republic of tanzania",
                             "serbia" = "republic of serbia",
                             "slovak republic" = "slovakia",
                             "yemen, rep." = "yemen"))
    country_choropleth(df, num_colors=5) +
      scale_fill_brewer(palette="OrRd")
  })
}
shinyApp(ui = ui, server = server)
|
163ce118b0df2ba0562e3b1dcc29727063d71190
|
27b8a460e3d1085b0a0eb09540238b0e9f9ed1de
|
/cachematrix.R
|
0137511422516eb8f877440123047c81f64bb353
|
[] |
no_license
|
limbt/ProgrammingAssignment2
|
e76a18068afa65bbcd790af6d114fc879e1d8338
|
9ff3c04f48ef7452f488d35303a23b108f6e2508
|
refs/heads/master
| 2020-12-11T03:36:47.292148
| 2016-07-27T04:33:32
| 2016-07-27T04:33:32
| 64,272,593
| 0
| 0
| null | 2016-07-27T03:03:20
| 2016-07-27T03:03:20
| null |
UTF-8
|
R
| false
| false
| 1,471
|
r
|
cachematrix.R
|
## makeCacheMatrix - creates a special "matrix" object that can cache its
## inverse.
##
## The returned object is a list of four accessor closures that share state
## (the matrix x and its cached inverse) through their enclosing environment:
##   set(y)        - replace the stored matrix and invalidate the cache
##   get()         - return the stored matrix
##   setinverse(v) - store a computed inverse in the cache
##   getinverse()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # the old inverse no longer applies
    },
    get = function() x,
    setinverse = function(solve) cached <<- solve,
    getinverse = function() cached
  )
}
## cacheSolve - computes the inverse of the special "matrix" returned by
## makeCacheMatrix above.
##
## If the inverse has already been calculated (and the matrix has not been
## changed since, which would have cleared the cache), the cached value is
## returned and a message is printed; otherwise the inverse is computed with
## solve(), stored in the cache, and returned. Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, store for next time, and fall through to return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
813197bef5b0bed3485ff090d0205e432c1dba84
|
aca4485cc47b6b8c62d2c3f9b4139ffae188f30d
|
/R/SullivanStatsSurveyII.R
|
a6aa6934472e9fa220fd6b23b9893fc95befb589
|
[] |
no_license
|
dtkaplan/sullystats6e
|
df5c4df3beacc177f8cfe7e523ed5f2d97790a39
|
3d2be5e92266b0971c4bc889f8d5ccd923443908
|
refs/heads/master
| 2022-06-08T01:15:52.076015
| 2020-05-05T17:14:52
| 2020-05-05T17:14:52
| 259,423,101
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
SullivanStatsSurveyII.R
|
#' Sullivan Statistics Survey II data
#'
#' @docType data
#'
#' @details This data represents the results of a survey written by the students of Michael Sullivan.
#'
#'
#' @usage data(SullivanStatsSurveyII)
#'
#' @format The variables are
#'
#' - `Gender`: The gender of the respondent.
#' - `Age`: The age of the respondent (in years).
#' - `Education`: What is your level of education?
#' - `Tax`: What percent of income do you believe individuals should pay in federal income tax?
#' - `Inequality`: Do you believe there an income inequality discrepancy between males and females when each has the same experience and education?
#' - `MinWage`: Do you beleive there should be a minimum wage?
#' - `Amount`: If yes what do you believe the minimum wage should be?
#' - `Philosophy`: What is your political philosophy?
#' - `Text`: Do you believe it is okay to text while at a red light (as the driver)?
#' - `RetirementDollars`: How much do you believe (in today's dollars) you will need for a comfortable retirement?
#' - `Retire`: What do you believe is the ideal retirement age?
#' - `Death`: For how many years would you like to live?
#'
#'
#' @keywords datasets
#'
#' @source StatCrunch Survey
#'
"SullivanStatsSurveyII"
|
72a2921ca3c1ac6d01cfd304763177254ec50bfe
|
5318dfda83a8070e821dd7d2975cf8f86f323d01
|
/analyses/20CR/obs_feedback/plot_scatter.R
|
7edfbfa565162c774657200cd5b7b6c922cd4de6
|
[] |
no_license
|
oldweather/oldWeather1
|
a8f82d1c56c21a74b69af00935e830803691097e
|
04b970cf0e93d4de1ea8a1dc0b2a870dcb5870c6
|
refs/heads/master
| 2021-01-17T13:11:48.821947
| 2020-11-13T16:56:03
| 2020-11-13T16:56:03
| 10,826,748
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,865
|
r
|
plot_scatter.R
|
# Make scatter plots from the 20CR obs feedback files
library(TWCR)
library(grid)
Year<-1918
Month<-8
Day<-12
Hour<-6
Version<-'3.3.8'
# Get obs used in single assimilation run
# Read the prepbufr observation-feedback file for one 20CR analysis time
# and return a data frame of observations with derived columns:
#   odates            - observation date/time parsed from the ID string
#   height            - station height, 9999 sentinel converted to NA
#   observed.pressure - raw pressure, 10000 sentinel converted to NA
#   modified.obs      - bias-modified obs (>9000 -> NA), then adjusted to
#                       sea level via a linear model on station height
#   mean.analysis     - ensemble analysis value (>1000 -> NA); V22 appears
#                       to be stored relative to the modified obs, so the
#                       obs value is added back -- NOTE(review): confirm
#                       against the 20CR feedback-file format
#   spread.analysis   - ensemble spread (>1000 -> NA)
#   oW.obs            - TRUE for oldWeather observations (ID prefix '9931')
#
# year, month, day, hour : analysis time to load
# version                : 20CR version whose data directory to use
local.TWCR.get.obs<-function(year,month,day,hour,version=2) {
    base.dir<-TWCR:::TWCR.get.data.dir(version)
    of.name<-sprintf(
        "%s/observations/%04d/prepbufrobs_assim_%04d%02d%02d%02d.txt",base.dir,
         year,year,month,day,hour)
    # Fixed typo in the error message ("fior" -> "for").
    if(!file.exists(of.name)) stop("No obs file for given date")
    # Only the first 160 characters of each record are fixed-width data.
    o<-read.table(pipe(sprintf("cut -c1-160 %s",of.name)),
                  header=FALSE,stringsAsFactors=FALSE,
                  colClasses=c('character','integer','character',
                               rep('numeric',20)))
    # The first field packs the date as YYYYMMDDHH; unpack it into a chron.
    o$odates<-chron(dates=sprintf("%04d/%02d/%02d",as.integer(substr(o$V1,1,4)),
                                                   as.integer(substr(o$V1,5,6)),
                                                   as.integer(substr(o$V1,7,8))),
                    times=sprintf("%02d:00:00",as.integer(substr(o$V1,9,10))),
                    format=c(dates='y/m/d',times='h:m:s'))
    o<-o[,seq(1,23)] # Truncate to regular section
    o$V4<-as.numeric(o$V4)
    o$V5<-as.numeric(o$V5)
    o<-o[(o$V4<=360 & o$V5<=90),] # Throw away obs outside possible range
    # Process the obs to set missing correctly and flag oW obs
    o$height<-o$V7
    is.na(o$height[o$height==9999])<-T
    o$observed.pressure<-o$V11
    is.na(o$observed.pressure[o$observed.pressure==10000])<-T
    o$modified.obs<-o$V10
    is.na(o$modified.obs[o$modified.obs>9000])<-T
    # Adjust to sea-level with a linear model
    l<-lm(o$modified.obs~o$height+1)
    o$modified.obs<-l$residuals+l$coefficients[1]
    o$mean.analysis<-o$V22
    is.na(o$mean.analysis[o$mean.analysis>1000])<-T
    o$mean.analysis<-o$mean.analysis+o$modified.obs
    o$spread.analysis<-o$V23
    is.na(o$spread.analysis[o$spread.analysis>1000])<-T
    o$oW.obs<-grepl('9931',o$V1) # Find the oW obs
    return(o)
}
obs<-local.TWCR.get.obs(Year,Month,Day,Hour,version=Version)
png('tst.png',width=1000,height=1000)
range<-c(980,1040)
pushViewport(viewport(width=1,height=1,x=0,y=0,
just=c("left","bottom"),name="vp_main"))
pushViewport(plotViewport(margins=c(5,5,1,1)))
pushViewport(dataViewport(range,range))
#tics<-pretty(range,n=5)
grid.xaxis(main=T)
grid.text('Observed Pressure',y=unit(-3,"lines"))
grid.yaxis(,main=T)
grid.text('Analysis Pressure',x=unit(-3.5,"lines"), rot=90)
gp_blue = gpar(col=rgb(0,0,1,1),fill=rgb(0,0,1,1),lwd=2)
gp_red = gpar(col=rgb(1,0,0,1),fill=rgb(1,0,0,1),lwd=2)
gp_grey = gpar(col=rgb(0.8,0.8,0.8,1),fill=rgb(0.8,0.8,0.8,1))
grid.lines(x=unit(range,'native'),
y=unit(range,'native'),
gp=gp_grey)
grid.polyline(x=unit(as.vector(rbind(obs$modified.obs,obs$modified.obs)),"native"),
y=unit(as.vector(rbind(obs$mean.analysis-obs$spread.analysis*2,
obs$mean.analysis+obs$spread.analysis*2)),'native'),
id.lengths=rep(2,length(obs$modified.obs)),
gp=gp_blue)
grid.polyline(x=unit(as.vector(rbind(obs$modified.obs[which(obs$oW.obs)],
obs$modified.obs[which(obs$oW.obs)])),"native"),
y=unit(as.vector(rbind(obs$mean.analysis[which(obs$oW.obs)]-
obs$spread.analysis[which(obs$oW.obs)]*2,
obs$mean.analysis[which(obs$oW.obs)]+
obs$spread.analysis[which(obs$oW.obs)]*2)),
'native'),
id.lengths=rep(2,length(obs$modified.obs[which(obs$oW.obs)])),
gp=gp_red)
popViewport()
popViewport()
upViewport()
dev.off()
|
aaeb6334dee8d494dae25ba7c9bcfe8faee3f5ea
|
e0e538679b6e29837839fdbc3d68b4550e256bb9
|
/docs/spring/code/ex08.R
|
fede6155bafd4429a59d905bb1ae33b68d85b6be
|
[] |
no_license
|
noboru-murata/sda
|
69e3076da2f6c24faf754071702a5edfe317ced4
|
4f535c3749f6e60f641d6600e99a0e269d1fa4ea
|
refs/heads/master
| 2020-09-24T20:23:36.224958
| 2020-09-22T07:17:54
| 2020-09-22T07:17:54
| 225,833,335
| 0
| 0
| null | 2019-12-05T08:20:51
| 2019-12-04T09:51:18
| null |
UTF-8
|
R
| false
| false
| 2,345
|
r
|
ex08.R
|
###
### Example: retrieve stock price data from Yahoo! finance and organize it
###
install.packages("quantmod")
library(quantmod)
## Google(Alphabet), Microsoft, Amazon, Facebook, IBM, Apple
companies <- c("Google","Microsoft","Amazon","Facebook","IBM","Apple")
symbols <- c("GOOG","MSFT","AMZN","FB","IBM","AAPL")
getSymbols(symbols)
myData <- data.frame( # FB data only starts 2012-05-18, so align everything from 2012-06 onward
    GOOG['2012-06/',1],
    MSFT['2012-06/',1],
    AMZN['2012-06/',1],
    FB['2012-06/',1],
    IBM['2012-06/',1],
    AAPL['2012-06/',1])
names(myData) <- companies
## Save the data
write.csv(myData,file="data/prices.csv")
### End of the data-collection example
## Load the saved data
myData <- read.csv(file="data/prices.csv", row.names=1)
companies <- names(myData)
## Draw some plots
plot.ts(myData) # display as a time series
matplot(myData, type="l", col=rainbow(6))
legend("topleft", inset=0.01,
       legend=companies, col=rainbow(6), lty=1, lwd=3)
## Extract Mondays only
## The row names hold dates as strings, so convert them to a
## time representation (as.POSIXlt) and pick the row numbers whose
## weekday information (wday) corresponds to Monday (1)
mondays <- which(as.POSIXlt(rownames(myData))$wday==1)
myDataMon <- myData[mondays,]
## Plot these in the same way
plot.ts(myDataMon) # display as a time series
matplot(myDataMon, type="l", col=rainbow(6))
legend("topleft", inset=0.01,
       legend=companies, col=rainbow(6), lty=1, lwd=3)
## For example, compare the mean prices
colMeans(myData) # apply(myData,2,mean) also works
colMeans(myDataMon)
## Compare the mean day-to-day changes
## The function diff(x) computes successive differences of the vector x
colMeans(apply(myData,2,diff)) # average daily increase (or decrease)
colMeans(apply(myDataMon,2,diff)) # change over each 5-day span
colMeans(apply(myData,2,diff))*5 # roughly matches (unsurprising, since these are means)
## Visualize the changes
boxplot(apply(myData,2,diff))
boxplot(apply(myDataMon,2,diff))
## Compare the mean magnitude of change (absolute value of the change)
colMeans(apply(myData,2,function(x)abs(diff(x)))) # daily change magnitude
colMeans(apply(myDataMon,2,function(x)abs(diff(x)))) # week-start-to-week-start change magnitude
colMeans(apply(myData,2,function(x)abs(diff(x))))*5 # there is considerable movement within each week
|
a89f00b97f5f1f5014fc3c0c7101d8fea03d90ec
|
00daf46a1286c20caa103a95b111a815ea539d73
|
/man/Function.Rd
|
a0f1539f7bd36f4338cfa3abfe36c0f17a49dd0a
|
[] |
no_license
|
duncantl/Rllvm
|
5e24ec5ef50641535895de4464252d6b8430e191
|
27ae840015619c03b2cc6713bde71367edb1486d
|
refs/heads/master
| 2023-01-10T15:12:40.759998
| 2023-01-02T18:05:26
| 2023-01-02T18:05:26
| 3,893,906
| 65
| 14
| null | 2017-03-09T07:59:25
| 2012-04-01T16:57:16
|
R
|
UTF-8
|
R
| false
| false
| 3,081
|
rd
|
Function.Rd
|
\name{Function}
\alias{Function}
\alias{Routine}
\alias{Function-class}
\alias{names<-,Function,character-method}
\alias{names<-,ParameterList,character-method}
\alias{names,ParameterList-method}
\alias{names,Function-method}
\alias{[,Function,numeric,missing-method}
\alias{[[,Function,character,ANY-method}
\alias{coerce,Function,Module-method}
\alias{getParameters}
\alias{$,Function-method}
\alias{coerce,Function,function-method}
\alias{getFunctionArgs}
\alias{setFuncAttributes}
\alias{getFunctionReturnType}
\alias{setParamAttributes}
\alias{makeRFunction}
\alias{getFunctionAddress}
\alias{getFuncAttributes}
\alias{getBlocks}
\alias{getBlocks,Function-method}
\alias{getModule,Function-method}
\alias{coerce,Function,function-method}
\alias{[[,Function,numeric,ANY-method}
\alias{getParent,Function-method}
\alias{getParent,Argument-method}
\alias{getParameters,Function-method}
\alias{getParameters,FunctionType-method}
\alias{LLVMAttributes}
\alias{isVarArg}
\alias{getContext,Function-method}
\alias{coerce,Function,character-method}
\alias{coerce,Function,character-method}
\alias{getReturnType,FunctionType-method}
\alias{getInstructions,Function-method}
\alias{canReturnTwice}
\alias{doesNoCfCheck}
\alias{getEntryBlock}
\title{Create an LLVM function/routine}
\description{
This function and class creates a template
Function object which we can use to build
a native, compiled function.
This is not to be confused with the R reserved
word \code{function}.
}
\usage{
Function(name, retType, paramTypes = list(), module = Module(), varArgs = FALSE, ...)
}
\arguments{
\item{name}{a string giving the name of the function}
\item{retType}{the return type of the new function. This should be of
class \code{\link{Type-class}}.}
\item{paramTypes}{a list giving the types of the parameters of the
function.
These can be named or not.}
\item{module}{the module in which the function is to be defined}
\item{varArgs}{a logical value that indicates whether the routine
has variadic arguments, i.e. passed to it via \dots (not the \dots
in this function)}
\item{\dots}{additional inputs for this call (not for calling the
routine)}
% \item{x}{the function object whose parent module we want to retrieve.}
}
\value{
An object of class \code{Function}
}
\references{
LLVM Documentation \url{http://llvm.org/docs/}
}
\author{
Duncan Temple Lang
}
\seealso{
\code{\link{simpleFunction}},
\code{\link{Block}},
\code{\link{IRBuilder}}
}
\examples{
# This shows how we can use LLVM to call an existing routine.
m = Module()
ee = ExecutionEngine(m)
Rf_PrintValue = declareFunction(list(VoidType, SEXPType), "Rf_PrintValue", m)
#llvmAddSymbol("Rf_PrintValue")
.llvm(Rf_PrintValue, 1:10)
m = Module()
f = Function("f", Int32Type, module = m)
ir = IRBuilder(f)
ir$createReturn(ir$createConstant(3L))
showModule(m)
.llvm(f)
f = system.file("IR/fib.ir", package = 'Rllvm')
m = parseIR(f)
a1 = getFuncAttributes(m$fib)
a2 = getFuncAttributes(m$fib, FALSE)
names(a2)
names(a2)[a2]
}
\keyword{programming}
|
d89d10c2b9d484af9eb3194b5db4e40bd47461c4
|
11b609da66e0f4034f887ea0201ca29687b85531
|
/Starter Kit/scripts/scoping.R
|
66140ebd2f4f99e907255fecce4bbfcfdb77bd7b
|
[] |
no_license
|
petarnikolovski/Introduction_to_R
|
1c81228499d76268f8d02710f2ff40bba2270a79
|
29a054939fd83eb0fc52506e88d5ed5848f90724
|
refs/heads/master
| 2021-08-12T07:03:57.788839
| 2015-10-22T14:33:16
| 2015-10-22T14:33:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 740
|
r
|
scoping.R
|
# Scoping Rules
# The search list for the R environment can be inspected with:
search()
# An example of lexical scoping: z is not an argument, so it is looked up in
# the environment where myFun was *defined* (the global environment).
# myFun(3, 9) is therefore (3 * 9) / 27 = 1.
z <- 27
myFun <- function(x, y) {
(x * y) / z
}
myFun(3, 9)
# Lexical scoping can be understood best by nested functions.
# If a function is nested, then its parent environment is the
# function in which it was defined, so `root` below remembers the `n`
# that was in effect when customRoot() created it (a closure).
customRoot <- function(n) {
root <- function(x) {
x ^ (1/n)
}
root # return value of root function
}
squareRoot <- customRoot(2)
fiveRoot <- customRoot(5)
squareRoot(25)
fiveRoot(25)
# Lexical Scoping vs. Dynamic Scoping
env <- 27
f <- function(num) {
env <- 2
env ^ 2 + g(num)
}
g <- function(num) {
num * env
}
# Can you predict value of f(2) before hitting ctrl + enter?
# Under R's lexical scoping g() sees the global env (27), not f's local
# env (2): f(2) = 2^2 + 2 * 27 = 58.
f(2)
|
3946aeb9f6015f1e8e583ccdfc3fa1793a2e43d8
|
f75388ed157784a05bdd1c1ba3eed739a8c9fa25
|
/tests/testthat/test-sentenceTokenParse.R
|
29e7fecad56a5094b0affb0c14d5757f04602b8c
|
[] |
no_license
|
tbwhite2/lexRankr
|
9675fa427f72060327b5efd72c2897d2478a41ec
|
42e4c1576f67bd660b077726bcc2d3de769b9ebd
|
refs/heads/master
| 2020-12-30T16:27:41.905598
| 2017-05-11T14:30:23
| 2017-05-11T14:30:23
| 90,988,457
| 0
| 0
| null | 2017-05-11T14:27:54
| 2017-05-11T14:27:54
| null |
UTF-8
|
R
| false
| false
| 1,529
|
r
|
test-sentenceTokenParse.R
|
context("sentenceTokenParse")
# test output classes ----------------------------------------
test_that("object class and structure check", {
testDocs <- c("12345", "Testing 1, 2, 3.", "Is everything working as expected Mr. Wickham?")
testResult <- sentenceTokenParse(testDocs)
# Result is a list of data.frames (sentences + tokens)
expect_equal(class(testResult), "list")
expect_equal(unique(vapply(testResult, class, character(1))), "data.frame")
expect_equal(names(testResult$tokens), c("docId","sentenceId","token"))
expect_true(is.numeric(testResult$tokens$docId))
expect_true(is.character(testResult$tokens$sentenceId))
# NOTE(review): tokens has no "sentence" column (see the names check above);
# `$sentence` only resolves via data.frame partial matching against
# "sentenceId". Probably a typo for $token — confirm and fix.
expect_true(is.character(testResult$tokens$sentence))
})
# test output value -------------------------------------------
test_that("All clean options TRUE", {
testDocs <- c("Testing 1, 2, 3.", "Is everything working as expected Mr. Wickham?")
# Parse with every cleaning option enabled
testResult <- sentenceTokenParse(testDocs,
                                 docId = "create",
                                 removePunc=TRUE,
                                 removeNum=TRUE,
                                 toLower=TRUE,
                                 stemWords=TRUE,
                                 rmStopWords=TRUE)
# Expected sentences come straight from sentenceParse()
expectedResultSentences <- sentenceParse(testDocs)
# Expected tokens: tokenize, flatten, and drop NAs (magrittr `.` pipeline)
expectedResultTokens <- lexRankr::tokenize(testDocs) %>%
  unlist() %>%
  .[which(!is.na(.))]
expect_equal(testResult$sentences, expectedResultSentences)
expect_equal(testResult$tokens$token, expectedResultTokens)
expect_equal(class(testResult), "list")
})
|
8b3ce967766f22f31065ad8b995dec933cf5338e
|
646d95fd691c086c13c4dc968990cddc971d87c6
|
/man/rot.Rd
|
e109402bca0cb47ce5dc41be7ae289130b79dedf
|
[] |
no_license
|
AlunHewinson/geocacheR
|
5ec85b1e9c38b3e72e5e2d0f0e64e959d8dd5989
|
b5599e8449475a33f62a66e803e1caa98506ba63
|
refs/heads/master
| 2021-05-02T02:00:01.625359
| 2020-02-16T18:25:17
| 2020-02-16T18:25:17
| 230,493,492
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 664
|
rd
|
rot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_manipulation.R
\name{rot}
\alias{rot}
\title{Caesar-shift a string by a given number of letters.}
\usage{
rot(x, n = 13, alphabet = standard_alphabet, showWarn = TRUE)
}
\arguments{
\item{x}{A string.}
\item{n}{A number of letters to shift the string by.}
\item{alphabet}{A list containing lower and upper case alphabets}
\item{showWarn}{boolean. Do you want to see warnings about alphabets?}
}
\value{
A string
}
\description{
Caesar-shift a string by a given number of letters.
}
\examples{
rot("abc")
rot("abc", n=2)
rot("abc", n=5, list(lw=letters[1:7], up=LETTERS[1:7]))
}
|
026d5ce3959a653e60cd1405271300fe1b5e8bf2
|
8f501777660f04ddadf06400074bc6b412c90fb9
|
/IsoriX/man/relevate.Rd
|
56eb7e9585fa9e99297c395c3181a9713cd0f18c
|
[] |
no_license
|
PhDMeiwp/IsoriX_project
|
db0e323fd2822a98cf16c4708fc9ef31df85b9f8
|
14510f948a3497a99554e80d563a9131d40550c0
|
refs/heads/master
| 2020-03-09T18:24:08.128374
| 2017-08-14T08:23:56
| 2017-08-14T08:23:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,719
|
rd
|
relevate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relevate.R
\name{relevate}
\alias{relevate}
\title{Prepare the elevation raster}
\usage{
relevate(elevation.raster, isofit = NULL, margin_pct = 5,
aggregation.factor = 0L, aggregation.fun = mean, manual.crop = NULL,
verbose = interactive())
}
\arguments{
\item{elevation.raster}{The elevation raster (\var{RasterLayer})}
\item{isofit}{The fitted isoscape model returned by the function
\code{\link{isofit}}}
\item{margin_pct}{The percentage representing by how much the space should
extend outside the range of the coordinates of the weather stations
(default = 5).}
\item{aggregation.factor}{The number of neighbouring cells (\var{integer})
to merge during aggregation}
\item{aggregation.fun}{The \var{function} used to aggregate cells}
\item{manual.crop}{A vector of four coordinates (\var{numeric}) for manual
cropping, e.g. the spatial extent}
\item{verbose}{A \var{logical} indicating whether information about the
progress of the procedure should be displayed or not while the function is
running. By default verbose is \var{TRUE} if users use an interactive R
session, and \var{FALSE} otherwise.}
}
\value{
The fine-tuned elevation raster of class \var{RasterLayer}
}
\description{
This function prepares the elevation raster for the follow-up analyses. The
size and extent of the elevation raster defines the resolution at which the
isoscape and the origin assignment are defined.
}
\details{
This function allows the user to crop an elevation raster according to
either the extent of the isoscape or manually. If a fitted isoscape object
is provided (see \code{\link{isofit}}), the function extracts the observed
locations of isotopic sources from the model object and crops the elevation
raster accordingly. Alternatively, \code{manual.crop} allows you to crop the
elevation raster to a desired extent. If no model and no coordinates for
manual cropping are provided, no crop will be performed. Importantly,
cropping is recommended as it prevents extrapolations outside the
latitude/longitude range of the source data. Predicting outside the range of
the source data may lead to highly unreliable predictions.
Aggregation changes the spatial resolution of the raster, making computation
faster and using less memory (this can affect the assignment; see note
below). An aggregation factor of zero (or one) keeps the resolution constant
(default).
This function relies on calls to the functions
\code{\link[raster]{aggregate}} and \code{\link[raster]{crop}} from the
package \pkg{\link[raster]{raster}}. It thus shares the limitations of these
functions. In particular, \code{\link[raster]{crop}} expects extents with
increasing longitudes and latitudes. We have tried to partially relax this
constrains for longitude and you can use the argument \code{manual.crop} to
provide longitudes in decreasing order, which is useful to center a isoscape
around the pacific for instance. But this fix does not solve all the
limitations as plotting polygons or points on top of that remains problematic
(see example below). We will work on this in the future but we have other
priorities for now (let us know if you really need this feature).
}
\note{
Aggregating the raster may lead to different results for the
assignment, because the elevation of raster cells changes depending on the
aggregation function (see example below), which in turn affects model
predictions.
}
\examples{
## The examples below will only be run if sufficient time is allowed
## You can change that by typing e.g. IsoriX.options(example_maxtime = XX)
## if you want to allow for examples taking up to ca. XX seconds to run
## (so don't write XX but put a number instead!)
if(IsoriX.getOption("example_maxtime") > 30) {
## We fit the models for Germany
GNIPDataDEagg <- prepdata(data = GNIPDataDE)
GermanFit <- isofit(iso.data = GNIPDataDEagg,
mean.model.fix = list(elev = TRUE, lat.abs = TRUE))
### Let's explore the difference between aggregation schemes
## We aggregate and crop using different settings
elevation.raster1 <- relevate(
elevation.raster = ElevRasterDE,
isofit = GermanFit,
margin_pct = 0,
aggregation.factor = 0)
elevation.raster2 <- relevate(
elevation.raster = ElevRasterDE,
isofit = GermanFit,
margin_pct = 5,
aggregation.factor = 5)
elevation.raster3 <- relevate(
elevation.raster = ElevRasterDE,
isofit = GermanFit,
margin_pct = 10,
aggregation.factor = 5, aggregation.fun = max)
## We build the plots of the outcome of the 3 different aggregation schemes
if(require(rasterVis)) {
plot.aggregation1 <- levelplot(elevation.raster1,
margin = FALSE, main = "Original small raster") +
layer(sp.polygons(CountryBorders)) +
layer(sp.polygons(OceanMask, fill = "blue"))
plot.aggregation2 <- levelplot(elevation.raster2,
margin = FALSE, main = "Small raster aggregated (by mean)") +
layer(sp.polygons(CountryBorders)) +
layer(sp.polygons(OceanMask, fill = "blue"))
plot.aggregation3 <- levelplot(elevation.raster3,
margin = FALSE, main = "Small raster aggregated (by max)") +
layer(sp.polygons(CountryBorders)) +
layer(sp.polygons(OceanMask, fill = "blue"))
## We plot as a panel using lattice syntax:
print(plot.aggregation1, split = c(1, 1, 1, 3), more = TRUE)
print(plot.aggregation2, split = c(1, 2, 1, 3), more = TRUE)
print(plot.aggregation3, split = c(1, 3, 1, 3))
}
}
## The examples below will only be run if sufficient time is allowed
## You can change that by typing e.g. IsoriX.options(example_maxtime = XX)
## if you want to allow for examples taking up to ca. XX seconds to run
## (so don't write XX but put a number instead!)
if(IsoriX.getOption("example_maxtime") > 10) {
### Let's create a raster centered around the pacific
## We first create an empty raster
empty.raster <- raster(matrix(0, ncol = 360, nrow = 180))
extent(empty.raster) <- c(-180, 180, -90, 90)
projection(empty.raster) <- CRS("+proj=longlat +datum=WGS84")
## We crop it around the pacific
pacificA <- relevate(empty.raster, manual.crop = c(110, -70, -90, 90))
extent(pacificA) # note that the extent has changed!
## We plot (note the use of the function shift()!)
if(require(rasterVis)) {
levelplot(pacificA, margin = FALSE, colorkey = FALSE, col = "blue")+
layer(sp.polygons(CountryBorders, fill = "black"))+
layer(sp.polygons(shift(CountryBorders, x = 360), fill = "black"))
}
}
}
\seealso{
\code{\link{ElevRasterDE}} for information on elevation rasters
\code{\link{IsoriX}} for the complete workflow
}
\keyword{utilities}
|
c02c18cfeb99a642049add011b4dbdb31eb14be9
|
1056a8291e402622e3aad540fc9416c2fe652886
|
/FunctionsTwitterApi.R
|
4ada5e28720f621f92237797c5ce4065695b805f
|
[
"MIT"
] |
permissive
|
M3SOulu/TrendMining
|
61e46e2b16f2af4e59ad9f6acf17df3ced0e6b52
|
26035780f358a485b999268e9967d72b08ece1f4
|
refs/heads/master
| 2023-04-28T14:04:35.468283
| 2023-03-10T11:55:10
| 2023-03-10T11:55:10
| 91,310,593
| 12
| 23
|
MIT
| 2023-04-14T17:23:27
| 2017-05-15T07:58:26
|
R
|
UTF-8
|
R
| false
| false
| 1,712
|
r
|
FunctionsTwitterApi.R
|
#install.packages("devtools", dependencies = TRUE)
#install.packages("rJava", depdendencies = TRUE)
library(devtools)
library(rJava)
# Fetch historical tweets for a query via the GetOldTweets java tool (got.jar).
#
# @param query_string search query passed to got.jar's querysearch option.
# @param maxtweets    maximum number of tweets to retrieve (default 100).
# @return data.frame with columns AuthorName, Title, Date, Cites, Abstract, Id.
#
# NOTE(review): relies on the global `getoldtweets_path` pointing at the
# directory that contains got.jar — confirm it is set before calling.
get_twitter_data <- function (query_string, maxtweets=100){
  # Start the JVM and put the GetOldTweets jar on the classpath
  .jinit('.')
  .jaddClassPath(getoldtweets_path)

  # got.jar writes output_got.csv into the working directory, so switch
  # there and guarantee the old directory is restored even on error.
  old_wd <- getwd()
  on.exit(setwd(old_wd), add = TRUE)
  setwd(getoldtweets_path)

  # Build the command line. For a date range, append
  # " since=YYYY-MM-DD until=YYYY-MM-DD" here.
  command <- paste0("java -jar got.jar querysearch=", query_string,
                    " maxtweets=", maxtweets)
  system(command)

  # Read the scraper's output
  csv_file <- file.path(getoldtweets_path, "output_got.csv")
  my_data <- read.csv(csv_file, sep = ";", header = TRUE, quote = "")

  # Build the result in a single vectorized step instead of rbind-ing one
  # row at a time (the old loop was O(n^2) and crashed on an empty result
  # because of 1:nrow(my_data)).
  data.frame(
    AuthorName = my_data$username,
    Title      = my_data$text,
    Date       = my_data$date,
    Cites      = my_data$retweets,
    Abstract   = my_data$hashtags,
    Id         = my_data$id
  )
}
|
a229fc6714fe600d52efc113bf1ba5021af90663
|
2b37e34406d54afb3e714ba651358f4e9bb7430d
|
/R/representative_curves.R
|
95cb95619922da1014f9f24f5aa24a075d2cb7d2
|
[] |
no_license
|
jpmeagher/sdsBAT
|
68c3bacffb9fe5c680c7cd3de9acc49ed946c4f1
|
257a28dbc707155f35cd899799184ffff1948198
|
refs/heads/master
| 2021-01-20T03:14:53.593120
| 2017-05-15T20:47:55
| 2017-05-15T20:47:55
| 89,512,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
representative_curves.R
|
#' Representative spectral curves
#'
#' Computes one mean spectral curve per species: calls are first averaged
#' within each individual (row of `df`), then those individual means are
#' averaged within each species.
#'
#' @param df data frame of call densities; must contain a `species` column
#'   plus call columns holding regularised call vectors.
#' @param ignored_columns indices of the non-call columns (default c(1, 2)).
#' @param vector_length length of each regularised call vector.
#' @return result of `tapply`: one mean curve per species.
#' @export
species_mean <- function(df = smooth_and_regularise_call_densities(), ignored_columns = c(1,2), vector_length = length(df$chirp1[[1]])){
  individual_representation <- individual_means(df = df, ignored_columns = ignored_columns, vector_length = vector_length)
  # Fix: forward vector_length to list_vector_mean (tapply passes `...` to
  # FUN); previously it silently fell back to the hard-coded default of 208,
  # breaking any non-default vector length.
  species_representation <- tapply(individual_representation, unlist(df$species), list_vector_mean, vector_length = vector_length)
  return(species_representation)
}
# Average all calls of each individual (one row of `df` = one individual).
# Returns a list-like result with one mean-call vector (wrapped in a list by
# call_mean) per row.
individual_means <- function(df = smooth_and_regularise_call_densities(), ignored_columns = c(1,2), vector_length = length(df$chirp1[[1]])){
# Drop the non-call columns (by index)
considered_cells <- df[-ignored_columns]
# One element per row: all of that individual's call values, concatenated
individual_means <- apply(considered_cells, 1, unlist)
# Remove missing calls before averaging
individual_means <- lapply(individual_means, na.omit)
# Reshape each individual's calls into a matrix and average across calls
individual_means <- sapply(individual_means, call_mean, vector_length = vector_length)
return(individual_means)
}
# Mean call of one individual: reshape the concatenated call values into a
# (vector_length x n_calls) matrix and average across calls, point by point.
#
# @param observations numeric vector whose length is a multiple of
#   `vector_length` (calls stacked end to end).
# @param vector_length number of points per call vector (default 208).
# @return a single-element list holding the mean call of length `vector_length`.
call_mean <- function(observations, vector_length = 208){
  observations <- matrix(observations, nrow = vector_length)
  # rowMeans is the vectorized, faster equivalent of apply(x, 1, mean)
  list(rowMeans(observations))
}
# Point-wise mean of a list of equal-length numeric vectors.
#
# @param some_list list of numeric vectors, each of length `vector_length`.
# @param vector_length length of each vector (default 208).
# @return numeric vector of length `vector_length` holding the element-wise mean.
list_vector_mean <- function(some_list, vector_length = 208){
  list_as_matrix <- matrix(unlist(some_list), nrow = vector_length)
  # rowMeans is the vectorized, faster equivalent of apply(x, 1, mean)
  rowMeans(list_as_matrix)
}
|
d697be1512367f9a5d47b45950def345d9788559
|
885080787fa0f6300760f2bcc406f0eea3feef64
|
/R/rscript/Main.R
|
435adc0755cd56e437cb852e4edbd3bc692f2685
|
[] |
no_license
|
ming19871211/docker-tools
|
2a9de2aab2ad612556dfea0549f3b68dd9e3f4e1
|
f045804df72c3c001852691e577112cee19753d7
|
refs/heads/master
| 2021-12-14T21:24:57.924974
| 2021-12-14T09:30:48
| 2021-12-14T09:30:48
| 172,426,617
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,312
|
r
|
Main.R
|
### Query method for a single dimension group (members only, no measures) ####
dataQueriesSingleDim <- function(df,dimension,dimkey,doWhere,myorder,seriesDim=c(),runType,mergeShow){
# Empty list that will hold the result set
resList <- list()
# Filter the raw data
df <- oriFilter(df,dimension,doWhere=doWhere)
if (nrow(df) == 0){
return(resList)
}
if(length(seriesDim) > 0 ){
# Build the expression string that comma-joins the series columns,
# e.g. "paste( a,b,sep = ',')"
sers <- c(paste("paste(",seriesDim[1]),seriesDim[-1],"sep = ',')")
sers <- paste(sers,collapse = ",")
# Add the compaName column (comma-joined series values)
df$compaName <- with(df,eval(parse(text = sers)))
dimension <- c(dimension,seriesDim,'compaName')
dimkey <- c(dimkey,seriesDim,'compaName')
}
# Distinct member list of the dimension(s)
resMember <- unique(df[,c(dimension)])
if(length(dimension) == 1)
resMember <- data.frame(resMember)
# Drop members containing NA and coerce back to a data frame
resdf <- as.data.frame(resMember[complete.cases(resMember),])
# Restore the column headers
names(resdf) <- c(dimension)
# Merge/rollup computation
resdf <- mergeShowDim(df,resdf,mergeShow)
# Sort the data frame
resdf <- sortDataFrame(myorder,resdf)
# When an explicit sort column was supplied, coerce the first column to
# character (sorting may have turned it into a factor/vector)
if(!missing(myorder)){
resdf[,1] <- as.character(resdf[,1])
}
resdf <- resFilter(resdf,doWhere=doWhere)
names(resdf) <- c(dimkey,tail(names(resdf),-length(dimkey)))
# Design mode (runType missing or 2) with series dims: return sample data only
if((missing(runType) || runType == 2) && length(seriesDim) > 0){
# Keep only rows matching the series value of the first result row
resdf <- resdf[resdf$compaName == resdf[1,'compaName'],]
}
resList[[1]] <- resdf
return(resList)
}
### Query method for measures only (no dimensions) ####
dataQueriesMetrics <- function(df,measureList,doWhere,myorder,mergeShow){
# Empty list collecting one scalar result per measure
l <- list()
# Filter the raw data
dffilter <- oriFilter(df,doWhere=doWhere)
# Compute each measure in the list separately
for (ele in measureList) {
# metricsType != 3: plain aggregate measure (type 3 is a computed formula,
# handled in the else branch)
if(ele["metricsType"][[1]] != 3){
measure <- ele["measureList"][[1]]
res <- onlyMetrics(dffilter,measure)
columnName <- measure["measure"][[1]]
#dataColumnName = paste(columnName,measure["exprName"][[1]],sep = "_")
#dataColumnName = measureColumnNames(ele)
dataColumnName = ele["resultColumn"][[1]]
}else{
# Computed formula evaluated without dimensions
res <- noDimenstionFormula(dffilter,ele)
dataColumnName <- ele["resultColumn"][[1]]
}
l[dataColumnName] <- res
}
resdf <- as.data.frame(l)
# Merge/rollup computation
resdf <- mergeShowDim(df,resdf,mergeShow)
resList <- list()
resdf <- sortDataFrame(myorder,resdf)
resdf <- resFilter(resdf,doWhere=doWhere)
# Replace column names with the measure keys (disabled)
#resdf <- replaceMeasureKey(resdf,measureList)
# Wrap the filtered result set
resList[[1]] <- resdf
return(resList)
}
### Multi-dimension, multi-measure query method ####
# Parameters:
# df: the data frame to query
# measureList: list of measures, including names and formulas
# dimGropList: list of dimension groups; length >= 2 for side-by-side analysis
# doWhere: filter conditions, split into a raw-data filter and a result-set
#          filter: list(oriFilter=list(), resFilter=list())
#   oriFilter: raw-data filter   list(measureList=list(), noagRe="", aggRel="")
#   resFilter: result-set filter list(measureList=list(), relation="")
# myorder: sort column
# tableCalculaList: table-calculation specification
dataQueries <- function(df,measureList,dimGropList,dimkeyList,doWhere,myorder,tableCalculaList,
queryTotalObj,comparison,seriesDim=c(),runType,mergeShow){
# Empty list that will hold one result set per dimension group
resList <- list()
# Number of dimension-key groups
keyslength <- length(dimkeyList)
# Iterate over the dimension groups (each list element is one group),
# in reverse order
i <- 1
for (dimension in rev(dimGropList)) {
# Series dims already contained in `dimension` are excluded from the
# aggregation and re-attached afterwards, which makes the computation faster.
# surplusDim: series/comparison dims not already in this dimension group
surplusDim <- setdiff(seriesDim,dimension)
# Filter the raw data
df <- oriFilter(df,dimension,doWhere)
if (nrow(df) == 0){
return(resList)
}
# Aggregate over this dimension group
resdf <- singleGroupQueries(df,measureList,c(dimension,surplusDim))
# Merge/rollup computation
resdf <- mergeShowDim(df,resdf,mergeShow)
# Sort the data
resdf <- sortDataFrame(myorder,resdf)
# Result-set filter
resdf <- resFilter(resdf,dimension,doWhere)
# Replace column names with measure keys (disabled)
# resdf <- replaceMeasureKey(resdf,measureList)
# If series dims exist, add a compaName column (comma-joined series values)
# and drop the series dims; after table calculations and totals the
# compaName column is mapped back to the series dims.
seriesDimRe <- c()
if(length(seriesDim) > 0 ){
# (disabled) trim whitespace around dimension members
# library(stringr)
# for(dim in seriesDim)
# df[,dim] <- str_trim(df[,dim],"both")
# Build the expression string that comma-joins the series columns
sers <- c(paste("paste(",seriesDim[1]),seriesDim[-1],"sep = ',')")
sers <- paste(sers,collapse = ",")
# Add compaName to the result...
resdf$compaName <- with(resdf,eval(parse(text = sers)))
# ...and to the raw data as well
df$compaName <- with(df,eval(parse(text = sers)))
# Move the series dims to the end; if a column already existed, the repeat
# add changes its name, which is repaired further below
resdf <- resdf[,c(setdiff(names(resdf),surplusDim),seriesDim)]
colNameLen <- length(names(resdf))
seriesDimRe <- names(resdf)[(colNameLen-length(seriesDim)+1):colNameLen]
}
# resdf <- transform(resdf,column504991=edurank(bbsj_xscj_YW_avg,'asc',bbsj_xscj_Q))
# Table calculations and subtotals
if(!missing(tableCalculaList) || !missing(queryTotalObj))
resdf <- tableCompute_SubTotal(resdf,df,seriesDimRe,dimension,measureList,i,queryTotalObj,tableCalculaList,mergeShow)
# Replace the dimension column names with their keys
key <- NULL
if(length(dimension) > 0){
colname <- names(resdf)
key <- dimkeyList[[keyslength-i+1]]
names(resdf) <- c(key,tail(colname,-length(key)))
}
# Restore the series column names
if(length(seriesDim) > 0){
# library(splitstackshape)
# colNa <- names(resdf)
# resdf <- as.data.frame(cSplit(resdf,"compaName", ",",drop = FALSE))
# names(resdf) <- c(colNa,seriesDim)
colNa <- names(resdf)
for (serIndex in 1:length(seriesDimRe)) {
names(resdf)[which(seriesDimRe[serIndex]==colNa)] <- seriesDim[serIndex]
}
}
# Comparison calculation
if(!missing(comparison))
resList <- compaFun(resdf,df,dimension,key,seriesDim,measureList,comparison)
else
resList[[i]] <- resdf
i <- i + 1
}
# Design mode (runType missing or 2) with series dims: return sample data only
if((missing(runType) || runType == 2) && length(seriesDim) > 0){
# With comparison objects only the first result set is filtered
if(!missing(comparison))
index <- 1
else
index <- length(resList)
# Build the filter expression from the series values of the first result row
filterValue <- list()
for(seri in seriesDim){
filterValue[[length(filterValue)+1]] <- paste(seri," == '",resList[[1]][1,seri],"'",sep = "")
}
filterValue <- paste(filterValue,collapse = " & ")
# Apply the filter (NA comparisons are kept)
for(j in c(1:index)){
filterDa <- resList[[j]]
isArr <- with(filterDa,eval(parse(text = filterValue)))
isArr[is.na(isArr)] <- TRUE
resList[[j]] <- filterDa[isArr,]
}
}
# Only one dimension group: return its result directly
if (i == 2)
return(resList)
# Side-by-side analysis: merge the per-group results into one frame
library(plyr)
resda <- rbind.fill(resList)
for(dim in rev(Reduce(union,rev(dimkeyList)))){
resda[,dim] <- as.character(resda[,dim])
resda <- resda[order(resda[,dim],na.last=FALSE),]
}
resda <- sortStratifie(resda,myorder,dimGropList,dimkeyList)
# Return the merged result set
return(list(resda))
}
### Replace result columns named after measures with the measures' key values ####
# For each measure, the first result column whose name equals the measure's
# `measure` field is renamed to the measure's `resultColumn` key.
replaceMeasureKey <- function(resdf, measureList) {
  if (length(measureList) > 0) {
    current <- names(resdf)
    for (measure in measureList) {
      # First column matching this measure's name, if any
      idx <- match(measure["measure"][[1]], current)
      if (!is.na(idx)) {
        current[idx] <- measure["resultColumn"][[1]]
      }
    }
    names(resdf) <- current
  }
  return(resdf)
}
### Row-count query ####
# Count the rows that survive both the raw-data filter and the
# result-set filter.
dataCount <- function(df, doWhere) {
  filtered <- resFilter(oriFilter(df, doWhere = doWhere), doWhere = doWhere)
  nrow(filtered)
}
### Detail query: total row count (used for paging; the original header
### comment mislabeled this as the detail query itself) ####
dataDetailCount <- function(df,column,keys,pageObject,doWhere,myorder,seriesColumn=c(),runType){
# Filter the raw data frame, then apply the result-set filter
dffilter <- oriFilter(df,doWhere=doWhere)
dffilter <- resFilter(dffilter,doWhere=doWhere)
# Sort the data
dffilter <- sortDataFrame(myorder,dffilter)
# Design mode (runType missing or 2) with series columns: count sample data only
if((missing(runType) || runType == 2) && length(seriesColumn) > 0){
# Build the filter expression from the first row's series values
filterValue <- list()
for(seri in seriesColumn){
filterValue[[length(filterValue)+1]] <- paste(seri," == '",dffilter[1,seri],"'",sep = "")
}
filterValue <- paste(filterValue,collapse = " & ")
# Apply the filter (NA comparisons are dropped)
isArr <- with(dffilter,eval(parse(text = filterValue)))
isArr[is.na(isArr)] <- FALSE
dffilter <- dffilter[isArr,]
}
totalCount <- nrow(dffilter)
return(totalCount)
}
### Detail (row-level) query with paging ####
dataDetail <- function(df,column,keys,pageObject,doWhere,myorder,seriesColumn=c(),runType){
# Filter the raw data frame, then apply the result-set filter
dffilter <- oriFilter(df,doWhere=doWhere)
dffilter <- resFilter(dffilter,doWhere=doWhere)
# Sort the data
resdf <- sortDataFrame(myorder,dffilter)
# Design mode (runType missing or 2) with series columns: return sample data only
if((missing(runType) || runType == 2) && length(seriesColumn) > 0){
# Build the filter expression from the first row's series values
filterValue <- list()
for(seri in seriesColumn){
filterValue[[length(filterValue)+1]] <- paste(seri," == '",resdf[1,seri],"'",sep = "")
}
filterValue <- paste(filterValue,collapse = " & ")
# Apply the filter (NA comparisons are dropped)
isArr <- with(resdf,eval(parse(text = filterValue)))
isArr[is.na(isArr)] <- FALSE
resdf <- resdf[isArr,]
}
totalCount <- nrow(resdf)
# Work out the page window [startIndex, endIndex]
# NOTE(review): when startIndex > totalCount (page past the end),
# startIndex:endIndex counts downwards and returns wrong rows — confirm
# callers never request an out-of-range page.
page <- pageObject["page"][[1]]
pageSize <- pageObject["pageSize"][[1]]
endIndex <- page*pageSize
if(endIndex > totalCount)
endIndex = totalCount
startIndex <- (page - 1)*pageSize + 1
# NOTE(review): `length(seriesColumn > 0)` has a misplaced parenthesis;
# it happens to behave like `length(seriesColumn) > 0` (0 is falsy), but
# should be rewritten for clarity.
if(length(seriesColumn > 0)){
column <- c(column,seriesColumn)
keys <- c(keys,seriesColumn)
}
resdf <- resdf[c(startIndex:endIndex),column]
if(length(column) == 1){
resdf <- as.data.frame(resdf)
# Restore the single column header
names(resdf)[1] <- column
}
names(resdf) <- keys
return(resdf)
}
### Add range-based grouping columns on the dimension(s) ####
# Splits `sdf` by the dimension values and evaluates the expressions in
# `groupObject` ("point" optional setup, "newCol" the column definition)
# inside each group.
addGroup <- function(sdf,dimensition,groupObject){
# Single dimension whose members are numeric-looking strings:
spLi <- list()
if(length(dimensition) == 1 && !is.na(suppressWarnings(as.numeric(as.character(sdf[1,dimensition]))))){
# Prefix the members with '#' so the split keys stay character
sdf[,dimensition] <- paste("#",sdf[,dimensition],sep = "")
spLi[length(spLi)+1] <- dimensition
groupCol <- dimensition
}else if(length(dimensition) > 1){
# Multiple dimensions: combine them into a single groupCol key column
sers <- c(paste("paste(",dimensition[1]),dimensition[-1],"sep = '`%r%`')")
sers <- paste(sers,collapse = ",")
# Build the grouping key column
sdf$groupCol <- with(sdf,eval(parse(text = sers)))
groupCol <- "groupCol"
}else{
groupCol <- dimensition
}
# Evaluate the group expressions per split, then reassemble
dflist <- split(sdf,sdf[,groupCol])
dflist <- lapply(dflist, function(x){
if(!is.null(groupObject[["point"]]))
eval(parse(text = groupObject[["point"]]))
eval(parse(text = groupObject[["newCol"]]))
})
sdf <- unsplit(dflist,f = sdf[,groupCol])
# Strip the '#' prefix added above
if(length(spLi) > 0){
spdim <- spLi[[1]]
sdf[,spdim] <- as.character(sdf[,spdim])
sdf[,spdim] <- vapply(strsplit(sdf[,spdim],"#"),function(x){x[2]},FUN.VALUE = c(""))
}
# NOTE(review): `nrow(sdf) != nrow(sdf)` is always FALSE, so this branch is
# dead code; the intent was probably to compare against the pre-split row
# count — confirm and fix or remove.
if(nrow(sdf) != nrow(sdf))
sdf <- merge.data.frame(sdf,sdf,all = T)
return(sdf)
}
# Comparison calculation ####
# df        aggregated result data
# dfOrig    raw data
# measureList  list of measures
# dimension / dimkey  row/column dimensions and their keys
# compaObject  list of comparison objects, each with:
#   compaName   (required, String) comparison object name
#   compaType   (required, String) comparison type: 1 = dimension;
#               2 = dimension member; 3 = measure
#   dimension   (required, String) dimension column name
#   filter      (optional, String) dimension filter expression
#   compaMeasure (optional, Object) comparison measure object
#   resType     (required, String) result-set type: 1 = join; 2 = nojoin
compaFun <- function(df,dfOrig,dimension,dimkey,seriesDim,measureList,compaObject){
# Collect the dimensions referenced by the comparison objects
compaDim <- c()
for(com in compaObject)
compaDim <- c(compaDim,com[['dimension']])
# Template frame containing the row/column dims, series dims and comparison dims
ser_comDim <- unique(c(seriesDim,compaDim))
temDf <- unique(dfOrig[,c(dimension,ser_comDim)])
if(!is.data.frame(temDf))
temDf <- data.frame(temDf)
names(temDf) <- c(dimkey,ser_comDim)
# Add empty comparison-dimension columns to the base result
dfCol <- names(df)
for(dim in compaDim){
if(!dim %in% dfCol)
df[,dim] <- NA
}
library(plyr)
# Result sets that must be joined against the template
compaDas_join <- list(df)
# Result sets kept separate (nojoin)
compaDas_nojo <- list()
# Column names of the measures
measureNames <- vapply(measureList,function(x){x[['resultColumn']]},c(""))
for(compa in compaObject){
# Apply the per-comparison filter, if any
# NOTE(review): `<<-` inside with() writes dffilter into an enclosing
# (possibly global) scope instead of a local variable — fragile; confirm.
if(length(compa[['filter']]) != 0)
with(dfOrig,{dffilter <<- dfOrig[eval(parse(text = compa[['filter']])),]})
else
dffilter <- dfOrig
# Substitute the comparison measure formula into the measure list
if(length(measureList) > 0 && length(compa[['compaMeasure']]) > 0){
for(i in 1:length(measureList)){
if(measureList[[i]][['metricsType']] %in% c(1,2)){
compa[['compaMeasure']][['measure']] <- measureList[[i]][['measure']]
measureList[[i]][['measureList']] <- compa[['compaMeasure']]
}
}
}
# Run the aggregation (with or without dimensions)
if(length(c(compa[['dimension']],dimension)) != 0){
da <- singleGroupQueries(dffilter,measureList,c(dimension,compa[['dimension']]))
if(length(dimension) > 0)
names(da) <- c(dimkey,compa[['dimension']],tail(names(da),-length(c(dimension,compa[['dimension']]))))
}else{
da <- dataQueriesMetrics(dffilter,measureList)[[1]]
}
# Join against the template, or keep separate, depending on resType
if(compa[['resType']] == 1){
da <- join(temDf,da,by=c(dimkey,compa[['dimension']]),match="first")
# Tag the rows with the comparison object's name
da[,'compaName'] <- rep(compa[['compaName']],nrow(da))
compaDas_join[[length(compaDas_join)+1]] <- da
}else{
# Tag the rows with the comparison object's name
da[,'compaName'] <- rep(compa[['compaName']],nrow(da))
compaDas_nojo[[length(compaDas_nojo)+1]] <- da
}
}
# Stack all join-able result sets into one frame
compaDas_join <- rbind.fill(compaDas_join)
# Return the joined set plus the standalone sets in one list
union(list(compaDas_join),compaDas_nojo)
}
#判断字符型数字是否需要添加一个
|
dd04368b8fbe6476b7b3250b8bb1ed99aa1ee074
|
74cf495f1758e3e791395694a9eba2b7c08ed531
|
/GEUVADIS.11-17-15/ui.R
|
2d666e2a58925f192048b15d819555f9128c4600
|
[] |
no_license
|
DrOppenheimer/Shiny_fun
|
c423dfaa36fa401fa82c7e7114c76fba0d8133c1
|
97e7e49295e52fd4c25394031840d9baa1f4c87d
|
refs/heads/master
| 2021-01-18T23:10:24.963976
| 2016-06-03T21:42:03
| 2016-06-03T21:42:03
| 46,286,375
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
ui.R
|
library(shiny)
library(shinyRGL)
# Define the UI: an interactive 3D WebGL rendering of expression profiles,
# with a sidebar control for choosing how the points are coloured.
# (The comments previously referenced a histogram/slider boilerplate that
# no longer matches the widgets below.)
shinyUI(fluidPage(

  # Application title
  titlePanel("Expression Profiles for a Subset of GEUVADIS Samples"),

  # Sidebar (shown on the right) with a colour-by selector and a 2D plot
  sidebarLayout(position="right",
    sidebarPanel(
      selectInput("colorby", "Color By:",
        choices=c("Population" = "Population",
                  "Performer" = "Performer"
        )),
      plotOutput("distPlot")
    ),

    # Main panel: the WebGL output (rgl via shinyRGL)
    mainPanel(
      # webGLOutput("myWebGL", width=800, height=800)
      webGLOutput("myWebGL", width=1200, height=1200)
    )
  )
))
|
b955628d0d8911958168796aef4c456afcb5a0c4
|
1be1c1a52dcdc8b63e19b430f4fba64f8403b7b8
|
/man/cophylogeny.Rd
|
7358e8368a8ebf4f795c03cf491c8797971ef3ff
|
[] |
no_license
|
JanEngelstaedter/cophy
|
5af999cd1aed5261a1ab5052e1de3348698e146c
|
81853cd5b56502e6f9ab5595606feeb8a37b9bb4
|
refs/heads/master
| 2023-08-16T20:28:45.084296
| 2023-08-14T06:18:45
| 2023-08-14T06:18:45
| 134,203,070
| 3
| 3
| null | 2021-02-18T06:29:43
| 2018-05-21T01:36:30
|
R
|
UTF-8
|
R
| false
| true
| 661
|
rd
|
cophylogeny.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert.R
\name{cophylogeny}
\alias{cophylogeny}
\title{Creates a cophylogeny object}
\usage{
cophylogeny(HP.tree)
}
\arguments{
\item{HP.tree}{a list of a pre-built host phylogenetic tree and a parasite
phylogenetic tree of class 'cophylogeny' or 'data.frame'}
}
\value{
this function returns an object of class 'cophylogeny' which can be used
for plotting and printing.
}
\description{
This function creates an object of class 'cophylogeny', which can be passed
to \code{\link{plot.cophylogeny}}. This object must contain at least one
host and one parasite tree.
}
\keyword{internal}
|
f32a9b9a10759458f6982e65b43963b4487526b8
|
d9f588314bfea0c7e066a4e9b19a4e8bbad07153
|
/test.r
|
98e52707c247ce3e62c1f04d2bc3d02adbea00df
|
[] |
no_license
|
karanpanjabi/r-compiler
|
9d4e56271b4bc94849ddeedcddbe590e566073d3
|
9ba2d1a8c35dd07ce2a729cccd6fd299f534fdf3
|
refs/heads/master
| 2022-07-27T07:32:42.661250
| 2020-05-19T15:04:51
| 2020-05-19T15:04:51
| 239,592,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
r
|
test.r
|
# this is a comment
# NOTE(review): this file is not valid R — the `if a == 2)` below is missing
# its opening parenthesis, and the final while loop never updates `i`.
# It reads like a deliberate parser/compiler test fixture; confirm before
# "fixing" the syntax.
a = 3
b = "hello"
c <- 2.3
if a == 2)
{
a = 3
}
# egd <- 567
print(a)
a <- 3.5
for (i in 1:4) {
print(i)
}
2 + 3*4
(2+3)*4
while(i < 5)
{
a = 3
print(a)
}
|
4d81379dd46be3599f23eec3dc7d03dc4716a491
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/CENFA/R/vulnerability.R
|
81602375bf3b992d8d12d430b69c3af376dc81cd
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,215
|
r
|
vulnerability.R
|
#' Climatic vulnerability
#'
#' Calculates the climatic vulnerability of a species using a \code{cnfa} and
#' \code{departure} object.
#'
#' @param cnfa Object of class \code{cnfa}
#' @param dep Object of class \code{departure}
#' @param method character. What type of mean should be used to combine sensitivity
#' and exposure. Choices are "arithmetic" and "geometric"
#' @param w numeric. Optional vector of length two specifying the relative weights of
#' sensitivity and exposure. See Details
#' @param parallel logical. If \code{TRUE} then multiple cores are utilized
#' @param n numeric. Number of cores to use for calculation
#' @param filename character. Output filename (optional)
#' @param ... Additional arguments for file writing as for \code{\link[raster]{writeRaster}}
#'
#' @details
#' The values of the vulnerability raster are calculated by combining the sensitivity
#' \eqn{\sigma} and the exposure \eqn{\epsilon}. If \code{method = "arithmetic"},
#' they will be combined as
#'
#' \eqn{\nu = (w_1\sigma + w_2\epsilon) / (\sum_i w_i).}
#'
#' If \code{method = "geometric"}, they will be combined as
#'
#' \eqn{\nu = \sqrt(\sigma * \epsilon).}
#'
#' @references
#' Rinnan, D. Scott and Lawler, Joshua. Climate-niche factor analysis: a spatial
#' approach to quantifying species vulnerability to climate change. Ecography (2019):
#' \href{https://doi.org/10.1111/ecog.03937}{doi:10.1111/ecog.03937}.
#'
#' @examples
#' \dontrun{
#' mod1 <- cnfa(x = climdat.hist, s.dat = ABPR, field = "CODE")
#' dep <- departure(x = climdat.hist, y = climdat.fut, s.dat = ABPR)
#' vuln <- vulnerability(cnfa = mod1, dep = dep)
#' }
#'
#' @return Returns an S4 object of class \code{vulnerability} with the following slots:
#' \describe{
#' \item{call}{Original function call}
#' \item{vf}{Vulnerability factor. Vector of length p that describes the amount of
#' vulnerability in each climate variable}
#' \item{vulnerability}{Magnitude of the vulnerability factor}
#' \item{ras}{RasterLayer of climate vulnerability}
#' \item{weights}{Raster layer of weights used for departure calculation}
#' }
#'
#' @seealso \code{\link{departure}}
#'
#' @export
# S4 generic; the concrete cnfa/departure method below does the work.
setGeneric("vulnerability", function(cnfa, dep, method = "geometric", w, parallel = FALSE, n = 1, filename = "", ...) {
standardGeneric("vulnerability")
})
#' @rdname vulnerability
setMethod("vulnerability",
signature(cnfa = "cnfa", dep = "departure"),
function(cnfa, dep, method = "geometric", w, parallel = FALSE, n = 1, filename = "", ...) {
# Capture the original call for storage in the result object
call <- sys.call(sys.parent())
call <- match.call(vulnerability, call)
# Default to equal weighting of sensitivity and exposure
if (missing(w)) w <- c(1, 1)
if (length(w) != 2) {
warning("more than two weights supplied; defaulting to equal weights.")
w <- c(1, 1)
}
# d: departure factor shifted by +1 (presumably to keep values positive for
# the geometric combination — confirm; note the related commented-out
# e.map shift below)
d <- dep@df + 1
s <- cnfa@sf
ras <- dep@ras
# Combine sensitivity and departure factors per climate variable
if (method == "arithmetic") {
v <- (s*w[1] + d*w[2]) / sum(w)
} else if (method == "geometric") {
v <- (s^w[1] * d^w[2])^(1 / sum(w))
}
names(v) <- names(s)
# Overall vulnerability: magnitude of the combined factor
V <- sqrt(mean(v))
filename <- trim(filename)
if (!canProcessInMemory(ras) && filename == '') {
filename <- rasterTmpFile()
}
# Build the sensitivity and exposure maps, then combine them cell-wise
s.map <- sensitivity_map(cnfa, parallel = parallel, n = n)
e.map <- exposure_map(dep, parallel = parallel, n = n)
#e.map <- e.map + 1
if (method == "arithmetic") {
f1 <- function(x,y) (x*w[1] + y*w[2]) / sum(w)
vuln.ras <- overlay(s.map, e.map, fun = f1, filename = filename, ...)
} else if (method == "geometric") {
# NOTE(review): equal weights are reset to c(1, 1) while unequal weights
# are normalised to sum to 1 — confirm this asymmetry is intended.
if(w[1] == w[2]) {
w <- c(1, 1)
} else {
w <- w / sum(w)
}
f1 <- function(x,y) (x^w[1] * y^w[2])^(1 / sum(w))
vuln.ras <- overlay(s.map, e.map, fun = f1, filename = filename, ...)
}
vuln <- methods::new("vulnerability", call = call, vf = v, vulnerability = V, ras = vuln.ras, weights = dep@weights)
return(vuln)
}
)
|
af33383548a672ef24916101e01ca5d2f14cd5a8
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/inst/snippet/nfl-bt01.R
|
05d005a57dd598837bd29d09e615674e0fcec1d4
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
nfl-bt01.R
|
# Derive game outcomes from the 2007 NFL scores: the score differential,
# the winning and losing team names, and a home-win indicator. Columns are
# added one at a time so each step can reference the previous one.
NFL <- NFL2007
NFL <- mutate(NFL, dscore = homeScore - visitorScore)
NFL <- mutate(NFL, winner = ifelse(dscore > 0, home, visitor))
NFL <- mutate(NFL, loser = ifelse(dscore > 0, visitor, home))
NFL <- mutate(NFL, homeTeamWon = dscore > 0)
# Peek at the first three games.
head(NFL, 3)
|
75eb080583ed556e8f2b495d0c0b6c41881be604
|
868e671dcb7c2fbadf6b214e560ccb436dc3d839
|
/man/im_cols.Rd
|
3698dff1bf0cf3a893ec8c0334ba801a5e316953
|
[] |
no_license
|
nationalparkservice/IMDColorPalette
|
132ec0ebaadebe48e47653a2a33e709187cc7220
|
9b691cc06f2fcb114f513881720215712a78c56a
|
refs/heads/master
| 2023-01-05T11:10:24.556831
| 2020-11-06T23:04:24
| 2020-11-06T23:04:24
| 309,830,213
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 425
|
rd
|
im_cols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color_palette.R
\name{im_cols}
\alias{im_cols}
\title{Function to extract colors as hex codes}
\usage{
im_cols(...)
}
\arguments{
\item{...}{Character names of colors}
}
\value{
A named character vector of hex codes
}
\description{
Function to extract colors as hex codes
}
\examples{
im_cols("RICH BROWN")
im_cols("EARTH GREEN", "PALE BROWN")
}
|
231fcc0cedff5d5e1095383784a07c30bcfec709
|
71236830d36a3d76a37252515cf93c45fcbb9cec
|
/R_code/Preprocessing/concatenate_fields.R
|
4d32db8be5b4cddbe8b2ccc3f687b2cfffb32320
|
[
"MIT"
] |
permissive
|
pcollender/Firearm-injury-geocoding
|
58830f07b13d64caeecb40614cb268d695328bb8
|
5460944c802a329a7c093d58d45773da799e4608
|
refs/heads/main
| 2023-04-27T04:34:28.246943
| 2021-04-21T18:25:38
| 2021-04-21T18:25:38
| 348,461,773
| 0
| 0
|
MIT
| 2021-04-07T18:44:43
| 2021-03-16T19:02:58
|
R
|
UTF-8
|
R
| false
| false
| 323
|
r
|
concatenate_fields.R
|
# Usage: Rscript concatenate_fields.R <csv-file> <field1> [<field2> ...]
# Reads a CSV, concatenates the named fields (space-separated) into a new
# ADDRESS column, previews the result, and writes it to temp_data.csv.
args = commandArgs(trailingOnly = TRUE)
#should be file name followed by fields to be concatenated
data = read.csv(args[1], stringsAsFactors = FALSE)
fields = args[-1]
# drop = FALSE keeps a data frame even when a single field is supplied;
# without it, data[, fields] collapses to a vector and do.call(paste, ...)
# would paste individual elements as separate arguments instead of pasting
# the columns row-wise.
data$ADDRESS = do.call(paste, data[, fields, drop = FALSE])
cat('\n')
head(data)
cat('\nNew data written to temp_data.csv\n')
write.csv(data, 'temp_data.csv', row.names = FALSE)
|
80bb79acb5dd832de0f1ca29466ed09e3cec8fe4
|
6a0cc324741a1651a1937e13a5ed2e968c731566
|
/man/drug_categories.Rd
|
71576fb29b5e4b948f500c8a4143707b263aeb47
|
[] |
no_license
|
cran/dbparser
|
1cc45add1e836e89b5269709cccd2c2358c925d6
|
8136c971d88388f9fdabfd2f4ce56a957ec6997e
|
refs/heads/master
| 2023-04-09T18:03:16.200329
| 2023-03-27T06:30:05
| 2023-03-27T06:30:05
| 162,203,907
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 498
|
rd
|
drug_categories.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drug_parsers.R
\name{drug_categories}
\alias{drug_categories}
\title{Drug Categories parser}
\usage{
drug_categories()
}
\value{
a tibble with 2 variables:
\describe{
\item{category}{category name}
\item{mesh-id}{The Medical Subjects Headings (MeSH) identifier for the
category.}
\item{\emph{drugbank_id}}{drugbank id}
}
}
\description{
General categorizations of the drug.
}
\keyword{internal}
|
750d5732e963121220536f439504f2d68ab15b76
|
feedc553c26cd01e9c9a0335f157d48ce58adf51
|
/man/DataBackendDplyr.Rd
|
1ae7e527bb67a2ade8f9d0b821d170a196a15faa
|
[] |
no_license
|
cran/mlr3db
|
c0280baeffbea302dab808653a558d0b156ec417
|
c6bb90b40a2c9711e806d929895e9bd254befd24
|
refs/heads/master
| 2022-09-07T02:19:32.872601
| 2022-08-08T09:10:02
| 2022-08-08T09:10:02
| 218,371,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 10,741
|
rd
|
DataBackendDplyr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataBackendDplyr.R
\name{DataBackendDplyr}
\alias{DataBackendDplyr}
\title{DataBackend for dplyr/dbplyr}
\description{
A \link[mlr3:DataBackend]{mlr3::DataBackend} using \code{\link[dplyr:tbl]{dplyr::tbl()}} from packages \CRANpkg{dplyr}/\CRANpkg{dbplyr}.
This includes \code{\link[tibble:tibble]{tibbles}} and abstract database connections interfaced by \CRANpkg{dbplyr}.
The latter allows \link[mlr3:Task]{mlr3::Task}s to interface an out-of-memory database.
}
\examples{
# Backend using a in-memory tibble
data = tibble::as_tibble(iris)
data$Sepal.Length[1:30] = NA
data$row_id = 1:150
b = DataBackendDplyr$new(data, primary_key = "row_id")
# Object supports all accessors of DataBackend
print(b)
b$nrow
b$ncol
b$colnames
b$data(rows = 100:101, cols = "Species")
b$distinct(b$rownames, "Species")
# Classification task using this backend
task = mlr3::TaskClassif$new(id = "iris_tibble", backend = b, target = "Species")
print(task)
task$head()
# Create a temporary SQLite database
con = DBI::dbConnect(RSQLite::SQLite(), ":memory:")
dplyr::copy_to(con, data)
tbl = dplyr::tbl(con, "data")
# Define a backend on a subset of the database
tbl = dplyr::select_at(tbl, setdiff(colnames(tbl), "Sepal.Width")) # do not use column "Sepal.Width"
tbl = dplyr::filter(tbl, row_id \%in\% 1:120) # Use only first 120 rows
b = DataBackendDplyr$new(tbl, primary_key = "row_id")
print(b)
# Query distinct values
b$distinct(b$rownames, "Species")
# Query number of missing values
b$missings(b$rownames, b$colnames)
# Note that SQLite does not support factors, column Species has been converted to character
lapply(b$head(), class)
# Cleanup
rm(tbl)
DBI::dbDisconnect(con)
}
\section{Super class}{
\code{\link[mlr3:DataBackend]{mlr3::DataBackend}} -> \code{DataBackendDplyr}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{levels}}{(named \code{list()})\cr
List (named with column names) of factor levels as \code{character()}.
Used to auto-convert character columns to factor variables.}
\item{\code{connector}}{(\verb{function()})\cr
Function which is called to re-connect in case the connection became invalid.}
}
\if{html}{\out{</div>}}
}
\section{Active bindings}{
\if{html}{\out{<div class="r6-active-bindings">}}
\describe{
\item{\code{rownames}}{(\code{integer()})\cr
Returns vector of all distinct row identifiers, i.e. the contents of the primary key column.}
\item{\code{colnames}}{(\code{character()})\cr
Returns vector of all column names, including the primary key column.}
\item{\code{nrow}}{(\code{integer(1)})\cr
Number of rows (observations).}
\item{\code{ncol}}{(\code{integer(1)})\cr
Number of columns (variables), including the primary key column.}
\item{\code{valid}}{(\code{logical(1)})\cr
Returns \code{NA} if the data does not inherits from \code{"tbl_sql"} (i.e., it is not a real SQL data base).
Returns the result of \code{\link[DBI:dbIsValid]{DBI::dbIsValid()}} otherwise.}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-DataBackendDplyr-new}{\code{DataBackendDplyr$new()}}
\item \href{#method-DataBackendDplyr-finalize}{\code{DataBackendDplyr$finalize()}}
\item \href{#method-DataBackendDplyr-data}{\code{DataBackendDplyr$data()}}
\item \href{#method-DataBackendDplyr-head}{\code{DataBackendDplyr$head()}}
\item \href{#method-DataBackendDplyr-distinct}{\code{DataBackendDplyr$distinct()}}
\item \href{#method-DataBackendDplyr-missings}{\code{DataBackendDplyr$missings()}}
}
}
\if{html}{\out{
<details open><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="mlr3" data-topic="DataBackend" data-id="format"><a href='../../mlr3/html/DataBackend.html#method-DataBackend-format'><code>mlr3::DataBackend$format()</code></a></span></li>
<li><span class="pkg-link" data-pkg="mlr3" data-topic="DataBackend" data-id="print"><a href='../../mlr3/html/DataBackend.html#method-DataBackend-print'><code>mlr3::DataBackend$print()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DataBackendDplyr-new"></a>}}
\if{latex}{\out{\hypertarget{method-DataBackendDplyr-new}{}}}
\subsection{Method \code{new()}}{
Creates a backend for a \code{\link[dplyr:tbl]{dplyr::tbl()}} object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DataBackendDplyr$new(
data,
primary_key,
strings_as_factors = TRUE,
connector = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{data}}{(\code{\link[dplyr:tbl]{dplyr::tbl()}})\cr
The data object.
Instead of calling the constructor yourself, you can call \code{\link[mlr3:as_data_backend]{mlr3::as_data_backend()}}
on a \code{\link[dplyr:tbl]{dplyr::tbl()}}.
Note that only objects of class \code{"tbl_lazy"} will be converted to a \link{DataBackendDplyr}
(this includes all connectors from \CRANpkg{dbplyr}).
Local \code{"tbl"} objects such as \code{\link[tibble:tibble]{tibbles}} will converted to a
\link[mlr3:DataBackendDataTable]{DataBackendDataTable}.}
\item{\code{primary_key}}{(\code{character(1)})\cr
Name of the primary key column.}
\item{\code{strings_as_factors}}{(\code{logical(1)} || \code{character()})\cr
Either a character vector of column names to convert to factors, or a single logical flag:
if \code{FALSE}, no column will be converted, if \code{TRUE} all string columns (except the primary key).
For conversion, the backend is queried for distinct values of the respective columns
on construction and their levels are stored in \verb{$levels}.}
\item{\code{connector}}{(\verb{function()})\cr If not \code{NULL}, a function which re-connects to the database in case the connection has become invalid.
Database connections can become invalid due to timeouts or if the backend is serialized
to the file system and then de-serialized again.
This round trip is often performed for parallelization, e.g. to send the objects to remote workers.
\code{\link[DBI:dbIsValid]{DBI::dbIsValid()}} is called to validate the connection.
The function must return just the connection, not a \code{\link[dplyr:tbl]{dplyr::tbl()}} object!
Note that this this function is serialized together with the backend, including
possible sensitive information such as login credentials.
These can be retrieved from the stored \link[mlr3:DataBackend]{mlr3::DataBackend}/\link[mlr3:Task]{mlr3::Task}.
To protect your credentials, it is recommended to use the \CRANpkg{secret} package.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DataBackendDplyr-finalize"></a>}}
\if{latex}{\out{\hypertarget{method-DataBackendDplyr-finalize}{}}}
\subsection{Method \code{finalize()}}{
Finalizer which disconnects from the database.
This is called during garbage collection of the instance.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DataBackendDplyr$finalize()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{logical(1)}, the return value of \code{\link[DBI:dbDisconnect]{DBI::dbDisconnect()}}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DataBackendDplyr-data"></a>}}
\if{latex}{\out{\hypertarget{method-DataBackendDplyr-data}{}}}
\subsection{Method \code{data()}}{
Returns a slice of the data.
Calls \code{\link[dplyr:filter]{dplyr::filter()}} and \code{\link[dplyr:select]{dplyr::select()}} on the table and converts it to a \code{\link[data.table:data.table]{data.table::data.table()}}.
The rows must be addressed as vector of primary key values, columns must be referred to via column names.
Queries for rows with no matching row id and queries for columns with no matching
column name are silently ignored.
Rows are guaranteed to be returned in the same order as \code{rows}, columns may be returned in an arbitrary order.
Duplicated row ids result in duplicated rows, duplicated column names lead to an exception.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DataBackendDplyr$data(rows, cols, data_format = "data.table")}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rows}}{\code{integer()}\cr
Row indices.}
\item{\code{cols}}{\code{character()}\cr
Column names.}
\item{\code{data_format}}{(\code{character(1)})\cr
Desired data format, e.g. \code{"data.table"} or \code{"Matrix"}.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DataBackendDplyr-head"></a>}}
\if{latex}{\out{\hypertarget{method-DataBackendDplyr-head}{}}}
\subsection{Method \code{head()}}{
Retrieve the first \code{n} rows.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DataBackendDplyr$head(n = 6L)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{n}}{(\code{integer(1)})\cr
Number of rows.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{\link[data.table:data.table]{data.table::data.table()}} of the first \code{n} rows.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DataBackendDplyr-distinct"></a>}}
\if{latex}{\out{\hypertarget{method-DataBackendDplyr-distinct}{}}}
\subsection{Method \code{distinct()}}{
Returns a named list of vectors of distinct values for each column
specified. If \code{na_rm} is \code{TRUE}, missing values are removed from the
returned vectors of distinct values. Non-existing rows and columns are
silently ignored.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DataBackendDplyr$distinct(rows, cols, na_rm = TRUE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rows}}{\code{integer()}\cr
Row indices.}
\item{\code{cols}}{\code{character()}\cr
Column names.}
\item{\code{na_rm}}{\code{logical(1)}\cr
Whether to remove NAs or not.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
Named \code{list()} of distinct values.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DataBackendDplyr-missings"></a>}}
\if{latex}{\out{\hypertarget{method-DataBackendDplyr-missings}{}}}
\subsection{Method \code{missings()}}{
Returns the number of missing values per column in the specified slice
of data. Non-existing rows and columns are silently ignored.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DataBackendDplyr$missings(rows, cols)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rows}}{\code{integer()}\cr
Row indices.}
\item{\code{cols}}{\code{character()}\cr
Column names.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
Total of missing values per column (named \code{numeric()}).
}
}
}
|
0aba5d17ef5cd9daeefd29e4d2929fe491b8bc0a
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.compute/man/eks_untag_resource.Rd
|
2b2ebffe91bb7c2993e41f408d09e9c73c96d4d9
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 741
|
rd
|
eks_untag_resource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eks_operations.R
\name{eks_untag_resource}
\alias{eks_untag_resource}
\title{Deletes specified tags from a resource}
\usage{
eks_untag_resource(resourceArn, tagKeys)
}
\arguments{
\item{resourceArn}{[required] The Amazon Resource Name (ARN) of the resource from which to delete
tags. Currently, the supported resources are Amazon EKS clusters and
managed node groups.}
\item{tagKeys}{[required] The keys of the tags to be removed.}
}
\value{
An empty list.
}
\description{
Deletes specified tags from a resource.
}
\section{Request syntax}{
\preformatted{svc$untag_resource(
resourceArn = "string",
tagKeys = list(
"string"
)
)
}
}
\keyword{internal}
|
71c96d42907b795f0b84650b6e5aa1b31745ae01
|
dd435ea466117a0071e46b14ad8bc8f81a736d21
|
/man/nord_palettes.Rd
|
d13a04dc1a387bf2f09da81268d1308da5eff3a8
|
[
"MIT"
] |
permissive
|
saera-chun-hud/hud-palletes
|
c1251b48c993c1e85e0a2d7e1f32db7d935bac2f
|
a6810329626cedd2bb970e839cf210cb4ccb87bb
|
refs/heads/master
| 2023-03-17T11:59:46.486420
| 2019-08-29T18:28:40
| 2019-08-29T18:28:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 934
|
rd
|
nord_palettes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colors.R
\docType{data}
\name{nord_palettes}
\alias{nord_palettes}
\title{Nord and northern-themed color palettes}
\format{An object of class \code{list} of length 16.}
\usage{
nord_palettes
}
\description{
Nord: An arctic, north-bluish color palette.
Created for the clean- and minimal flat design pattern to achieve an optimal focus and readability for code syntax highlighting and UI. It consists of four palettes utilizing a total of sixteen, carefully selected, dimmed pastel colors for an eye-comfortable, but yet colorful ambiance.
}
\details{
The available palettes are:
polarnight
snowstorm
frost
aurora
There are also 11 Colour palettes extracted from the works of the [Group of Seven](https://en.wikipedia.org/wiki/Group_of_Seven_(artists)) and one color palette drawn from [Lumina Borealis](https://www.luminaborealis.com/)
}
\keyword{datasets}
|
b9367dd8a871a2d8e39b673f1795793bae9beb3f
|
d31492b02b5d4a249cfa3a731054be8ee02a5c2e
|
/man/Lagged3d-class.Rd
|
c5cc2422ada69c658a0fed5b774b84f1e8b98c5c
|
[] |
no_license
|
GeoBosh/lagged
|
6fc59d2f8ad26a05f3454b07e158d82ab651040a
|
2697b9e5faf7fcf88f5b18da94c34de248a54be7
|
refs/heads/master
| 2022-05-02T03:37:40.976743
| 2022-04-04T21:33:54
| 2022-04-04T21:33:54
| 92,064,916
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
rd
|
Lagged3d-class.Rd
|
\name{Lagged3d-class}
\Rdversion{1.1}
\docType{class}
\alias{Lagged3d-class}
\title{Class Lagged3d}
\description{Class Lagged3d.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{Lagged(a)} or
\code{new("Lagged3d", data = a)}, where \code{a} is a 3-d array.
\code{new("Lagged3d", ...)} also works.
%% ~~ describe objects here ~~
}
\section{Slots}{
\describe{
\item{\code{data}:}{Object of class \code{"array"} ~~ }
}
}
\section{Extends}{
Class \code{"\linkS4class{Lagged}"}, directly.
}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "Lagged3d", i = "numeric", j = "missing", drop = "logical")}: ... }
\item{[}{\code{signature(x = "Lagged3d", i = "numeric", j = "missing", drop = "missing")}: ... }
\item{[<-}{\code{signature(x = "Lagged3d", i = "numeric")}: ... }
\item{show}{\code{signature(object = "Lagged3d")}: ... }
\item{whichLagged}{\code{signature(x = "Lagged3d", y = "missing")}: ... }}
}
%\references{
%%% ~~put references to the literature/web site here~~
%}
\author{Georgi N. Boshnakov}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{Lagged}},
\code{\linkS4class{Lagged1d}},
\code{\linkS4class{Lagged2d}}
}
\examples{
## see examples for class "Lagged"
}
\keyword{classes}
|
0ff3c0e88e0a1fbacbc75cf8205da9a0dda1b919
|
b1a226a32212563ff5caa6eef79d87c4c415c5fa
|
/packrat/lib/x86_64-w64-mingw32/3.5.1/cowplot/doc/plot_grid.R
|
dc626c2da1e76eec01f30210656cc5071b8b9064
|
[
"MIT"
] |
permissive
|
jcvdav/curso_marea
|
05b0b7b9a48066c558c0911c2172bb8cd6c1874e
|
07c175488e90256f5a6590fa43f36d193b04a1ad
|
refs/heads/master
| 2020-04-07T10:01:29.749199
| 2019-03-21T20:15:19
| 2019-03-21T20:15:19
| 158,272,854
| 0
| 2
|
MIT
| 2018-11-29T22:01:39
| 2018-11-19T18:36:22
|
C++
|
UTF-8
|
R
| false
| false
| 4,028
|
r
|
plot_grid.R
|
# Code auto-extracted from the cowplot "plot_grid" vignette; each
# "## ----" line is the original knitr chunk header. Do not edit by hand.
## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
require(cowplot)
theme_set(theme_cowplot(font_size=12)) # reduce default font size
# Two example plots reused throughout: a scatterplot and a bar chart.
plot.mpg <- ggplot(mpg, aes(x = cty, y = hwy, colour = factor(cyl))) +
  geom_point(size=2.5)
plot.diamonds <- ggplot(diamonds, aes(clarity, fill = cut)) + geom_bar() +
  theme(axis.text.x = element_text(angle=70, vjust=0.5))
plot_grid(plot.mpg, plot.diamonds, labels = c('A', 'B'))

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
# "AUTO" generates uppercase labels A, B, ...; "auto" lowercase.
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO")

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
plot_grid(plot.mpg, plot.diamonds, labels = "auto")

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
# align = 'h' / 'v' lines panels up horizontally / vertically.
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO", align = 'h')

## ---- message=FALSE, fig.width=3.9, fig.height=5.1-----------------------
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO", ncol = 1, align = 'v')

## ---- message=FALSE, fig.width=5, fig.height=5---------------------------
plot.iris <- ggplot(iris, aes(Sepal.Length, Sepal.Width)) +
  geom_point() + facet_grid(. ~ Species) + stat_smooth(method = "lm") +
  background_grid(major = 'y', minor = "none") + # add thin horizontal lines
  panel_border() # and a border around each panel
plot_grid(plot.iris, plot.mpg, labels = "AUTO", ncol = 1,
          align = 'v', axis = 'l') # aligning vertically along the left axis

## ---- message=FALSE, results="hold", collapse=TRUE-----------------------
# Base-graphics plots can be captured with recordPlot() and mixed into a
# grid; par() is configured first so the recorded plot composes cleanly.
par(xpd = NA, # switch off clipping, necessary to always see axis labels
    bg = "transparent", # switch off background to avoid obscuring adjacent plots
    oma = c(2, 2, 0, 0), # move plot to the right and up
    mgp = c(2, 1, 0) # move axis labels closer to axis
)
plot(sqrt) # plot the square root function
recordedplot <- recordPlot() # record the previous plot

## ---- message=FALSE------------------------------------------------------
# A plotting function and a grid grob can also be placed in a grid.
plotfunc <- function() image(volcano) # define the function
plotfunc() # call the function to make the plot

## ---- message=FALSE------------------------------------------------------
gcircle <- grid::circleGrob()
ggdraw(gcircle)

## ---- message=FALSE, fig.width=7, fig.height=5---------------------------
# scale shrinks individual panels; hjust/vjust nudge the labels.
plot_grid(plot.mpg, recordedplot, plotfunc, gcircle, labels = "AUTO", hjust = 0, vjust = 1,
          scale = c(1., 1., 0.9, 0.9))

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO", align = 'h', label_size = 12)

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
# Label font family, face, and colour are configurable.
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO", align = 'h', label_fontfamily = "serif",
          label_fontface = "plain", label_colour = "blue")

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO", align = 'h', label_size = 12,
          label_x = 0, label_y = 0, hjust = -0.5, vjust = -0.5 )

## ---- message=FALSE, fig.width=6.8, fig.height=2.55----------------------
# rel_widths / rel_heights set relative panel sizes.
plot_grid(plot.mpg, plot.diamonds, labels = "AUTO", align = 'h', rel_widths = c(1, 1.3))

## ---- message=FALSE, fig.width=7.65, fig.height=5.1----------------------
# Nested grids: build the bottom row first, then stack it under the top plot.
bottom_row <- plot_grid(plot.mpg, plot.diamonds, labels = c('B', 'C'), align = 'h', rel_widths = c(1, 1.3))
plot_grid(plot.iris, bottom_row, labels = c('A', ''), ncol = 1, rel_heights = c(1, 1.2))

## ---- message=FALSE, fig.width=7.65, fig.height=5.1----------------------
# first align the top-row plot (plot.iris) with the left-most plot of the
# bottom row (plot.mpg)
plots <- align_plots(plot.mpg, plot.iris, align = 'v', axis = 'l')
# then build the bottom row
bottom_row <- plot_grid(plots[[1]], plot.diamonds,
                        labels = c('B', 'C'), align = 'h', rel_widths = c(1, 1.3))
# then combine with the top row for final plot
plot_grid(plots[[2]], bottom_row, labels = c('A', ''), ncol = 1, rel_heights = c(1, 1.2))
|
b8e92b2fe65f622fe0d4bfc65d994efb6a1776ff
|
885441742c392cf44286b27f1c379b07c3b9ee68
|
/Quad_Venn_example.R
|
2b80f14e3e33131c05a03c5e6bdf6fcfee64d613
|
[
"CC0-1.0"
] |
permissive
|
DCGenomics/Delmarva_R_Users_Group
|
86415f56c9be64e92e5150b867b57116252e9f4f
|
1fff51c317c9f7e4e16072b2cfb4b64677dd8ef5
|
refs/heads/master
| 2021-01-25T12:07:23.236972
| 2015-02-05T23:04:25
| 2015-02-05T23:04:25
| 30,382,176
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
Quad_Venn_example.R
|
# Example of a four-set Venn diagram with VennDiagram::draw.quad.venn().
#
# The n* variables hold counts of *exclusive* regions (e.g. n12 is the count
# in sets 1 and 2 only, n1234 the count in all four). draw.quad.venn()
# expects cumulative set sizes and pairwise/triple intersection totals, so
# the sums below convert the exclusive counts into those cumulative values.
library(VennDiagram)  # library() errors if the package is missing; require() only warned

n1234 <- 1865
n123 <- 6372
n124 <- 292
n12 <- 17486
n134 <- 204
n13 <- 791
n14 <- 73
n1 <- 14856
n234 <- 123
n23 <- 905
n24 <- 82
n2 <- 15023
n34 <- 249
n3 <- 2516
n4 <- 468

venn <- draw.quad.venn(
  # total size of each set = exclusive count + every region it overlaps
  area1 = n1 + n1234 + n123 + n124 + n12 + n134 + n13 + n14,
  area2 = n2 + n1234 + n123 + n124 + n12 + n234 + n23 + n24,
  area3 = n3 + n1234 + n123 + n134 + n13 + n234 + n23 + n34,
  area4 = n4 + n1234 + n124 + n134 + n14 + n234 + n24 + n34,
  # pairwise intersections include the triple and quadruple overlaps
  n12 = n12 + n1234 + n123 + n124,
  n13 = n13 + n1234 + n123 + n134,
  n14 = n14 + n1234 + n124 + n134,
  n23 = n23 + n1234 + n123 + n234,
  n24 = n24 + n1234 + n124 + n234,
  n34 = n34 + n1234 + n134 + n234,
  # triple intersections include the quadruple overlap
  n123 = n123 + n1234,
  n124 = n124 + n1234,
  n134 = n134 + n1234,
  n234 = n234 + n1234,
  n1234 = n1234,
  category = c(1, 2, 3, 4),
  fill = c('red', 'blue', 'green', 'yellow')
)
|
860392f431c25194e5f9740c376185383c7ce1eb
|
ac655728cfed40aacb3686b9a3fd2c26f8facdc0
|
/scripts/dhs/dhs_plot_gamma.R
|
535c9dc92ae3b8a109e493f2cce92df4ffc3423e
|
[] |
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
8ba092245e934eff8c5dd6eab3d265a35ccfca06
|
f1a6550aa3d703b4bb494066be1b647dfedcb51c
|
refs/heads/master
| 2020-09-20T12:29:01.164008
| 2020-08-07T07:49:46
| 2020-08-07T07:49:46
| 224,476,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,420
|
r
|
dhs_plot_gamma.R
|
# Jake Yeung
# 2015-09-03
# Test out Gamma fitting functions
#
# Exploratory script: bedpath/gammapath are reassigned several times below to
# switch between tissues; only the LAST assignment of each (the Liver pair)
# takes effect. Earlier assignments are kept as a record of runs tried.
source("scripts/functions/MixtureModelFunctions.R")

bedpath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/ucsc_names/Cerebellum.dhs.merged.bed"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma/Cerebellum/gammamdl.Robj"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy/Cerebellum/gammamdl.Robj"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy_ALL_gamma_inits/Cerebellum/gammamdl.Robj"

bedpath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/ucsc_names/Heart.dhs.merged.bed"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy_ALL_gamma2/Heart/gammamdl.Robj"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy_ALL_gamma_inits/Heart/gammamdl.Robj"

bedpath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/ucsc_names/SkeletalMuscle.dhs.merged.bed"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy_ALL_gamma_inits/SkeletalMuscle/gammamdl.Robj"

bedpath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/ucsc_names/Liver.dhs.merged.bed"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy_ALL_gamma_inits/Liver/gammamdl.Robj"
gammapath <- "/home/yeung/projects/tissue-specificity/data/beds/merge/cutoffs_smoother_gamma_cutoff_high_accuracy_ALL_gamma_inits_scale50/Liver/gammamdl.Robj"

# Read the merged DHS bed file; column V4 holds the per-peak counts.
dat <- read.table(bedpath, header = FALSE)


# Find peaks from merged bam files -------- -------------------------------

# Keep counts > 1, then randomly subsample 1% of them.
# NOTE(review): sample() without set.seed() makes this run nondeterministic.
counts <- dat$V4[which(dat$V4 > 1)]
counts <- sample(counts, 0.01 * length(counts))

# Load object -------------------------------------------------------------

# Loads the fitted gamma mixture model; presumably provides `mixmdl`
# (used below) -- verify against the saved object's contents.
load(gammapath, verbose = TRUE)

# PlotGammaDist(mixmdl, log2(counts))
# scale <- 50
# mixmdl <- gammamixEM(log2(counts), lambda = c(0.8, 0.2), alpha = c(scale, scale), beta = c(3 / scale, 12 / scale))

# Plot the mixture fit on log2 counts and derive a cutoff.
cutoff <- PlotGammaMixmdl(mixmdl, log2(counts))

# cutoff <- optimize(f = ShannonEntropyMixMdl, c(log2(counts), mixmdl), interval = range(log2(counts)), tol = 0.0001, maximum = TRUE)
# abline(v = cutoff$maximum)
|
f65208427b84f529df21edb52dc98fe26bb727e9
|
84690a057ce5002c22f75239a77a0d75b291e940
|
/BAN400 Term Project.R
|
881ee3b6357d4362880c6e30ab3cd9fbbc846ff1
|
[] |
no_license
|
chrlov/project
|
5b7cb28c700131781cf4a60d969c112152c97939
|
b9391a6d3ef820e59222dfd3ec176db1477063bf
|
refs/heads/main
| 2023-01-23T10:37:13.769505
| 2020-12-11T19:25:29
| 2020-12-11T19:25:29
| 302,040,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,306
|
r
|
BAN400 Term Project.R
|
#### Packages ----------------------------------------------------------------
library(tidyverse)
library(shiny)
library(leaflet)
#### Data --------------------------------------------------------------------
# COVID-19 data
data <- COVID19::covid19() %>%
transmute("Country" = administrative_area_level_1,
"Date" = date,
"Tests" = tests,
"Tests / Population" = tests / population,
"Confirmed Cases" = confirmed,
"Confirmed Cases / Population" = confirmed / population,
"Confirmed Cases / Tests" = confirmed / tests,
"Recovered" = recovered,
"Currently Infected" = confirmed - recovered,
"Currently Infected / Population" = (confirmed - recovered) / population,
"Deaths" = deaths,
"Deaths / Population" = deaths / population,
"Deaths / Confirmed Cases" = deaths / confirmed,
"Closed: Schools" = school_closing,
"Closed: Workplaces" = workplace_closing,
"Closed: Transport" = transport_closing,
"Closed: Events" = cancel_events,
"Restriction: Gathering" = gatherings_restrictions,
"Restriction: Stay Home" = stay_home_restrictions,
"Restriction: Internal Movement" = internal_movement_restrictions,
"Restriction: International Movement" = international_movement_restrictions,
"iso_a2" = iso_alpha_2
)
# Description of variables. Source: https://covid19datahub.io/articles/doc/data.html
# One entry per column of `data`, in the SAME order — the server matches them
# positionally, so keep this vector in sync with the transmute() above.
explaination <- c(# id:
                  "unique identifier",
                  #Country:
                  "country",
                  # Date:
                  "observation date",
                  # Tests:
                  "the cumulative number of tests in a country.",
                  # Tested / Population:
                  "the cumulative number of tests in a country divided by the respective
                  country's population.",
                  # Confirmed Cases:
                  "the cumulative number of confirmed cases in a country.",
                  # Confirmed Cases / Population:
                  "the cumulative number of confirmed cases in a country divided by the
                  country's population.",
                  # Confirmed Cases / Tests:
                  "the cumulative number of confirmed cases in a country divided by the
                  number of tests performed in the country.",
                  # Recovered:
                  "the cumulative number of patients released from hospitals or reported
                  recovered in a country.",
                  # Currently Infected:
                  "the cumulative number of confirmed cases in a country minus the
                  cumulative number of patients released from hospitals or reported
                  recovered in the country.",
                  # Currently Infected / Population:
                  "the cumulative number of confirmed cases in a country minus the
                  cumulative number of patients released from hospitals or reported
                  recovered in the country, divided by the country's population.",
                  # COVID-19 Deaths:
                  "the cumulative number of COVID-19 related deaths in a country.",
                  # COVID-19 Deaths / Population:
                  "the cumulative number of COVID-19 related deaths in a country divided
                  by the country's population.",
                  # COVID-19 Deaths / Confirmed Cases:
                  "the cumulative number of COVID-19 related deaths in a country divided
                  by the cumulative number of confirmed cases in the respective country.",
                  # Closed: Schools:
                  "(0) No measures; (1) Recommend closing; (2) Require closing (only some
                  levels or categories, eg just high school, or just public schools); (3)
                  Require closing all levels",
                  # Closed: Workplaces:
                  "(0) No measures; (1) Recommend closing (or work from home); (2) require
                  closing for some sectors or categories of workers; (3) require closing
                  (or work from home) all-but-essential workplaces (eg grocery stores,
                  doctors)",
                  # Closed: Transport:
                  "(0) No measures; (1) Recommend closing (or significantly reduce volume/
                  route/means of transport available); (2) Require closing (or prohibit
                  most citizens from using it)",
                  # Closed: Events:
                  "(0) No measures; (1) Recommend cancelling; (2) Require cancelling",
                  # Restriction: Gathering:
                  "(0) No restrictions; (1) Restrictions on very large gatherings (the
                  limit is above 1000 people); (2) Restrictions on gatherings between
                  100-1000 people; (3) Restrictions on gatherings between 10-100 people;
                  (4) Restrictions on gatherings of less than 10 people",
                  # Restriction: Stay Home
                  "(0) No measures; (1) recommend not leaving house; (2) require not
                  leaving house with exceptions for daily exercise, grocery shopping,
                  and “essential” trips; (3) Require not leaving house with minimal
                  exceptions (e.g. allowed to leave only once every few days, or only
                  one person can leave at a time, etc.)",
                  # Restriction: Internal Movement:
                  "(0) No measures; (1) Recommend closing (or significantly reduce
                  volume/route/means of transport); (2) Require closing (or prohibit
                  most people from using it)",
                  # Restriction: International Movement:
                  "(0) No measures; (1) Screening; (2) Quarantine arrivals from high-risk
                  regions; (3) Ban on high-risk regions; (4) Total border closure",
                  # ISO id:
                  "ISO ID"
                  )
# Long format lookup table used by output$descriptionPLOT.  NOTE(review): the
# auto-generated first column name "c.colnames.data.." (from
# data.frame(c(colnames(data)), ...)) is filtered on by name in the server,
# so the construction — misspelling included — must not be changed in
# isolation.
explainations <- reshape2::melt(data.frame(c(colnames(data)), explaination))
#### User Interface ----------------------------------------------------------
# Inline-CSS snippets reused by the wellPanel() calls in the UI below.
color  <- "#ffffff" # panel background (white), HEX code
color2 <- "#c3e7ff" # header background (light blue), HEX code
# The double space and the space before ";" reproduce the original paste()
# output exactly (default sep = " ").
wellPcolor  <- sprintf("background-color:  %s ;", color)
wellPcolor2 <- sprintf("background-color:  %s ;", color2)
# Creating the UI
# Three tabs: "Graph Creator" (time-series plot + png download), "Data Table"
# (wide table + csv download) and "Interactive Map" (leaflet choropleth).
# Input IDs are suffixed PLOT/TABLE/MAP to keep the three tabs independent.
ui <- navbarPage(title = strong("BAN400 Term Project"),
                 # Theme for UI
                 theme = shinythemes::shinytheme("spacelab"),
                 # Plot Tab
                 tabPanel(title = "Graph Creator",
                          # Header Plot Tab
                          fluidRow(wellPanel(style = wellPcolor2,
                                             h1(strong("Graph Creator for COVID-19"),
                                                align = "center")
                                             ),
                                   ),
                          sidebarLayout(
                            # Inputs: Plot Tab
                            sidebarPanel(style = wellPcolor,
                                         h4(strong("Inputs")),
                                         hr(),
                                         # Variables (drop id/Country/Date and the trailing iso_a2 column)
                                         radioButtons(inputId = "variablePLOT",
                                                      label = "Select variable",
                                                      choices = colnames(data)[-c(1:3, length(colnames(data)))]
                                                      ),
                                         helpText("Please see the note under the plot for description of the chosen variable."
                                                  ),
                                         # Countries
                                         selectInput(inputId = "idPLOT",
                                                     label = "Select countries",
                                                     choices = unique(data$Country),
                                                     selected = c("Australia","Norway", "Denmark"),
                                                     multiple = T
                                                     ),
                                         helpText("Select as many countries as you want."
                                                  ),
                                         # Time period
                                         dateRangeInput(inputId = "datesPLOT",
                                                        label = "Select time period",
                                                        start = "2020-03-15",
                                                        end = Sys.Date() - 7,
                                                        min = min(data$Date),
                                                        max = max(data$Date)
                                                        ),
                                         helpText(paste("Latest data update:",
                                                        max(data$Date))
                                                  )
                                         ),
                            # Outputs: Plot Tab
                            mainPanel(
                              wellPanel(style = wellPcolor,
                                        # Header
                                        h2(strong(textOutput("plotheader")),
                                           align = "center"),
                                        br(),
                                        # Plot
                                        plotOutput("plot2",
                                                   height = 485),
                                        ),
                              fluidRow(column(width = 7,
                                              wellPanel(style = wellPcolor,
                                                        # Description header
                                                        strong(textOutput("desc")),
                                                        hr(),
                                                        # Description
                                                        em(textOutput("descriptionPLOT"))
                                                        )
                                              ),
                                       column(width = 5,
                                              wellPanel(style = wellPcolor,
                                                        strong("Download .png file"),
                                                        hr(),
                                                        fluidRow(
                                                          column(width = 6,
                                                                 # Download: Height (cm, consumed by ggsave in the server)
                                                                 numericInput(inputId = "height",
                                                                              label = "Height in cm",
                                                                              value = 18,
                                                                              min = 1,
                                                                              max = 100
                                                                              )
                                                                 ),
                                                          column(width = 6,
                                                                 # Download: Width
                                                                 numericInput(inputId = "width",
                                                                              label = "Width in cm",
                                                                              value = 36,
                                                                              min = 1,
                                                                              max = 100
                                                                              )
                                                                 )
                                                          ),
                                                        # Download button
                                                        downloadButton(outputId = "downloadPlot",
                                                                       label = "Download!",
                                                                       class = "btn btn-success"
                                                                       )
                                                        )
                                              )
                                       )
                              )
                            )
                          ),
                 # Table Tab
                 tabPanel(title = "Data Table",
                          # Header Data Table Tab
                          fluidRow(wellPanel(style = wellPcolor2,
                                             h1(strong("COVID-19 Data Tables"),
                                                align = "center"))
                                   ),
                          sidebarLayout(
                            # Inputs: Table Tab
                            sidebarPanel(style = wellPcolor,
                                         h4(strong("Inputs")),
                                         hr(),
                                         # Variables
                                         radioButtons(inputId = "variableTABLE",
                                                      label = "Select variable",
                                                      choices = colnames(data)[-c(1:3, length(colnames(data)))]
                                                      ),
                                         # Countries
                                         selectInput(inputId = "idTABLE",
                                                     label = "Select countries",
                                                     choices = unique(data$Country),
                                                     selected = c("Australia","Norway", "Denmark", "United States"),
                                                     multiple = T
                                                     ),
                                         helpText("Choose as many countries as you want."
                                                  ),
                                         # Time Period
                                         dateRangeInput(inputId = "datesTABLE",
                                                        label = "Select time period",
                                                        start = Sys.Date() - 31,
                                                        end = Sys.Date() - 10,
                                                        min = min(data$Date),
                                                        max = max(data$Date)
                                                        ),
                                         helpText(paste("Data updated:",
                                                        max(data$Date))
                                                  ),
                                         strong("Download table"),
                                         br(),
                                         # Download button
                                         downloadButton(outputId = "downloadTABLE",
                                                        label = "Download Table",
                                                        class = "btn btn-success"
                                                        )
                                         ),
                            # Outputs: Table Tab
                            mainPanel(
                              wellPanel(style = wellPcolor,
                                        # Table header
                                        h3(strong(textOutput("tableheader")),
                                           align = "left"),
                                        # Table
                                        tableOutput("table")
                                        )
                              )
                            )
                          ),
                 # Interactive Map Tab
                 tabPanel(title = "Interactive Map",
                          # Header Interactive Map Tab
                          fluidRow(wellPanel(style = wellPcolor2,
                                             h1(strong("Interactive COVID-19 Map"),
                                                align = "center")
                                             )
                                   ),
                          # Inputs: Interactive Map Tab
                          fluidRow(
                            wellPanel(style = wellPcolor,
                                      # Date (slider is animatable; "play" steps through days)
                                      sliderInput(inputId = "dateMAP",
                                                  label = "Select date",
                                                  min = min(data$Date),
                                                  max = max(data$Date),
                                                  value = Sys.Date() - 60,
                                                  timeFormat = "%d %b 20%y",
                                                  animate = animationOptions(interval = 7,
                                                                             loop = F)
                                                  ),
                                      helpText(paste("Data last updated:",
                                                     max(data$Date))
                                               ),
                                      # Variable (also drops column 8, Recovered)
                                      selectInput(inputId = "variableMAP",
                                                  label = "What do you want to compare?",
                                                  choices = colnames(data)[-c(1:3, 8, length(colnames(data)))]
                                                  )
                                      )
                            ),
                          # Outputs: Interactive Map Tab
                          fluidRow(
                            h2(textOutput("mapheader"),
                               align = "center"),
                            leafletOutput("map",
                                          height = 700)
                            )
                          ),
                 h5("Data source: Guidotti and Ardia (2020). See", a(href="https://covid19datahub.io", "https://covid19datahub.io"),
                    align = "center")
                 )
#### Server ------------------------------------------------------------------
# Server logic: one render/handler per output id declared in the UI.
server <- function(input, output) {
  ## PLOT
  # Plot Header — echoes the selected variable name.
  output$plotheader <- renderText({
    print(input$variablePLOT)
  })
  # Plot Variable Descriptions — looks the variable up in `explainations`.
  output$descriptionPLOT <- renderText({
    vars <- input$variablePLOT
    # `c.colnames.data..` is the auto-generated column name produced when
    # `explainations` was built; see the data section.
    show_text <- explainations %>%
      filter(c.colnames.data.. == vars)
    show <- show_text[1, 2]
    print(paste("'",vars,"'",
                "is defined as",
                show))
  })
  # Plot Variable
  output$desc <- renderText({
    paste("Description of displayed variable:", input$variablePLOT)
  })
  # Plot inputs — reactive building the ggplot; shared by renderPlot and the
  # download handler so both always show the same figure.
  plotInput <- reactive({
    # Plot inputs from UI
    date_min <- input$datesPLOT[1]
    date_max <- input$datesPLOT[2]
    ids <- input$idPLOT
    vars <- input$variablePLOT
    # Relevant data
    # NOTE(review): select() with the bare external vector `vars` works but
    # is ambiguous; all_of(vars) would be the explicit form.
    df <- data %>%
      filter(Date >= date_min,
             Date <= date_max,
             Country %in% ids) %>%
      select(Country, Date, vars)
    # Defining plot
    p <- ggplot(data = df,
                aes(x = Date,
                    y = df[[input$variablePLOT]],
                    col = Country)) +
      geom_line(size = 1.3) +
      scale_y_continuous(labels = scales::comma) +
      scale_x_date(date_breaks = "1 month",
                   date_labels = "%b") +
      xlab("Date") +
      ylab("")+
      labs(title = paste("COVID-19:", input$variablePLOT),
           subtitle = paste(format.Date(input$datesPLOT[1], format = "%d %B 20%y"),
                            "-",
                            format.Date(input$datesPLOT[2], format = "%d %B 20%y")),
           caption = "Data source: Guidotti and Ardia (2020), www.covid19datahub.io",
           fill = "Country") +
      theme_linedraw(base_size = 16,
                     base_family = "serif") +
      theme(plot.title = element_text(face = "bold"),
            plot.caption = element_text(face = "italic"),
            legend.title = element_text(face = "bold"),
            axis.title.x = element_text(vjust = -0.5)
            )
  })
  # Draw the plot
  output$plot2 <- renderPlot({
    print(plotInput())
  })
  # Download the plot
  output$downloadPlot <- downloadHandler(
    # Filename: Downloaded Plot
    filename = function(){paste("BAN400,",
                                input$variablePLOT,
                                ",",
                                unique(list(input$idPLOT)),
                                input$datesPLOT[1],
                                " to",
                                input$datesPLOT[2],
                                ".png")
      },
    # Content: Downloaded Plot — size comes from the UI's width/height inputs.
    content = function(file) {ggsave(file,
                                     plot = plotInput(),
                                     device = "png",
                                     dpi = "retina",
                                     units = "cm",
                                     width = input$width,
                                     height = input$height)
      }
  )
  ## TABLE
  # Table Header
  output$tableheader <- renderText({
    print(input$variableTABLE)
  })
  # Table data — reactive shared by the rendered table and the csv download.
  tabledata <- reactive({
    # Table inputs from UI
    date_min <- input$datesTABLE[1]
    date_max <- input$datesTABLE[2]
    ids <- input$idTABLE
    vars <- input$variableTABLE
    # Relevant data set
    data_app <- data %>%
      filter(Date >= date_min,
             Date <= date_max,
             Country %in% ids) %>%
      select(Country, Date, vars)
    # Formatting date column
    data_app$Date <- format(as.Date(data_app$Date), "20%y-%m-%d")
    # Reshaping table: long -> wide, one column per country.
    data_app %>%
      reshape::cast(., Date ~ Country)
  })
  # Table itself
  output$table <- renderTable({
    tabledata()
  },
  # Table options
  digits = 4,
  na = "Not Available")
  # Table download
  output$downloadTABLE <- downloadHandler(
    # Filename: Downloaded Table
    filename = function(){paste("COVID-19 Data,",
                                input$variableTABLE,
                                ".csv")},
    # Content: Downloaded Table
    content = function(filename) {write.csv(tabledata(), filename)}
  )
  ## MAP
  # Map Header
  output$mapheader <- renderText({
    print(paste(input$variableMAP,
                "per",
                format(input$dateMAP, "%A, %B %d, %Y")))
  })
  # Creating map — choropleth for the single selected day.
  output$map <- renderLeaflet({
    # Map inputs
    date_map <- input$dateMAP
    vars <- input$variableMAP
    # Relevant data
    data_app <- data %>%
      filter(Date == date_map)
    # sfc_MULTIPLOYGON data (country polygons from spData)
    world <- spData::world
    # Adding geoms to our relevant data set (join on ISO alpha-2 code)
    df <- merge(data_app, world[ ,c(1,11)],
                by = "iso_a2") %>%
      sf::st_as_sf()
    # Set colors for map (black = no data for that day)
    pal <- colorNumeric(palette = "Reds",
                        domain = df[[input$variableMAP]],
                        na.color = "#000000")
    # Map itself
    leaflet(df) %>%
      addProviderTiles("CartoDB.Voyager") %>%
      addPolygons(
        fillColor = ~pal(df[[input$variableMAP]]),
        weight = 2,
        opacity = 1,
        color = "white",
        dashArray = "3",
        fillOpacity = 0.9,
        highlight = highlightOptions(weight = 1,
                                     color = "red",
                                     dashArray = "",
                                     fillOpacity = 0.7,
                                     bringToFront = TRUE),
        label = ~paste(Country, ":" ,format(round(as.numeric(df[[input$variableMAP]]), 7), nsmall=0, big.mark=",")),
        labelOptions = labelOptions(style = list("font-weight" = "normal",
                                                 padding = "3px 8px"),
                                    textsize = "15px",
                                    direction = "auto")) %>%
      addLegend(pal = pal,
                values = ~df[[input$variableMAP]],
                opacity = 1,
                title = NULL,
                position = "bottomright")
  })
}
#### App ---------------------------------------------------------------------
# Launch the app; launch.browser opens it in the system's default browser.
# Fixed: use TRUE instead of the reassignable shorthand T.
runApp(shinyApp(ui = ui,
                server = server),
       launch.browser = TRUE)
|
05997f07c0731e0a7fcc7d0421063a3223dcd9e4
|
d033955c754784fed28e497f4aca27fbc06cc574
|
/data-types.R
|
a91397b8c68e641c9bb84ecf61f03b17abd50090
|
[] |
no_license
|
hammyasf/learning-R
|
51e1bf4bb9c9c3deffac8bd0597203376779f6f6
|
4b2d2b329948431b65621a534002d2fb87229f99
|
refs/heads/master
| 2021-05-31T16:44:00.899233
| 2016-03-07T23:28:33
| 2016-03-07T23:28:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
r
|
data-types.R
|
# Demonstrate basic R atomic types and the is.numeric() predicate.
a <- 32        # double
b <- "Bugs"    # character
c <- 12114.44  # double; NOTE(review): this name masks base::c() in the session
d <- -121      # double
class(a)       # "numeric"
class(b)       # "character"
class(c)       # "numeric"
class(d)       # fixed: original repeated class(c) here, so d was never inspected
is.numeric(a)  # TRUE
is.numeric(b)  # FALSE — character is not numeric
is.numeric(c)  # TRUE
is.numeric(d)  # TRUE
|
537b30eb1ae6eeb4d8b75256b61e2d1ffae97b84
|
0db6dd83f3358bd98f944c36d4a618e43de52345
|
/TimeCapsule_Code/RunFile.r
|
f2688452716cd307ad952fcd2c21f307a485d426
|
[] |
no_license
|
yanliangs/A-Bayesian-Approach-to-Multistate-Hidden-Markov-Models-Application-to-Dementia-Progression
|
f38bf524f0b93113b08d80501a6da321a23be668
|
aa2e576eadf16e942e81653c2bad71bf51032470
|
refs/heads/master
| 2023-03-20T04:52:19.430189
| 2019-12-01T17:28:32
| 2019-12-01T17:28:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,270
|
r
|
RunFile.r
|
library(hmm)
source('SetupFunc.r')
# Positional command-line arguments:
#   args[1] = RNG seed / dataset index, args[2] = 'Simulation' or 'RealData',
#   args[3] = 'Bayesian' or 'ML' (used below when fitting),
#   args[4] = sensitivity-test flag ('no_scale' or '10x_scale'),
#   args[5] = number of cores, args[6] = input/output directory.
args <- commandArgs(TRUE)
set.seed(as.integer(args[1]))
sensitivity_test <- args[4]
NCORES <- as.integer(args[5])
Dir <- args[6]
# Load data and choose MCMC length: fewer steps for simulated data, more for
# the (synthetic) real data.  NOTE(review): any other args[2] value leaves
# DATA/STEPS/BURNIN undefined and the script fails later.
if(args[2]=='Simulation'){
  load(paste0(Dir,'demData',args[1],'.rda'))   # provides `demData`
  DATA <- demData
  STEPS <- 15000
  BURNIN <- 10000
} else if(args[2]=='RealData'){
  load('synthetic_MCSA_data.rda')              # provides `useData`
  DATA <- useData
  STEPS <- 30000
  BURNIN <- 10000
}
# Hidden-Markov model state space: amyloid (A+/-) x neurodegeneration (N+/-),
# two dementia states, and an absorbing Dead state.
states <- c("A-N-", "A+N-", "A-N+", "A+N+", "DemA-", "DemA+", "Dead")
# Initial transition-intensity matrix (row = from-state, col = to-state);
# zeros mark transitions that are not allowed.  Dimnames supplied directly in
# the matrix() call instead of being attached afterwards.
qmat <- matrix(
  c(0, .1, .1,  0,  0,  0, .09,
    0,  0,  0, .1,  0,  0, .09,
    0,  0,  0, .1, .1,  0, .09,
    0,  0,  0,  0,  0, .1, .09,
    0,  0,  0,  0,  0, .1, .1,
    0,  0,  0,  0,  0,  0, .1,
    0,  0,  0,  0,  0,  0, 0),
  nrow = 7, ncol = 7, byrow = TRUE,
  dimnames = list(states, states)
)
# Transition-intensity covariate map: one row per (from-state, to-state, term)
# with a shared-coefficient index (`coef`) and its initial value (`init`).
# Shared indices (e.g. 42..57 across all four death transitions) force those
# transitions to use common coefficients.
qcoef <- data.frame(state1 = c( rep(1,12), rep(1,5), rep(2,5), rep(3,5), rep(3,5), rep(4,5), rep(5,5),
                                rep(1,16), rep(2,16), rep(3,16), rep(4,16), rep(5,5), rep(6,5) ),
                    state2 = c( rep(2,12), rep(3,5), rep(4,5), rep(4,5), rep(5,5), rep(6,5), rep(6,5),
                                rep(7,16), rep(7,16), rep(7,16), rep(7,16), rep(7,5), rep(7,5) ),
                    term = c('(Intercept)','male','educ','apoe4','c1_amyl','c2_amyl','c3_amyl','c4_amyl',
                             'c5_amyl','c6_amyl','c7_amyl','c8_amyl',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4','c1','c2','c3','c4','c5','c6','c7',
                             'c8','c9','c10','c11',
                             '(Intercept)','iage','male','educ','apoe4','c1','c2','c3','c4','c5','c6','c7',
                             'c8','c9','c10','c11',
                             '(Intercept)','iage','male','educ','apoe4','c1','c2','c3','c4','c5','c6','c7',
                             'c8','c9','c10','c11',
                             '(Intercept)','iage','male','educ','apoe4','c1','c2','c3','c4','c5','c6','c7',
                             'c8','c9','c10','c11',
                             '(Intercept)','iage','male','educ','apoe4',
                             '(Intercept)','iage','male','educ','apoe4'),
                    coef = c( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,
                              12,13,14,15,16,
                              17,18,19,20,21,
                              22,23,24,25,26,
                              27,28,29,30,31,
                              32,33,34,35,36,
                              37,38,39,40,41,
                              42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,
                              42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,
                              42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,
                              42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,
                              58,59,60,61,62,
                              63,64,65,66,67),
                    init = c( 0.000000000,-0.331664059, 0.014273922, 0.945136344,-6.580919931,-6.501250885,
                              -6.427228628,-4.673031945,
                              -3.965086053,-3.111756265,
                              -1.950950409, 3.588549810,
                              -5.000000000, 0.094236594, 0.141479641,-0.095807126, 0.486230191,
                              -4.200000000, 0.093838248,-0.116771101,-0.098580487, 0.399355964,
                              -3.881605509, 0.065857892,-0.330382263, 0.019152541, 0.444423684,
                              -4.148148694, 0.075589995, 0.248030366, 0.033380640, 0.765503215,
                              -2.473865801, 0.045208611, 0.082975736, 0.054272656, 0.256422760,
                              -2.739931535, 0.047242201, 0.050003054, 0.033386975, 0.239889316,
                              -4.329458534, 0.110780340, 0.387452143,-0.042883267,-0.008324821,
                              -1.242275027,-1.112106935,-0.981938842,-0.851770750,-0.721602657,
                              -0.591434565,-0.461266472,-0.331098380,-0.200930287,-0.070762195,
                              0.000000000,
                              -4.329458534, 0.110780340, 0.387452143,-0.042883267,-0.008324821,
                              -1.242275027,-1.112106935,-0.981938842,-0.851770750,-0.721602657,
                              -0.591434565,-0.461266472,-0.331098380,-0.200930287,-0.070762195,
                              0.000000000,
                              -4.329458534, 0.110780340, 0.387452143,-0.042883267,-0.008324821,
                              -1.242275027,-1.112106935,-0.981938842,-0.851770750,-0.721602657,
                              -0.591434565,-0.461266472,-0.331098380,-0.200930287,-0.070762195,
                              0.000000000,
                              -4.329458534, 0.110780340, 0.387452143,-0.042883267,-0.008324821,
                              -1.242275027,-1.112106935,-0.981938842,-0.851770750,-0.721602657,
                              -0.591434565,-0.461266472,-0.331098380,-0.200930287,-0.070762195,
                              0.000000000,
                              -3.500000000, 0.120000000, 0.360000000,-0.007003006, 0.104718992,
                              -1.100000000, 0.060000000, 0.224925174, 0.003157103, 0.236539477),
                    stringsAsFactors=FALSE )
# Response-model coefficient map: responses 1-4 (MMSE, lPiB, thickness,
# dementia status), one row per linear-predictor term.  The slope terms
# (coef 2..6) are shared across the six MMSE state intercepts.
rcoef <- data.frame(response = c(rep(1,37),rep(2,3),rep(3,3),4,4),
                    lp = c(1,1,1,1,1,1,
                           2,2,2,2,2,2,
                           3,3,3,3,3,3,
                           4,4,4,4,4,4,
                           5,5,5,5,5,5,
                           6,6,6,6,6,6,
                           7,
                           8,9,10,
                           11,12,13,
                           14,15),
                    term = c('(Intercept)','age','male','educ','apoe4','ntests', # State 1 (Intercept).
                             '(Intercept)','age','male','educ','apoe4','ntests', # State 2 (Intercept).
                             '(Intercept)','age','male','educ','apoe4','ntests', # State 3 (Intercept).
                             '(Intercept)','age','male','educ','apoe4','ntests', # State 4 (Intercept).
                             '(Intercept)','age','male','educ','apoe4','ntests', # State 5 (Intercept).
                             '(Intercept)','age','male','educ','apoe4','ntests', # State 6 (Intercept).
                             '(Intercept)',
                             rep('(Intercept)',8)),
                    coef = c( 1,2,3,4,5,6,
                              7,2,3,4,5,6,
                              8,2,3,4,5,6,
                              9,2,3,4,5,6,
                              10,2,3,4,5,6,
                              11,2,3,4,5,6,
                              12,
                              13,14,15,
                              16,17,18,
                              19,20),
                    init = c( 0.538414931,-0.014554096,-0.089207557, 0.058688151,-0.004627584, 0.057826362,
                              0.178435764,-0.014554096,-0.089207557, 0.058688151,-0.004627584, 0.057826362,
                              -0.672421219,-0.014554096,-0.089207557, 0.058688151,-0.004627584, 0.057826362,
                              -1.827065032,-0.014554096,-0.089207557, 0.058688151,-0.004627584, 0.057826362,
                              -2.341332296,-0.014554096,-0.089207557, 0.058688151,-0.004627584, 0.057826362,
                              -5.208797471,-0.014554096,-0.089207557, 0.058688151,-0.004627584, 0.057826362,
                              -1.261838927,
                              -1.084010116, 0.110684536,-2.240498159,
                              2.655601472, 2.498948044,-3.869343130,
                              -4.712799832,-2.163822374), stringsAsFactors=FALSE )
# Initial-state probability coefficients (three intercepts).
pcoef <- data.frame(lp = c(1,2,3),
                    term = c('(Intercept)','(Intercept)','(Intercept)'),
                    coef = c(1,2,3),
                    init = c(-3.9835,-4.9499,-6.5185), stringsAsFactors=FALSE )
# Coefficient map for the MCMC routine: 16 x 31; each column is a parameter
# group, each nonzero entry the index of a model coefficient (0 = unused).
# Must stay consistent with the `coef` indices in qcoef/rcoef/pcoef above.
cmap <- matrix(c( 1,12,17,22,27,32,37,42,42,42,42,58,63, 68,74,75,76,77,78,79, 80:87, 88,89,90,
                  2,13,18,23,28,33,38,43,43,43,43,59,64, 69,69,69,69,69,69,  0, rep(0,8),  0, 0, 0,
                  3,14,19,24,29,34,39,44,44,44,44,60,65, 70,70,70,70,70,70,  0, rep(0,8),  0, 0, 0,
                  4,15,20,25,30,35,40,45,45,45,45,61,66, 71,71,71,71,71,71,  0, rep(0,8),  0, 0, 0,
                  5,16,21,26,31,36,41,46,46,46,46,62,67, 72,72,72,72,72,72,  0, rep(0,8),  0, 0, 0,
                  6, 0, 0, 0, 0, 0, 0,47,47,47,47, 0, 0, 73,73,73,73,73,73,  0, rep(0,8),  0, 0, 0,
                  7, 0, 0, 0, 0, 0, 0,48,48,48,48, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  8, 0, 0, 0, 0, 0, 0,49,49,49,49, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  9, 0, 0, 0, 0, 0, 0,50,50,50,50, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  10, 0, 0, 0, 0, 0, 0,51,51,51,51, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  11, 0, 0, 0, 0, 0, 0,52,52,52,52, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0,53,53,53,53, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0,54,54,54,54, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0,55,55,55,55, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0,56,56,56,56, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0,57,57,57,57, 0, 0,  0, 0, 0, 0, 0, 0,  0, rep(0,8),  0, 0, 0
                  ), 16, 31, byrow=TRUE)
# Prior means/SDs for the lPiB and cortical-thickness response models
# (two component means plus a log-variance each).
lpibM <- c( -1.3, -.5, log((.4/3)^2))
lpibSd <- c( .2, .2, 2)
thickM <- c( 3.14, 2.34, log((.4/3)^2))
thickSd <- c( .2, .2, 2)
# Death rate adjustment for women.
# 507.1/590.4 = 0.859
# log(exp(-4.26)* 0.859) = -4.411986
# Death rate adjustment for men.
# 724.8/817.3 = 0.887
# log(exp(-4.26+.44)* 0.887) = -3.93991
# -3.93991--4.411986 = 0.472076
# Prior-mean matrix (16 x 28); NA marks unused cells — prior.means keeps only
# the non-NA entries, in column-major order, matching prior.std below.
means <- matrix(c( NA,-3,-3,-3,-3,-3,-3,-4.41,-4,-4, rep(-.28,4),-7.3,-7.3,-.7, lpibM,thickM,-3,-3,-3.5,-6,-6,
                   0,.1,.1,.1,.1,.1,.1, .094,.1,.1,  0,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   0, 0, 0, 0, 0, 0, 0,  .47, 0, 0,  0,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   0, 0, 0, 0, 0, 0, 0,    0, 0, 0,  0,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   -5, 0, 0, 0, 0, 0, 0,    0, 0, 0,  0,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   -4,NA,NA,NA,NA,NA,NA, -.75,NA,NA,  0,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   -3,NA,NA,NA,NA,NA,NA, -.60,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   -2,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   -1,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   0,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   1,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   2,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   NA,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   NA,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   NA,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11),
                   NA,NA,NA,NA,NA,NA,NA,    0,NA,NA, NA,NA,NA,NA, NA, NA, NA, rep(NA,11)
                   ), 16, 28, byrow=TRUE)
prior.means <- c(means[!is.na(means)])
# Prior standard deviations, 16 x 28, same layout as `means` above.  The
# '10x_scale' branch shrinks selected death-transition SDs by 10 and then
# inflates the whole matrix by 10 as a prior-sensitivity check.
if(sensitivity_test=='no_scale'){
  stdev <- matrix(c( NA,  1,  1,  1,  1,  1,  1,   .1,  1,  1, rep(.75,4), 3, 3, 2, lpibSd,thickSd,1,1,.25,1,1,
                     1,.05,.05,.05,.05,.05,.05,  .01,.05,.05,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     .1,  1,  1,  1,  1,  1,  1,  .05,  1,  1,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     1, .1, .1, .1, .1, .1, .1,   .1, .1, .1,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     1,  1,  1,  1,  1,  1,  1,    1,  1,  1,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA, .375, NA, NA,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,   .3, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     3, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     3, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     3, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11)
                     ), 16, 28, byrow=TRUE)
} else if(sensitivity_test=='10x_scale'){
  stdev <- matrix(c( NA,  1,  1,  1,  1,  1,  1,  .01,  1,  1, rep(.75,4), 3, 3, 2, lpibSd,thickSd,1,1,.25,1,1,
                     1,.05,.05,.05,.05,.05,.05, .001,.05,.05,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     .1,  1,  1,  1,  1,  1,  1, .005,  1,  1,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     1, .1, .1, .1, .1, .1, .1,  .01, .1, .1,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     1,  1,  1,  1,  1,  1,  1,   .1,  1,  1,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,.0375, NA, NA,  1,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,  .03, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     2, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     3, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     3, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     3, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11),
                     NA, NA, NA, NA, NA, NA, NA,    0, NA, NA, NA,NA,NA,NA,NA,NA,NA, rep(NA,11)
                     ), 16, 28, byrow=TRUE) * 10
} else {
  # Fixed: previously any other value left `stdev` undefined, producing a
  # cryptic "object 'stdev' not found" error on the next line.
  stop("Unknown sensitivity_test value: '", sensitivity_test,
       "' (expected 'no_scale' or '10x_scale')", call. = FALSE)
}
prior.std <- c(stdev[!is.na(stdev)])
# Fit the hidden-Markov model, either by MCMC ('Bayesian') or by maximum
# likelihood ('ML'), then save the fit.  Both calls share the same model
# formula; only the fitting routine (mfun/mpar) and the entry-state vector
# differ (ML uses tiny nonzero entry probabilities for the dementia/dead
# states).  NOTE(review): hmm(), mcmcRoutine and the *Resp/pfun functions come
# from the hmm package and SetupFunc.r sourced above.
if(args[3] == 'Bayesian'){
  source('mcmcRoutine.r')
  splineParam = 47:57
  # Design matrices handed to the MCMC routine for the prior/proposal setup.
  designMat <- as.matrix( cbind( '(Intercept)'=rep(1,nrow(DATA)), DATA[,c('iage','male','educ','apoe4')]) )
  cubicSplineMat <- as.matrix( DATA[,c('male','educ','apoe4','c1_amyl','c2_amyl','c3_amyl','c4_amyl','c5_amyl',
                                       'c6_amyl','c7_amyl','c8_amyl')])
  splineMat <- as.matrix( cbind( '(Intercept)'=rep(1,nrow(DATA)), DATA[,c('iage','male','educ','apoe4','c1',
                                                                          'c2','c3','c4','c5','c6','c7','c8',
                                                                          'c9','c10','c11')]) )
  Output <- hmm(hbind(age, mmse, lpib, thickness, DemStatus) ~ 1 + iage + age + male + educ + apoe4 + ntests +
                  c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11 + c1_amyl + c2_amyl + c3_amyl +
                  c4_amyl + c5_amyl + c6_amyl + c7_amyl + c8_amyl, data=DATA, scale=FALSE,
                mc.cores=NCORES, entry=c(1,1,1,1,0,0,0), id=ptnum, qmatrix=qmat, qcoef=qcoef, death=7,
                rfun=list(MMSEResp, lpibResp, thickResp, demResp), otype=obstype, rcoef=rcoef, pfun=pfun,
                pcoef=pcoef, mfun=mcmcRoutine, mpar=list(cmap=cmap, splineParam=splineParam, steps=STEPS,
                                                         burnin=BURNIN, prior.means=prior.means, prior.std=prior.std, designMat=designMat,
                                                         splineMat=splineMat, cubicSplineMat=cubicSplineMat))
} else if(args[3] == 'ML'){
  Output <- hmm(hbind(age, mmse, lpib, thickness, DemStatus) ~ 1 + iage + age + male + educ + apoe4 + ntests +
                  c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11 + c1_amyl + c2_amyl + c3_amyl +
                  c4_amyl + c5_amyl + c6_amyl + c7_amyl + c8_amyl, data=DATA, scale=FALSE,
                mc.cores=NCORES, entry=c(1,1,1,1,10^-8,10^-8,10^-8), id=ptnum, qmatrix=qmat, qcoef=qcoef,
                death=7, rfun=list(MMSEResp, lpibResp, thickResp, demResp), otype=obstype, rcoef=rcoef,
                pfun=pfun, pcoef=pcoef, mfun=optim, mpar=list(control=list(fnscale= -1, maxit=100),
                                                              method='BFGS', gr= "hmmgrad", hessian=TRUE))
}
# Persist the fitted object next to the input data.
save(Output, file=paste0(Dir,'Output',args[1],'.rda'))
|
c4e45ab427d1ffa68d1f3ffcd675377dd19d9dd3
|
88b5925e6cd7c318e82cbedd429bdad8d6229c72
|
/Rpackage/scrbook/R/scr2secr.R
|
dcd8589369691963b897416f3dbffc34f6087b40
|
[] |
no_license
|
jaroyle/scrbook
|
290a6055cbcc6a34b2915a6267cbddbfab632a06
|
6554f7cf3af819870a001022a25c020379db475f
|
refs/heads/master
| 2021-03-19T16:02:26.707679
| 2017-12-19T15:05:35
| 2017-12-19T15:05:35
| 2,988,377
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
r
|
scr2secr.R
|
scr2secr <-
function(scrtraps, type = "proximity") {
  # Convert an SCR-style trap data frame to a secr 'traps' object.
  #
  # scrtraps: data frame whose first three columns identify the trap
  #           (id, x, y) and whose remaining columns hold per-occasion
  #           trap-operation codes.
  # type:     detector type passed through to read.traps().
  #
  # Collapse the per-occasion columns into the single "usage" string that
  # read.traps() expects.  vapply over seq_len() is safe for zero-row input
  # (the original `for(i in 1:nrow(...))` loop was not) and guarantees a
  # character result.
  hold <- vapply(seq_len(nrow(scrtraps)),
                 function(i) paste(scrtraps[i, 4:ncol(scrtraps)], collapse = ""),
                 character(1))
  traps1 <- cbind(scrtraps[, 1:3], "usage" = hold)
  #
  # read.traps() only reads from a file, so round-trip through "traps.txt"
  # to attach the trap-operation information (same side effect as before).
  #
  write.table(traps1, "traps.txt", row.names = FALSE, col.names = FALSE)
  trapfile2 <- read.traps("traps.txt", detector = type)
  return(trapfile2)
}
|
5bc6d668b70087a0379d1847d64454bf6a55e57c
|
f84139438cc48d29c45d14596638d5027ca640af
|
/man/stretch.Rd
|
7d990a53d05eb64fcee4965e31788394cedc539f
|
[] |
no_license
|
cran/currentSurvival
|
15c99ed83f3d31b6e9efab253f1cb5147ceb64a4
|
076bfc7cc68a49acedd51befa0bcab02a165e674
|
refs/heads/master
| 2022-06-05T23:53:27.567904
| 2022-05-12T06:20:02
| 2022-05-12T06:20:02
| 17,695,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
rd
|
stretch.Rd
|
\name{stretch}
\alias{stretch}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Assigns Survival Estimates to Each Day of the Follow-up }
\description{
This is an internal function and is not usually called by the user. \cr
This function assigns survival estimates to each day of the follow-up.
}
\usage{
stretch(S, maxx)
}
\arguments{
\item{S}{ a list containing: \cr
x - the time points in which the survival curve has a step \cr
y - survival estimates at the time points in which the survival curve has a step }
\item{maxx}{ maximum follow-up time in days }
}
\value{ a list containing the following elements:
  \item{x}{ days from 0 to maximum follow-up time \code{maxx} }
\item{y}{ survival estimates at each day }
}
\author{
Eva Janousova, Tomas Pavlik \cr
Institute of Biostatistics and Analyses \cr
Masaryk University, Brno, Czech Republic \cr
\email{ janousova@iba.muni.cz }
}
\references{
Pavlik T., Janousova E., Pospisil Z., et al. (2011). Estimation of current cumulative incidence of leukaemia-free patients and current leukaemia-free survival in chronic myeloid leukaemia in the era of modern pharmacotherapy. \emph{BMC Med Res Methodol} \bold{11}:140.
}
\seealso{
\code{\link{clfs}}
}
\examples{
# This is an internal function and is not usually called by the user.
}
\keyword{ survival }
|
a56745935d3c4f56cc5f1d1465bec2e863bbdebf
|
005bb9edaf643be9c8548d803483628c80cc0225
|
/second_fall_experiment/scripts/clay_R_scripts/crap/scale_data_analysis_3.R
|
ffc7efd422dac5cf90996676ab431dcc8a2aaf8e
|
[] |
no_license
|
sean-gl/2020_greenhouse
|
16b35b6b035a1926dc8858c7d0b2eba6b8dbe864
|
691c3923c75eea1bd57b8d218b343e8fdc10c33c
|
refs/heads/master
| 2021-05-22T00:22:46.456072
| 2020-05-25T17:28:54
| 2020-05-25T17:28:54
| 252,879,077
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,784
|
r
|
scale_data_analysis_3.R
|
# Analysis of lysimeter (scale) output from the fall greenhouse experiment.
# NOTE(review): rm(list = ls()) and setwd() make this script unsafe to
# source() from other code; acceptable for interactive use only.
rm(list = ls())
# Directory containing the raw per-scale CSV logs.
wd <- "/home/sean/Documents/Clay/greenhouse/2019 greenhouse data/experiment2/scale_output/"
setwd(wd)
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(ggplot2)
require(lubridate)
require(plyr)
require(dplyr)
require(tidyr)
###
### ------Read and process data
###

# read in files
files <- dir(wd)
# Fixed: anchored pattern — the original grepl('.csv', ...) treated the dot
# as "any character" and also matched names merely containing "csv".
files <- files[grepl('\\.csv$', files)]
files
# Raw logs have no header row; columns are scale id, timestamp, weight.
datList <- lapply(files, function(x) read.csv(x, header = FALSE, skipNul = FALSE, stringsAsFactors = FALSE))
names(datList) <- files
# Tag each data frame with its source file and parse timestamps as MST.
# seq_along() is safe when no files match (1:length(datList) would give 1:0).
for(i in seq_along(datList)) {
  datList[[i]][,4] <- files[i]
  colnames(datList[[i]]) <- c('scale','timestamp','weight','file')
  datList[[i]]$timestamp <- as.POSIXct(datList[[i]]$timestamp, format="%Y-%m-%d %H:%M:%S", tz = 'MST')
}

### ------Preliminary combine data and check for duplicate rows
scaledat <- do.call(rbind, datList)
rownames(scaledat) <- NULL
anyDuplicated(scaledat[,c('scale','timestamp')])
### ------ Fix time zone issues with certain csv files
# NOTE: This file has a "jump back" 1 hour for daylight savings time
head(datList$`scale_output_wet_moderate_11-04.csv`)
tail(datList$`scale_output_wet_moderate_11-04.csv`)
x = datList$`scale_output_wet_moderate_11-04.csv`
# First differences of the timestamps locate the DST discontinuity.
x$d = c(0, diff(x$timestamp))
which(x$d == -3570) # time set back 1 hour here (3600 seconds minus 30 seconds = 3570)
# NOTE(review): row 57737 is hard-coded from the which() result above for
# THIS raw file; it must be re-derived if the file ever changes.
x$timestamp[57737:nrow(x)] <- x$timestamp[57737:nrow(x)] + 3600
x[57720:57750,]  # eyeball the repaired region
x$d <- NULL
datList$`scale_output_wet_moderate_11-04.csv` <- x
rm(x)
head(datList$`scale_output_wet_moderate_11-04.csv`)
tail(datList$`scale_output_wet_moderate_11-04.csv`)
# Re-Combine all data
scaledat <- do.call(rbind, datList)
rownames(scaledat) <- NULL
## Need to convert pre-baseline data from MDT to MST by subtracting 1 hour
# timestamps before 2019-10-23 17:00 MDT are the following files:
MDT_files <- c('scale_output_wet_moderate_10-08.csv',
               'scale_output_wet_moderate_10-22.csv',
               'scale_output_wet_moderate_10-23_MDT.csv',
               'scale_output_dry_10-4.csv',
               'scale_output_dry_10-17.csv',
               'scale_output_dry_10-23.csv')
head(scaledat[scaledat$file == "scale_output_wet_moderate_10-23_MDT.csv", ])
ind <- scaledat$file %in% MDT_files
scaledat$timestamp[ind] <- scaledat$timestamp[ind] - 3600
# The MDT shift can create (scale, timestamp) collisions; drop them.
dups <- duplicated(scaledat[,c('scale','timestamp')])
which(dups)
View(scaledat[dups,])  # NOTE(review): View() only works interactively
# remove one duplicated row
scaledat <- scaledat[!dups, ]
head(scaledat[scaledat$file == "scale_output_wet_moderate_10-23_MDT.csv", ])
### ------ Change data types and further data "massaging"
scaledat$scale <- factor(scaledat$scale)
# NOTE: these are blocks, not treatments!!
scaledat[scaledat$scale %in% c(1:4, 15), 'treatment'] <- 'W'
scaledat[scaledat$scale %in% 5:8, 'treatment'] <- 'M'
scaledat[scaledat$scale %in% c(9, 10, 12, 14, 16), 'treatment'] <- 'D'
scaledat$treatment <- factor(scaledat$treatment)
scaledat$date <- date(scaledat$timestamp)
scaledat$hour <- hour(scaledat$timestamp)
scaledat$minute <- minute(scaledat$timestamp)
scaledat$timeofday <- scaledat$hour * 60 + scaledat$minute
# remove row with no scale number
which(is.na(scaledat$scale))
scaledat[is.na(scaledat$scale),]
scaledat <- scaledat[!is.na(scaledat$scale), ]
# remove erroneous dates
summary(scaledat$date)
# these are bad data (I checked original csv file, date is wrong there), remove them
ind <- scaledat$date == '2019-02-14'
scaledat <- scaledat[!ind, ]
# order by timestamp and scale
scaledat <- scaledat[with(scaledat, order(scale, timestamp)), ]
# add relative_weight (relative to starting weight)
for(s in unique(scaledat$scale)) {
initial_wt <- scaledat$weight[scaledat$scale == s][1]
print(initial_wt)
scaledat$rel_weight[scaledat$scale == s] <- scaledat$weight[scaledat$scale == s] - initial_wt
}
# Table dates and treatments
with(scaledat[scaledat$date <= '2019-11-11', ], table(date, treatment))
###
### Plotting
###
# plot all data
ggplot(scaledat, aes(x=timestamp, y=rel_weight, color=scale)) +
geom_point() +
facet_wrap(~treatment) +
scale_x_datetime(date_labels = "%H", breaks = '12 hours') +
NULL
# ylim
### plot subsets
# date range
subscale <- subset(scaledat, date >= '2019-11-24')
# keep only every 5 minutes
rowsToKeep <- seq(1, nrow(scaledat), 10)
subscale <- scaledat[rowsToKeep, ]
subscale <- subset(subscale, date >= '2019-11-04')
ggplot(subscale, aes(x=timestamp, y=weight, color=scale)) +
# geom_point() +
geom_line() +
facet_wrap(~treatment) +
ylim(c(9,15)) +
scale_x_datetime(date_labels = "%d", breaks = '1 day')
#### fit a line and calculate slope every 10 minutes (n=20 readings)
testdat <- subset(scaledat, date >= '2019-11-15' & scale == 10)
# plot(weight~timestamp, testdat)
min_interval = 30
s <- seq(1, nrow(testdat), min_interval*2)
s2 <- testdat$timestamp[s]
head(s)
head(s2)
z <- lapply(s, function(x) {
mod = lm(weight ~ timestamp, testdat[x:(x+(min_interval*2)-1), ])
slope = coef(mod)[2]
rsq = summary(mod)$adj.r.squared
dat = data.frame(m=slope, arsq=rsq, timestamp = testdat$timestamp[x])
return(dat)
})
d = do.call(rbind, z)
rm(z)
d$m = abs(d$m * 3600) # convert to L/hr
ggplot(d, aes(x=timestamp, y=m)) +
geom_point() +
geom_line() +
ylim(c(0, 0.15)) +
# ylim(c(-4e-5, 1e-5)) +
# scale_x_datetime(date_labels = "%H", breaks = '4 hours') +
scale_x_datetime(date_labels = "%d", breaks = '1 day') +
ggtitle('W-10 (drought) transpiration rate, L/hr', subtitle = 'Nov. 14-19')
plot(s2, z, ylim = c(-4e-5, 1e-5))
zpos = abs(z)
zpos = zpos / max(zpos, na.rm = T)
plot(s2, zpos)
# plot data between 6 and 9 pm only
scaleSub <- subset(scaledat, hour >= 17 & hour <= 20 & date == '2019-10-28')
scaleSub <- subset(scaledat, hour >= 17 & hour <= 20)
ggplot(scaleSub, aes(x=timestamp, y=rel_weight, color=scale)) +
geom_point() +
facet_wrap(~treatment) +
scale_x_datetime(date_labels = "%H", breaks = '1 hours') +
NULL
###
### Analysis: Water given each day
###
# calculate max - min during 1-hr interval surrounding watering
waterGiven <- ddply(scaledat, .(date, scale, treatment), function(x) {
# For dates < 10/28, water was given at 19:00 MST. For dates >= 10/28, water was at 20:05 MST.
if(x$date < '2019-10-28') {
w <- subset(x, timeofday >= 17*60 + 30 & timeofday <= 18*60 + 30)
} else {
if(x$date < '2019-11-27') {
w <- subset(x, timeofday >= 18*60 + 35 & timeofday <= 19*60 + 35)
} else {
w <- subset(x, timeofday >= 19*60 + 35 & timeofday <= 20*60 + 35)
}
}
return(setNames(max(w$weight) - min(w$weight), 'water_given'))
})
# summarize by treatment
waterGiven %>%
filter(water_given != '-Inf') %>%
group_by(treatment) %>%
summarize(mean_water_given = mean(water_given), sd_water_given = sd(water_given))
# summarize by scale
waterGiven %>%
filter(water_given != '-Inf') %>%
group_by(scale) %>%
summarize(mean_water_given = mean(water_given), sd_water_given = sd(water_given))
### Compute water used daily, between 2 am and 8 pm
head(scaledat)
scaleWide <- subset(scaledat, date >= "2019-11-15")
# scaleWide <- scaledat
scaleWide <- scaleWide[ , c('scale','timestamp','weight')]
scaleWide <- spread(scaleWide, key = scale, value = weight)
head(scaleWide)
formalArgs(SummarizeTimeseries)
scaleSummary <- SummarizeTimeseries(dat = scaleWide,
tsCol = 'timestamp',
# measCols = as.character(c(1:10, 12, 14:16)),
measCols = as.character(c(1:10, 12, 14)),
interval = 15,
fillGaps = TRUE)
# dat = scaleWide
# tsCol = 'timestamp'
# measCols = as.character(c(1:10, 12, 14:16))
# interval = 15
# fillGaps = TRUE
# GET 5-MINUTE MEAN VALUES
scaleMeans <- scaleSummary$means
View(scaleMeans)
View(scaleSummary$sampleSizes)
# NOTE: THIS IS A ROUGH APPROXIMATION... NEED TO REFINE....
dailyUseWide <- ddply(scaleMeans, .(date), function(x) {
# startingValues <- x[x$interval == 60*4, as.character(c(1:10, 12, 14:16))]
# endingValues <- x[x$interval == 60*19, as.character(c(1:10, 12, 14:16))]
startingValues <- x[x$interval == 60*4, as.character(c(1:10, 12, 14))]
endingValues <- x[x$interval == 60*19, as.character(c(1:10, 12, 14))]
return(startingValues - endingValues)
})
# convert to long for plotting
# dailyUse <- gather(dailyUseWide, 'scale', 'water_use', 2:15)
dailyUse <- gather(dailyUseWide, 'scale', 'water_use', as.character(c(1:10, 12, 14)))
dailyUse$treatment[dailyUse$scale %in% c(1:4, 15)] <- 'W'
dailyUse$treatment[dailyUse$scale %in% 5:8] <- 'M'
dailyUse$treatment[dailyUse$scale %in% c(9,10,12,14,16)] <- 'D'
ggplot(dailyUse, aes(x = date, y = water_use, color = scale, group = scale)) +
geom_line() +
geom_point() +
facet_wrap(~treatment) +
scale_x_date(date_labels = "%m-%d", breaks = '7 days')
dailyUseMeans <- ddply(dailyUse, .(date, treatment), function(x) {
setNames(mean(x$water_use, na.rm = T), 'mean_water_use')
})
ggplot(dailyUseMeans, aes(x=date, y=mean_water_use, color=treatment)) +
geom_line() + geom_point() +
scale_x_date(date_labels = "%m-%d", breaks = '7 days')
dailyUseMeans %>%
filter(date >= "2019-11-11" & date != '2019-11-18') %>%
group_by(treatment) %>%
summarize(mean = mean(mean_water_use, na.rm = T))
# convert WaterGiven to wide format
waterGivenWide <- waterGiven %>%
filter(date >= '2019-11-04') %>%
select(date, scale, water_given) %>%
spread(key = 'scale', value = 'water_given')
givenMinusUsed <- waterGivenWide - dailyUseWide
givenMinusUsed$date <- waterGivenWide$date
givenMinusUsedLong <- gather(givenMinusUsed, 'scale', 'given_used', 2:ncol(givenMinusUsed))
givenMinusUsedLong$treatment[givenMinusUsedLong$scale %in% c(1:4, 15)] <- 'dry'
givenMinusUsedLong$treatment[givenMinusUsedLong$scale %in% 5:8] <- 'moderate'
givenMinusUsedLong$treatment[givenMinusUsedLong$scale %in% c(9,10,12,14,16)] <- 'wet'
ggplot(givenMinusUsedLong, aes(x = date, y = given_used, color = scale)) +
geom_line() +
geom_point() +
facet_wrap(~treatment)
#### OLD CODE .....
# plot date range
subdat <- scaledat[scaledat$timestamp > '2019-10-01 00:00' & scaledat$timestamp < '2019-10-05 20:00', ]
subdat <- scaledat[scaledat$timestamp > '2019-08-29 02:00' & scaledat$timestamp < '2019-08-30 19:10', ]
# see data from 7 am to 7 pm only
subdat <- scaledat[scaledat$hour >= 7 & scaledat$hour <= 19, ]
ggplot(subdat, aes(x=timestamp, y=weight, color=scale)) +
geom_point() +
facet_wrap(~treatment) +
scale_x_datetime(date_labels = "%H", breaks = '2 hours') +
scale_y_continuous(limits = c(10, 15))
### ----- Analysis: Calculate daily minima, this is indicative of biomass gain (??)
# get daily min weights by scale
daily_min <- ddply(scaledat, .(date, scale), function(x) setNames(min(x$weight), 'weight'))
# plot them
ggplot(daily_min, aes(x=date, y=weight, color=scale)) +
geom_line()
# get daily differences in min weight
ddply(daily_min, .(scale), function(x) diff(x$weight))
## Takeaway: Hard to detect any biomass differences between days
## max minus min (this should be indicative of the amount of water given each day)
daily_maxmin <- ddply(scaledat, .(date, scale), function(x) setNames(max(x$weight) - min(x$weight), 'weight_diff'))
# plot them
ggplot(daily_maxmin, aes(x=date, y=weight_diff, color=scale)) +
geom_line()
require(dplyr)
# filter out dates with erratic data
daily_maxmin %>% filter(date < '2019-09-23' & date > '2019-09-17') %>% group_by(scale) %>% summarise(Mean=mean(weight_diff))
x = daily_maxmin %>% filter(date < '2019-09-23' & date > '2019-09-17') %>% group_by(scale)
boxplot(x$weight_diff ~ x$scale)
x$trt <- 'none'
x$trt[x$scale %in% 1:4] <- 'wet'
x$trt[x$scale %in% 5:8] <- 'mod'
x$trt[x$scale %in% 9:14] <- 'dry'
boxplot(x$weight_diff ~ x$trt)
### ----- Analysis:
|
bd8aff71da8345a611a9a8a46c1050f5be270686
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/UniIsoRegression/inst/testfiles/pre_2d_l2_inc/libFuzzer_pre_2d_l2_inc/pre_2d_l2_inc_valgrind_files/1612736876-test.R
|
32eded7b6d1d70fc24e73264b86125ac52f75df3
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 312
|
r
|
1612736876-test.R
|
# Auto-generated fuzzer regression test (libFuzzer + valgrind harness) for the
# internal C routine UniIsoRegression:::pre_2d_l2_inc. It replays one captured
# input: a 1x1 `data` matrix and a 5x3 weight matrix `w` whose first column
# holds denormal doubles (typical of raw fuzzer bytes reinterpreted as numeric).
# The call should complete without crashing; `str()` prints the result shape.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), w = structure(c(1.92302623483659e-314, 4.94065645841247e-324, 2.87111428111265e-319, 8.20251227200115e-304, 2.71615461243555e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 3L)))
result <- do.call(UniIsoRegression:::pre_2d_l2_inc,testlist)
str(result)
|
20412e4056441080be6b70dff62be906462e79c5
|
5ad706fdaa7ec49564fe8f48f37aafd44ebed397
|
/R/import.pems.working.R
|
99f377b720ec5245825c0a45c9268d963186f643
|
[] |
no_license
|
cran/pems.utils
|
34d79305d0cbfdddc5f798e49381a264eee3d864
|
8763120b096dc7fe37482f47f0f6188931ab404a
|
refs/heads/master
| 2021-07-07T11:48:43.028189
| 2021-04-25T05:40:02
| 2021-04-25T05:40:02
| 52,953,467
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,699
|
r
|
import.pems.working.R
|
##########################
##########################
#various pems imports
##########################
##########################
#additional
#work in progress
#############################
#############################
##importRoyalTek2PEMS
#############################
#############################
#quick importer for Leeds City Council GPS
#started 05-11-2011
#kr v0.0.2 (09-11-2011)
#three functions
###################
#importRoyalTek2PEMS
#importRoyalTekTXT
#importRoyalTekNMEA
#
#currently only exporting the wrapper
###################
#to do
###################
#unit assignment
##
#lat, long correction
#for north east, west, south
#
###################
#notes
###################
#
###################
#importRoyalTek2PEMS
###################
#parent function
#(TXT/NMEA switch)
# Import a RoyalTek GPS log and wrap it as a 'pems' object.
#
# Dispatches to importRoyalTekTXT() or importRoyalTekNMEA() based on
# `file.type`; "special" (the default) guesses the format from the file
# extension. `checkOption` and `makePEMS` are pems.utils helpers defined
# elsewhere in the package (not visible here).
#
# Args:
#   file.name: path to the log file (interactive chooser by default).
#   file.type: "special" (guess), "txt" or "nmea".
#   vbox:      label stored in the output's $vbox slot.
#   history, constants: passed through to makePEMS().
#   ...:       forwarded to the format-specific importer and makePEMS().
# Returns (invisibly): a pems object.
importRoyalTek2PEMS <- function(file.name = file.choose(), file.type = c("special", "txt", "nmea"),
                     vbox = "RoyalTEk", history = NULL, constants = NULL, ... ){
    #setup
    this.call <- match.call()
    fun.name <- "importRoyalTek2PEMS"
    # validate file.type against the declared choices (pems.utils helper)
    file.type <- checkOption(file.type[1], formals(importRoyalTek2PEMS)$file.type,
                             "file.type", "known import methods",
                             fun.name = fun.name)
    #if file.type is "special", guesstimate format from the last 4 characters
    # of the file name (".txt" -> txt, "nmea" -> nmea); otherwise file.type
    # stays "special" and the import below is skipped, triggering the error.
    if(file.type == "special"){
        temp <- tolower(substr(file.name, nchar(file.name)-3, nchar(file.name)))
        if(temp == ".txt")
            file.type <- "txt"
        if(temp == "nmea")
            file.type <- "nmea"
    }
    #import if type valid
    ans <- NULL
    units <- NA  # NOTE(review): `units` is set but never used in this function
    if(is.character(file.type)){
        if(file.type == "txt")
            ans <- importRoyalTekTXT(file.name = file.name, ...)
        if(file.type == "nmea")
            ans <- importRoyalTekNMEA(file.name = file.name, ...)
    }
    #stop if not valid (no importer produced a result)
    if(is.null(ans))
        stop(paste("\t In ", fun.name,"(...) selected file type not recognised", sep=""),
             paste("\n\t [suggest setting file.type in call if known]", sep=""),
             call. = FALSE, domain = NA)
    # wrap the imported data frame as a pems object (pems.utils helper)
    output <- makePEMS(x = ans, units = NULL, constants = constants, history = history, ...)
    output$vbox <- vbox
    #reset history: record this top-level call as the last history entry
    output$history[[length(output$history)]] <- this.call
    #return output invisibly so it can be piped/assigned without printing
    invisible(output)
}
####################
#importRoyalTekTXT
####################
# Import a RoyalTek "TXT" report file as a data frame.
#
# The TXT format is assumed to be one "Field: value" line per field per
# record; each entry in `fields` is used both as a grep pattern to find its
# lines and as the prefix stripped from them. NOTE(review): this assumes
# every field occurs once per record and in consistent order — if field
# counts are ragged, the sapply() below returns a list rather than a
# character matrix; confirm against real files.
#
# Args:
#   file.name: path to the TXT file.
#   n:         max lines to read (NULL = all; mapped to -1L for readLines).
#   to.lower:  lower-case the output column names?
#   fields:    per-record field labels to extract (also used as regex).
#   info:      file-level header labels, stored in comment(output).
#   exclude:   regex fragments stripped from extracted values.
# Returns (invisibly): a data frame with a POSIXct `date` column (built from
#   the Year/Month/Day/Hour/Minute/Second fields, parsed as GMT) followed by
#   the numeric-coerced fields.
importRoyalTekTXT <- function(
         file.name = file.choose(), n = NULL, to.lower = TRUE,
         fields = c("Record", "Event Type", "Year", "Month", "Day",
                    "Hour", "Minute", "Second", "Latitude", "Longitude",
                    "Altitude", "PDOP", "HDOP", "Satellite No",
                    "Speed", "Direction" ),
         info = c("Datalogs", "Date", "Time", "Device ID",
                  "About total Numbers"),
         exclude = c(": ", "\\(KMs/hr\\)"),
         ...
){
    #set up: readLines() treats n = -1L as "read everything"
    if(is.null(n))
        n <- -1L
    #import raw lines
    source <- readLines(file.name, n = n, ...)
    #recover columns and strip field names
    # (one column per field; value = matching lines with the label removed)
    a <- sapply(fields, function(x){
                    temp <- source[grep(x, source)]
                    gsub(x, "", temp)})
    #strip excludes (": " separators and unit suffixes)
    for(i in exclude)
        a <- gsub(i, "", a)
    #make data frame
    a <- data.frame(a, stringsAsFactors = FALSE)
    #get date string "YYYY-MM-DD HH-MM-SS" from the component columns
    date <- paste(a$Year, a$Month, a$Day, sep="-")
    date <- paste(date, paste(a$Hour, a$Minute, a$Second, sep="-"), sep=" ")
    #prepend the date column, keeping everything character for now
    a <- as.data.frame(cbind(date = date, a), stringsAsFactors = FALSE)
    #set date format (parsed as GMT; "-" separators within the time, see above)
    a$date <- as.POSIXct(strptime(a$date, format = "%Y-%m-%d %H-%M-%OS", "GMT"))
    # coerce all non-date columns to numeric
    # NOTE(review): "Event Type" may be non-numeric and would become NA here
    # — TODO confirm against real files.
    a[,2:ncol(a)] <- apply(a[,2:ncol(a)], 2, as.numeric)
    #name to lower?
    if(to.lower)
        names(a)<-tolower(names(a))
    #get info lines and attach them as the data frame's comment attribute
    b <- sapply(info, function(x) source[grep(x, source)])
    comment(a) <- paste(b)
    #output
    invisible(a)
}
#####################
#importRoyalTekNMEA
#####################
#quick importer for Leeds City Council GPS
#NMEA handler
# Import a RoyalTek NMEA log as a data frame (currently $GPGGA sentences only).
#
# Lines matching `filter` are split on commas; `filter.method` declares how
# each comma-separated token is handled:
#   "del"  drop the token (the leading "$GPGGA" tag),
#   "time" numeric time, "lat"/"long" degrees conversion, "n" numeric,
#   "c" character, "f" factor.
# `fields` names the kept tokens (after "del" removal both vectors have the
# same length). For non-GPGGA filters the caller must supply both vectors.
#
# Args:
#   file.name: path to the NMEA file.
#   n:         max lines to read (NULL = all).
#   to.lower:  lower-case output column names?
#   fields, filter.method: per-token names / handling codes (see above).
#   filter:    regex selecting the sentence type.
# Returns (invisibly): a data frame of the parsed sentences.
importRoyalTekNMEA <- function(
         file.name = file.choose(), n = NULL, to.lower = TRUE,
         fields = NULL, filter.method = NULL,
         filter = "\\$GPGGA",
         ...
){
    #set up: readLines() treats n = -1L as "read everything"
    if(is.null(n))
        n <- -1L
    #import raw lines
    source <- readLines(file.name, n = n, ...)
    #filter: built-in defaults for $GPGGA (15 tokens incl. the sentence tag)
    if(filter == "\\$GPGGA"){
        if(is.null(filter.method))
            filter.method <- c("del", "time", "lat", "f", "long", "f", "n", "n", "n", "n", "c", "c", "c", "c", "c")
        if(is.null(fields))
            fields <- c("time", "latitude", "North", "longitude", "West", "X", "X", "X", "X", "X", "X", "X", "X", "X")
    }
    #stop if invalid filter (no token spec available)
    if(is.null(filter.method) | is.null(fields))
        stop("INVALID FILTER/FIELDS")
    ans <- source[grep(filter[1], source)]
    #make dataframe for filter
    #comma delim currently hard coded
    # NOTE(review): rbind on strsplit output recycles short rows silently —
    # malformed sentences would corrupt columns rather than error; verify.
    ans <- data.frame(do.call(rbind, strsplit(ans, ",")), stringsAsFactors = FALSE)
    #drop deleted cases (columns flagged "del", i.e. the "$GPGGA" tag)
    ans <- ans[filter.method != "del"]
    filter.method <- filter.method[filter.method != "del"]
    #rename (make.names de-duplicates the repeated "X" placeholders)
    names(ans) <- make.names(fields, unique = TRUE)
    if(to.lower)
        names(ans) <- tolower(names(ans))
    #finish below
    #validate below
    #rationalise below
    #get list of times
    #work to do here
    #need date for full time stamp
    temp <- names(ans)[filter.method=="time"]
    for(i in temp){
        ans[, i] <- as.numeric(ans[, i])
    }
    #get list of lat, long
    #needs validating
    # NMEA encodes position as ddmm.mmmm; convert to decimal degrees:
    # degrees + minutes/60. NOTE(review): the N/S and E/W hemisphere columns
    # (the "f" fields) are NOT applied, so southern/western coordinates keep
    # a positive sign — see the to-do note in the file header.
    temp <- names(ans)[filter.method=="lat" | filter.method=="long"]
    for(i in temp){
        unit <- as.numeric(ans[, i])
        ans[, i] <- floor(unit / 100)
        unit <- unit - (ans[, i] * 100)
#        ans[, i] <- ans[, i] + (floor(unit) / 60)
#        ans[, i] <- ans[, i] + (unit - floor(unit)) / 60
        ans[, i] <- ans[, i] + ((unit) / 60)
    }
    #get list of numerics
    temp <- names(ans)[filter.method=="n"]
    for(i in temp){
        ans[, i] <- as.numeric(ans[, i])
    }
    #get list of characters
    temp <- names(ans)[filter.method=="c"]
    for(i in temp){
        ans[, i] <- as.character(ans[, i])
    }
    #get list of factors
    temp <- names(ans)[filter.method=="f"]
    for(i in temp){
        ans[, i] <- as.factor(ans[, i])
    }
    #output
    invisible(ans)
}
|
e80cdcd07dcc8b42136fab5c0e03f0f54ebff9dc
|
bd204a62b4fcf1c6714cc66eaf08217b03a0a36d
|
/cyclistic_bike_share/scripts/data_summarize.R
|
bfe89a2a920fef2ffdee9b960a07673b49aa8090
|
[] |
no_license
|
labwilliam/data_analysis_projects
|
672bb4200b049b686f8440e810675efc7a706e7a
|
eb194d933cf96ab2212b1edfd0e7ce3958b56c27
|
refs/heads/main
| 2023-05-27T21:23:34.170078
| 2021-06-11T03:19:35
| 2021-06-11T03:19:35
| 375,850,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,189
|
r
|
data_summarize.R
|
# Return the most frequent value(s) in `data` (the statistical mode).
# Ties are preserved: every value sharing the top frequency is returned,
# in order of first appearance. Base R only.
mode_func <- function(data) {
  vals <- unique(data)
  counts <- tabulate(match(data, vals))
  vals[counts == max(counts)]
}
# Write `data` to the analyzed-datasets folder as "<file_name>.csv".
# Relies on readr::write_csv() being attached by the calling script.
save_file <- function(data, file_name) {
  out_path <- paste0("../datasets/analyzed_datasets/", file_name, ".csv")
  write_csv(data, out_path)
}
# Summarizing data
# Read the aggregated data (readr; station ids forced to character so
# leading zeros / alphanumeric ids survive).
data <- read_csv("../datasets/analyzed_datasets/aggregated_data.csv",
                 col_types = cols(start_station_id = col_character(),
                                  end_station_id = col_character()))
# Identify the top 10 stations that members start a ride
member <- filter(data, membership == "member")
member <- count(member, start_station_id, name = "frequency", sort = TRUE)
member$membership = "member"
top_member <- head(member, 10)
# Identify the top 10 stations that casuals start a ride
casual <- filter(data, membership == "casual")
casual <- count(casual, start_station_id, name = "frequency", sort = TRUE)
casual$membership = "casual"
top_casual <- head(casual, 10)
# Complement the top stations of members with the casual counts for the
# same stations, so both groups can be compared station-by-station.
top_member_complete <- inner_join(select(top_member, "start_station_id"),
                                  unique(casual), by = "start_station_id")
top_member_complete <- union(top_member, top_member_complete)
# Complement the top stations of casual with the members
top_casual_complete <- inner_join(select(top_casual, "start_station_id"),
                                  unique(member), by = "start_station_id")
top_casual_complete <- union(top_casual, top_casual_complete)
# List the station names (id -> name lookup table)
stations_names <- unique(select(data, "start_station_id", "start_station_name"))
# Add the station names to top stations of members; reorder to
# id, name, frequency, membership
top_member_complete <- inner_join(top_member_complete, stations_names,
                                  by = "start_station_id")
top_member_complete <- top_member_complete[, c(1, 4, 2, 3)]
# Add the station names to top stations of casuals
top_casual_complete <- inner_join(top_casual_complete, stations_names,
                                  by = "start_station_id")
top_casual_complete <- top_casual_complete[, c(1, 4, 2, 3)]
# Save the frequency of top stations of members data as a CSV file
save_file(top_member_complete, "top_member")
# Save the frequency of top stations of casuals data as a CSV file
save_file(top_casual_complete, "top_casual")
# Group data by member relationship
# NOTE(review): `data` stays grouped by membership from here on, so every
# later count()/summarise() is implicitly per-membership.
data <- group_by(data, membership)
# Summarize the data: ride-length stats and modal weekday per membership
summarized_data <- summarize(data, min_ride_length = min(ride_length),
                             mean_ride_length = mean(ride_length),
                             max_ride_length = max(ride_length),
                             mode_day_week = mode_func(day_of_week))
frequency_rides <- count(data, membership, name = "frequency_rides")
summarized_data <- inner_join(summarized_data, frequency_rides,
                              by = "membership")
summarized_data <- summarized_data[, c(2, 3, 4, 5, 6, 1)]
save_file(summarized_data, "summarized_data")
# Frequency of rides by day (day_of_week encoded 1=Sunday .. 7=Saturday)
frequency_week <- count(data, day_of_week, name = "frequency")
frequency_week$day_of_week <- recode(frequency_week$day_of_week, "1" = "Sunday",
                                     "2" = "Monday", "3" = "Tuesday",
                                     "4" = "Wednesday", "5" = "Thursday",
                                     "6" = "Friday", "7" = "Saturday")
frequency_week <- rename(frequency_week, weekday = day_of_week)
frequency_week <- frequency_week[, c(2, 3, 1)]
# Save the frequency of week data as a CSV file
save_file(frequency_week, "frequency_week")
# Frequency of rideables ("electric_bike" -> "electric bike", etc.)
frequency_rideables <- count(data, rideable_type, name = "frequency")
frequency_rideables$rideable_type <- str_replace(frequency_rideables$rideable_type, "_", " ")
frequency_rideables <- frequency_rideables[, c(2, 3, 1)]
# Save the frequency of rideables data as a CSV file
save_file(frequency_rideables, "frequency_rideables")
# Splitting datetime to date and time (lubridate); unused splits kept
# commented out for reference.
tmp <- as_date(data$started_at)
#data$year <- format(tmp, "%Y")
#data$month <- format(tmp, "%m")
data$month_year <- format(tmp, "%m/%Y")
#data$day <- format(tmp, "%d")
#data$time <- format(data$started_at, format = "%H:%M:%S")
data$hour <- as.numeric(format(data$started_at, format = "%H"))
# Frequency of rides by month
frequency_rides_month <- count(data, month_year, name = "frequency")
frequency_rides_month <- frequency_rides_month[, c(2, 3, 1)]
# Save the frequency of rides by month data as a CSV file
save_file(frequency_rides_month, "frequency_rides_months")
# Frequency of rides by period of the day: bucket the start hour into
# Night [0,6], Morning (6,12], Afternoon (12,18], Evening (18,23]
period_time <- hour(hm("00:00", "06:00", "12:00", "18:00", "23:59"))
period_name <- c("Night", "Morning", "Afternoon", "Evening")
data$period <- cut(x = data$hour, breaks = period_time,
                   labels = period_name, include.lowest = TRUE)
frequency_period <- count(data, period, name = "frequency")
frequency_period <- frequency_period[, c(2, 3, 1)]
# Save the frequency of period of day data as a CSV file
save_file(frequency_period, "frequency_rides_period")
|
4b3e37e363ca4a46f9231c32ce5a2974fccce55b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sequoia/examples/writeSeq.Rd.R
|
f7d306a788ac0cec9699d94fd3c630a3bb6f5aaa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
writeSeq.Rd.R
|
library(sequoia)
### Name: writeSeq
### Title: write sequoia output to excel or text files
### Aliases: writeSeq
### ** Examples
## Not run:
##D writeSeq(SeqList, OutFormat="xls", file="MyFile.xlsx")
##D
##D # add additional sheets to the excel file:
##D library(xlsx)
##D write.xlsx(MyData, file = "MyFile.xlsx", sheetName="ExtraData",
##D col.names=TRUE, row.names=FALSE, append=TRUE, showNA=FALSE)
## End(Not run)
|
6b8a0aadc4a5792298ec49e7d1f70241c2a4eac6
|
421ec6d784e9da250cb7d976cbacb8784cf782e4
|
/R/ParameterSet_methods.R
|
2398206194fed229db9360fac1fa12f767f816d9
|
[
"MIT"
] |
permissive
|
hadley/param6
|
b03ed91e2d5f1646d51c75c5490d25c69ece0ab3
|
7eb791f0685df4045f10b362a4e1cdd14dd6d40e
|
refs/heads/main
| 2023-08-14T10:48:56.854254
| 2021-07-28T20:23:31
| 2021-07-28T20:23:31
| 398,088,712
| 1
| 0
|
NOASSERTION
| 2021-08-19T22:22:13
| 2021-08-19T22:22:13
| null |
UTF-8
|
R
| false
| false
| 13,929
|
r
|
ParameterSet_methods.R
|
#---------------
# Public Methods
#---------------
# Standalone implementation of ParameterSet$initialize().
# Validates the list of 'prm' objects, then indexes their ids, supports,
# values and tags into the R6 private environment; immutable values (declared
# via tag_properties) are cached so they can be re-imposed on every update.
# `self`/`private` are injected by the R6 method wrapper throughout this file.
.ParameterSet__initialize <- function(self, private, prms, tag_properties) { # nolint
  if (length(prms)) {
    checkmate::assert_list(prms, "prm", any.missing = FALSE)
    prms <- unname(prms)
    ids <- vapply(prms, "[[", character(1), "id")
    if (any(duplicated(ids))) {
      stop("ids are not unique.")
    } else {
      # name the prm list by id for the lookups below
      names(prms) <- ids
      private$.id <- ids
    }
    # supports are stored as strings keyed into support_dictionary;
    # .isupports is the inverted support -> ids map
    private$.supports <- vapply(prms, "[[", character(1), "support")
    private$.isupports <- invert_names(private$.supports)
    private$.value <- un_null_list(lapply(prms, "[[", "value"))
    tag_list <- un_null_list(lapply(prms, "[[", "tags"))
    if (length(tag_list)) {
      private$.tags <- tag_list
      private$.tag_properties <-
        .assert_tag_properties(tag_properties, unique(unlist(tag_list)), self)
      # cache values of immutable-tagged parameters at construction time
      if ("immutable" %in% private$.tag_properties) {
        private$.immutable <- self$get_values(
          tags = self$tag_properties["immutable"], simplify = FALSE
        )
      }
      # ids and tags share one namespace; collisions would break lookups
      if (any(duplicated(c(private$.id, unique(unlist(private$.tags)))))) {
        stop("ids and tags must have different names.")
      }
    } else {
      private$.tags <- list()
    }
  } else {
    # empty parameter set
    private$.value <- list()
    private$.id <- list()
    private$.tags <- list()
  }
  invisible(self)
}
# Print method: renders the set as a data.table, with supports shown via
# their strprint() representation.
.ParameterSet__print <- function(self, private, sort) { # nolint
  dt <- suppressWarnings(as.data.table(self, sort = sort))
  if (nrow(dt)) {
    dt$Support <- vapply(dt$Support, function(x) x$strprint(), character(1))
  }
  print(dt)
}
# Thin delegate to the shared .get_values() helper (defined elsewhere),
# passing the stored value list plus the user's filters.
.ParameterSet__get_values <- function(self, private, id, tags, transform, # nolint
                                      inc_null, simplify) {
  .get_values(self, private, private$.value, id, tags, transform, inc_null,
              simplify)
}
# Add a dependency: parameter `id` depends on parameter `on` under condition
# `cnd`. Validates ids, prevents self/duplicate dependencies and cycles, and
# checks current values still satisfy the new dependency table.
.ParameterSet__add_dep <- function(self, private, id, on, cnd) { # nolint
  checkmate::assert_class(cnd, "cnd")
  # allow both prefixed ("a__x") and unprefixed ("x") forms
  all_ids <- unique(c(self$ids, unprefix(self$ids)))
  checkmate::assert_subset(id, all_ids)
  checkmate::assert_subset(on, all_ids)
  if (!is.null(attr(cnd, "id"))) {
    checkmate::assert_choice(attr(cnd, "id"), on)
  }
  if (id == on) {
    stop("Parameters cannot depend on themselves.")
  }
  # hacky fix: copy args so the data.table NSE below can distinguish the
  # local values (aid/aon) from the `id`/`on` columns of .deps
  aid <- id
  aon <- on
  nok <- !is.null(private$.deps) &&
    nrow(subset(private$.deps, grepl(aid, id) & grepl(aon, on)))
  if (nok) {
    stop(sprintf("%s already depends on %s.", id, on))
  }
  support <- unique(
    unlist(private$.supports[grepl(on, names(private$.supports))]))
  support <- support_dictionary$get(support)
  if (is.null(self$deps)) {
    deps <- data.table(id = character(0L), on = character(0L),
                       cond = list())
  } else {
    deps <- self$deps
  }
  new_dt <- rbind(deps,
                  data.table(id = id, on = on,
                             cond = list(assert_condition(on, support, cnd))))
  assert_no_cycles(new_dt)
  # verify existing values against the candidate dependency table before
  # committing it
  .check_deps(self, self$values, new_dt, id, TRUE)
  private$.deps <- new_dt
  invisible(self)
}
# Replicate the set `times` times, namespacing every id/tag/value/support as
# "<prefix>__<id>". `prefix` is either one string (auto-numbered 1..times)
# or a vector of length `times`.
.ParameterSet__rep <- function(self, private, times, prefix) { # nolint
  if (length(prefix) == 1) {
    prefix <- paste0(prefix, seq_len(times))
  } else if (length(prefix) != times) {
    stop(sprintf("'prefix' should either be length '1' or same as 'times' (%d)", times)) # nolint
  }
  assert_alphanum(prefix)
  lng <- length(self)
  private$.id <- paste(rep(prefix, each = lng), rep(private$.id),
                       sep = "__")
  private$.isupports <- lapply(private$.isupports,
                               function(x) paste(rep(prefix,
                                                     each = length(x)),
                                                 rep(x, times), sep = "__"))
  private$.supports <- rep(private$.supports, times)
  names(private$.supports) <- paste(rep(prefix, each = lng),
                                    names(private$.supports), sep = "__")
  values <- rep(private$.value, times)
  names(values) <- paste(rep(prefix, each = length(private$.value)),
                         names(values), sep = "__")
  private$.value <- values
  tags <- rep(private$.tags, times)
  names(tags) <- paste(rep(prefix, each = length(private$.tags)),
                       names(tags), sep = "__")
  private$.tags <- tags
  # immutable cache must follow the same renaming
  if (!is.null(private$.immutable)) {
    imm <- rep(private$.immutable, times)
    names(imm) <- paste(rep(prefix, each = length(private$.immutable)),
                        names(imm), sep = "__")
    private$.immutable <- imm
  }
  invisible(self)
}
# Extract a sub-ParameterSet by id, tags, OR prefix (exactly one selection
# mode; prefix is mutually exclusive with id/tags). Works by cloning and
# removing everything that was not selected; a prefix extraction also strips
# the prefix from the clone's names.
.ParameterSet__extract <- function(self, private, id, tags, prefix) { # nolint
  if (is.null(id) && is.null(prefix) && is.null(tags)) {
    stop("One argument must be non-NULL.")
  } else if ((!is.null(id) || !is.null(tags)) && !is.null(prefix)) {
    stop("'prefix' must be NULL if 'id' or 'tags' is non-NULL")
  }
  if (!is.null(prefix)) {
    ids <- names(.filter_field(self, private$.value,
                               sprintf("^%s__", assert_alphanum(prefix))))
  } else {
    ids <- names(.filter_field(self, private$.value, id, tags))
  }
  rm_ids <- setdiff(self$ids, ids)
  ## create new parameterset
  pnew <- self$clone(deep = TRUE)
  ## remove non-extracted ids
  pnew$remove(rm_ids)
  ## remove prefix if required
  if (!is.null(prefix)) {
    get_private(pnew)$.unprefix(prefix)
  }
  pnew
}
# Remove parameters by id OR by prefix (exactly one non-NULL). Scrubs every
# private index that references the removed ids: immutable cache, deps table,
# trafo list, tags, values, supports and the inverted support map.
.ParameterSet__remove <- function(self, private, id, prefix) { # nolint
  if (sum(is.null(id) + is.null(prefix)) != 1) {
    stop("Exactly one argument must be non-NULL.")
  }
  if (!is.null(prefix)) {
    stopifnot(length(prefix) == 1)
    pars <- self$ids[grepl(prefix, get_prefix(self$ids))]
  } else {
    pars <- id
  }
  if (setequal(pars, self$ids)) {
    stop("Can't remove all parameters")
  }
  mtc_pars <- paste0(pars, collapse = "|")
  private$.immutable[pars] <- NULL
  if (length(private$.immutable) == 0) {
    private$.immutable <- NULL
  }
  if (!is.null(private$.deps)) {
    # drop any dependency mentioning a removed id on either side
    private$.deps <- private$.deps[!(id %in% pars | on %in% pars), ]
    if (nrow(private$.deps) == 0) {
      private$.deps <- NULL
    }
  }
  if (is.list(private$.trafo)) {
    private$.trafo[c(prefix, pars)] <- NULL
    if (length(private$.trafo) == 0) {
      private$.trafo <- NULL
    } else if (checkmate::test_list(private$.trafo, len = 1) &&
               (is.null(names(private$.trafo)) || names(private$.trafo) == "")) {
      # a single unnamed trafo collapses back to a bare function
      private$.trafo <- private$.trafo[[1]]
    }
  }
  private$.tags[pars] <- NULL
  if (length(private$.tags) == 0) {
    private$.tags <- list()
    private$.tag_properties <- NULL
  }
  ## TODO: Consider adding removal of tag property
  private$.value[pars] <- NULL
  if (length(private$.value) == 0) {
    private$.value <- list()
  }
  private$.supports <- private$.supports[setdiff(names(private$.supports),
                                                 pars)]
  which <- grepl(mtc_pars, private$.isupports)
  private$.isupports[which] <- lapply(private$.isupports[which],
                                      function(.x) setdiff(.x, pars))
  private$.isupports <- drop_null(private$.isupports)
  private$.id <- setdiff(private$.id, pars)
  invisible(self)
}
# Apply the stored trafo to a value list `x`. A trafo may be a single
# function, an unnamed list of functions (applied in sequence to everything),
# or a named list whose names are prefixes — each named trafo is applied only
# to the values under "<name>__", unnamed entries apply to all of `x`.
.ParameterSet__transform <- function(self, private, x) { # nolint
  trafo <- self$trafo
  if (is.null(trafo)) {
    return(x)
  }
  if (checkmate::test_function(trafo)) {
    x <- trafo(x, self)
  } else {
    if (is.null(nms <- names(trafo))) {
      for (i in seq_along(trafo)) {
        x <- trafo[[i]](x, self)
      }
    } else {
      # start from the values not claimed by any named prefix, then append
      # each trafo's output
      newx <- x[!grepl(paste0(sprintf("%s__", nms), collapse = "|"), names(x))]
      for (i in seq_along(trafo)) {
        ## if unnamed then apply to all
        if (is.na(nms[[i]]) || nms[[i]] == "") {
          newx <- append(newx, trafo[[i]](x, self))
        } else {
          which <- grepl(sprintf("%s__", nms[[i]]), names(x))
          newx <- append(newx, trafo[[i]](x[which], self))
        }
      }
      x <- newx
    }
  }
  x
}
#---------------
# Active Bindings
#---------------
# Active binding (read-only): supports as a named list of set objects,
# resolved from the stored support strings via support_dictionary.
.ParameterSet__supports <- function(self, private) { # nolint
  sups <- support_dictionary$get_list(private$.supports)
  names(sups) <- self$ids
  sups
}
# Active binding: get/set tag_properties; the setter re-validates against the
# tags currently in use.
.ParameterSet__tag_properties <- function(self, private, x) { # nolint
  if (missing(x)) {
    private$.tag_properties
  } else {
    private$.tag_properties <-
      .assert_tag_properties(x, unlist(self$tags), self)
    invisible(self)
  }
}
# Active binding: get/set parameter values. The setter drops NULLs, rejects
# unknown ids, runs the full value/support/dependency/tag-property check, and
# silently re-imposes cached immutable values over any attempted change.
.ParameterSet__values <- function(self, private, x) { # nolint
  if (missing(x)) {
    return(sort_named_list(private$.value))
  } else {
    x <- un_null_list(x)
    bad_nms <- names(x) %nin% self$ids
    if (any(bad_nms)) {
      stop(
        sprintf("You can't set ids that don't exist in the parameter set: %s",
                string_as_set(names(x)[bad_nms]))
      )
    }
    if (length(x)) {
      .check(self, private,
             id = names(x), value_check = x,
             support_check = private$.isupports, dep_check = self$deps,
             tag_check = self$tag_properties
      )
    } else if (!is.null(self$tag_properties) &&
               "required" %in% names(self$tag_properties)) {
      # an empty assignment would unset required parameters
      stop("Not all required parameters are set")
    } else if (!is.null(self$tag_properties) &&
               "immutable" %in% names(self$tag_properties)) {
      stop("Immutable parameters cannot be updated after construction")
    }
    # overwrite any immutable ids in x with their cached values, then append
    # the immutable values that x did not mention at all
    which <- intersect(names(private$.immutable), names(x))
    x[which] <- private$.immutable[which]
    x <- c(x, private$.immutable[names(private$.immutable) %nin% names(x)])
    private$.value <- x
    invisible(self)
  }
}
# Active binding: get/set the transformation. Accepts a function or a (named)
# list of functions; list names use "__" separators (dots are rewritten).
# After setting, current values are transformed and re-checked — on failure
# the previous trafo is restored and the error re-raised.
.ParameterSet__trafo <- function(self, private, x) { # nolint
  if (missing(x)) {
    private$.trafo
  } else {
    if (length(x)) {
      if (checkmate::test_list(x)) {
        x <- unlist(x, recursive = FALSE)
        if (!is.null(names(x))) {
          names(x) <- gsub(".", "__", names(x), fixed = TRUE)
        }
        x <- x[!duplicated(x)]
        lapply(x, checkmate::assert_function, args = c("x", "self"),
               ordered = TRUE)
        # a single unnamed function collapses back to a bare function
        if (length(x) == 1 && (is.null(names(x)) || is.na(names(x)))) {
          x <- x[[1]]
        }
      } else {
        checkmate::assert_function(x, args = c("x", "self"), TRUE)
      }
    } else {
      x <- NULL
    }
    otrafo <- private$.trafo
    private$.trafo <- x
    vals <- checkmate::assert_list(self$transform(self$values))
    tryCatch(.check(self, private, id = names(vals), value_check = vals,
                    support_check = private$.isupports,
                    dep_check = self$deps, transform = FALSE),
             error = function(e) {
               # roll back to the previous trafo before propagating
               private$.trafo <- otrafo
               stop(e)
             })
    invisible(self)
  }
}
#---------------
# Private Methods
#---------------
.ParameterSet__.update_support <- function(self, private, x) { # nolint
## get sets as strings
strs <- vapply(x, as.character, character(1), n = Inf)
## add to dictionary as required
miss <- !support_dictionary$has(strs)
if (any(miss)) {
support_dictionary$add(setNames(x[miss], strs[miss]))
}
## update supports
private$.supports[names(x)] <- strs
private$.isupports <- invert_names(private$.supports)
invisible(self)
}
# Prepend "prefix__" to every id-bearing field of the parameter set
# (ids, immutables, tags, values, supports, trafos, dependencies and
# linked-tag properties). Inverse operation of .ParameterSet__.unprefix.
.ParameterSet__.prefix <- function(self, private, prefix) { # nolint
  private$.id <- give_prefix(self$ids, prefix)
  private$.immutable <- prefix_list(private$.immutable, prefix)
  private$.tags <- prefix_list(private$.tags, prefix)
  private$.value <- prefix_list(private$.value, prefix)
  private$.supports <- prefix_list(private$.supports, prefix)
  # Supports were renamed above, so the inverted lookup must be rebuilt.
  private$.isupports <- invert_names(private$.supports)
  # A list-valued trafo is keyed by parameter id; a single function is not.
  if (is.list(private$.trafo)) {
    private$.trafo <- prefix_list(private$.trafo, prefix)
  }
  if (length(private$.deps)) {
    # data.table `:=` updates the dependency table's id columns in place.
    private$.deps[, id := give_prefix(id, prefix)]
    private$.deps[, on := give_prefix(on, prefix)]
    # Conditions may carry an "id" attribute that also needs the prefix.
    private$.deps$cond <- lapply(private$.deps$cond, function(.x) {
      at <- attr(.x, "id")
      if (!is.null(at)) {
        attr(.x, "id") <- give_prefix(at, prefix)
      }
      .x
    })
  }
  if (length(private$.tag_properties) &&
      "linked" %in% names(private$.tag_properties)) {
    tags <- private$.tag_properties$linked
    private$.tag_properties$linked <-
      give_prefix(private$.tag_properties$linked, prefix)
    # NOTE(review): matching here uses grepl() over the (coerced) tag list,
    # while .ParameterSet__.unprefix uses `%in%` for the same selection --
    # confirm both pick out the same elements, e.g. for multi-tag entries.
    which <- grepl(paste0(tags, collapse = "|"), private$.tags)
    if (any(which)) {
      for (i in seq_along(private$.tags[which])) {
        # Only rename the tag entries that are actually linked tags.
        iwhich <- private$.tags[which][[i]] %in% tags
        private$.tags[which][[i]][iwhich] <-
          give_prefix(private$.tags[which][[i]][iwhich], prefix)
      }
    }
  }
  invisible(self)
}
# Strip the "prefix__" marker from every id-bearing field of the parameter
# set; inverse operation of .ParameterSet__.prefix.
.ParameterSet__.unprefix <- function(self, private, prefix) { # nolint
  private$.id <- unprefix(self$ids)
  private$.immutable <- unprefix_list(private$.immutable)
  private$.tags <- unprefix_list(private$.tags)
  private$.value <- unprefix_list(private$.value)
  private$.supports <- unprefix_list(private$.supports)
  # Supports were renamed above, so the inverted lookup must be rebuilt.
  private$.isupports <- invert_names(private$.supports)
  # A list-valued trafo is keyed by parameter id; a single function is not.
  if (is.list(private$.trafo)) {
    private$.trafo <- unprefix_list(private$.trafo)
  }
  if (length(private$.deps)) {
    # data.table `:=` updates the dependency table's id columns in place.
    private$.deps[, id := unprefix(id)]
    private$.deps[, on := unprefix(on)]
    # Conditions may carry an "id" attribute that also needs unprefixing.
    private$.deps$cond <- lapply(private$.deps$cond, function(.x) {
      at <- attr(.x, "id")
      if (!is.null(at)) {
        attr(.x, "id") <- unprefix(at)
      }
      .x
    })
  }
  if (length(private$.tag_properties) &&
      "linked" %in% names(private$.tag_properties)) {
    tags <- private$.tag_properties$linked
    private$.tag_properties$linked <-
      unprefix(private$.tag_properties$linked)
    # NOTE(review): matching here uses `%in%` on the tag list, while
    # .ParameterSet__.prefix uses grepl() for the same selection -- confirm
    # both pick out the same elements, e.g. for multi-tag entries.
    which <- private$.tags %in% tags
    if (any(which)) {
      for (i in seq_along(private$.tags[which])) {
        iwhich <- private$.tags[which][[i]] %in% tags
        # Bug fix: use single-bracket subassignment. `iwhich` is a logical
        # vector, and `[[` errors unless it selects exactly one element;
        # `[` matches the sibling .ParameterSet__.prefix implementation.
        private$.tags[which][[i]][iwhich] <-
          unprefix(private$.tags[which][[i]][iwhich])
      }
    }
  }
  invisible(self)
}
|
1a0826a132eafce740152616653eb363bda690cb
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/R/gridTrends.r
|
27b7280aaaa2ec67664422437998a68d35689811
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 367
|
r
|
gridTrends.r
|
#' Extract yearly time series of a gridded fishery statistic
#'
#' For each season year, summarises `variable` per grid cell via
#' lobGridPlot(), then pulls out the cell value for each requested grid.
#'
#' @param data logbook data frame with columns SYEAR, LFA, GRID_NUM and
#'   `variable` (e.g. logsInSeason).
#' @param grids vector of grid (SID) identifiers to extract.
#' @param yrs vector of season years to loop over.
#' @param variable name of the column to summarise (default "CPUE").
#' @param fun summary function applied within each grid cell (default median).
#' @return a list with one element per grid, each a numeric vector indexed
#'   by position in `yrs` (NA where the grid has no data that year).
#' @export
gridTrends = function(data, grids, yrs, variable = "CPUE", fun = median) {
  grid.lst <- as.list(rep(NA, length(grids)))
  for (i in seq_along(yrs)) {
    # Bug fix: summarise the `data` argument; the original ignored it and
    # read the global `logsInSeason` instead.
    griddata <- lobGridPlot(
      subset(data, SYEAR == yrs[i], c("LFA", "GRID_NUM", variable)),
      FUN = fun
    )$pdata
    for (j in seq_along(grids)) {
      z <- griddata$Z[griddata$SID == grids[j]]
      # Guard: leave NA for years where the grid has no observations
      # (zero-length subassignment would otherwise error).
      if (length(z) > 0) grid.lst[[j]][i] <- z
    }
  }
  return(grid.lst)
}
|
4a97e14884a9d39dc11fa4c1723fd1c12c3fcd5c
|
0645b7cf525ec6294c059fe5eda8b743a1416624
|
/R/1-wordcount.R
|
38f28a3c242d29153cdefb6d1d3d97a663889d14
|
[] |
no_license
|
jeffreybreen/tutorial-rmr2-airline
|
d8645c5126aa5bbf1060a495e3df3a608a3e3799
|
26a91e52e295a97fc3d07ef220a3f3bb4ebd0d30
|
refs/heads/master
| 2021-01-13T02:27:54.866452
| 2015-03-06T23:49:56
| 2015-03-06T23:49:56
| 9,311,321
| 2
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,639
|
r
|
1-wordcount.R
|
#!/usr/bin/env Rscript
#
# Example 1: wordcount
#
# Tally the number of occurrences of each word in a text
#
# from https://github.com/RevolutionAnalytics/RHadoop/blob/master/rmr2/docs/tutorial.md
#
library(rmr2)

# Set "LOCAL" to TRUE to execute using rmr2's local backend.
# Otherwise, use Hadoop (which needs to be running, correctly configured, etc.)
# Note: use TRUE/FALSE, not T/F -- T and F are ordinary variables that can
# be reassigned.
LOCAL <- TRUE

if (LOCAL) {
  rmr.options(backend = 'local')
  # we have smaller extracts of the data in this project's 'local' subdirectory
  hdfs.data.root <- 'data/local/wordcount'
  hdfs.data <- file.path(hdfs.data.root, 'data', 'all-shakespeare-1000')
  hdfs.out.root <- hdfs.data.root
} else {
  rmr.options(backend = 'hadoop')
  # assumes 'wordcount/data' input path exists on HDFS under /rhadoop
  hdfs.data.root <- '/rhadoop/wordcount'
  hdfs.data <- file.path(hdfs.data.root, 'data')
  # writes output to 'wordcount' directory in user's HDFS home
  # (e.g., /user/cloudera/wordcount/)
  hdfs.out.root <- 'wordcount'
}
hdfs.out <- file.path(hdfs.out.root, 'out')
# Map step: split each input line into words and emit a (word, 1) pair
# per occurrence. The key `k` is unused for plain text input.
map.wc <- function(k, lines) {
  # Tokenize on runs of whitespace (use '\\W+' instead to drop punctuation).
  words <- unlist(strsplit(lines, '\\s+'))
  keyval(words, 1)
}
# Reduce step: sum the per-occurrence counts collected for one word.
reduce.wc <- function(word, counts) {
  keyval(word, sum(counts))
}
# Run the wordcount MapReduce job over `input`, optionally writing to
# `output` (NULL lets rmr2 choose a temporary location). Returns the rmr2
# big-data object pointing at the job's output.
wordcount <- function(input, output = NULL) {
  mapreduce(
    input = input,
    output = output,
    input.format = "text",
    map = map.wc,
    reduce = reduce.wc,
    combine = TRUE  # was `T`; TRUE cannot be shadowed by reassignment
  )
}
# Execute the job, pull results back from the DFS, and inspect the head.
out <- wordcount(hdfs.data, hdfs.out)
results <- from.dfs(out)
# stringsAsFactors = FALSE (was the reassignable shorthand F): keep the
# words as character strings rather than factors.
results.df <- as.data.frame(results, stringsAsFactors = FALSE)
colnames(results.df) <- c('word', 'count')
head(results.df)
|
c405864504382867023e327f1459b530947dbdb9
|
77fc5f1afbbcc237e108ec852a574b1633f90eed
|
/Code/DeschutesData.R
|
1114b312e5fb030afbe52b73e4d523134dd18b62
|
[] |
no_license
|
dungates/DSPG
|
ebff52ea4a4653952eec8759df8b07f5fd41e8a3
|
466882664a14ff7fc296388595ae51e2bf2822f8
|
refs/heads/master
| 2023-02-04T23:42:11.118631
| 2020-12-28T20:37:10
| 2020-12-28T20:37:10
| 277,666,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 61,996
|
r
|
DeschutesData.R
|
# --- Package setup -------------------------------------------------------
# NOTE: plyr must be loaded before tidyverse/dplyr to avoid masking
# dplyr verbs -- the order below does that correctly.
library(plyr)
library(ggpmisc)
library(tidyverse)
library(lubridate)
library(RColorBrewer)
library(directlabels)
library(gridExtra)
library(gtable)
library(grid)
library(lubridate)  # NOTE(review): duplicate -- lubridate already loaded above
library(readxl)
library(broom)
library(hydrostats)
# NOTE(review): setwd() in a script is fragile (breaks for anyone whose
# project lives elsewhere); consider here::here() or relative paths.
setwd("~/DSPG")
# Biggs USGS Data: tab-separated gauge export (header = T, fill = T pads
# ragged rows with NA).
BiggsData <- read.table("Data/BiggsData2.txt", header = T, fill = T, sep = "\t")
# Rename the cryptic USGS column codes to readable names.
colnames(BiggsData) <- c("Agency", "Site", "Date_time", "tz_cd", "Temperature", "Temperature_qualification", "Discharge",
                         "Discharge_qualification", "Gage_height", "Gage_height_qualification")
# Subsetting by specific year if comparisons wanted
# Biggs2011 <- BiggsData[grep("/11 ", BiggsData$Date_time), ]
# Biggs2011$Date_time <- mdy_hm(Biggs2011$Date_time)
# Biggs2012 <- BiggsData[grep("/12 ", BiggsData$Date_time), ]
# Biggs2012$Date_time <- mdy_hm(Biggs2012$Date_time)
# Biggs2013 <- BiggsData[grep("/13 ", BiggsData$Date_time), ]
# Biggs2013$Date_time <- mdy_hm(Biggs2013$Date_time)
# Biggs2014 <- BiggsData[grep("/14 ", BiggsData$Date_time), ]
# Biggs2014$Date_time <- mdy_hm(Biggs2014$Date_time)
# Biggs2015 <- BiggsData[grep("/15 ", BiggsData$Date_time), ]
# Biggs2015$Date_time <- mdy_hm(Biggs2015$Date_time)
# Biggs2016 <- BiggsData[grep("/16 ", BiggsData$Date_time), ]
# Biggs2016$Date_time <- mdy_hm(Biggs2016$Date_time)
# Biggs2017 <- BiggsData[grep("/17 ", BiggsData$Date_time), ]
# Biggs2017$Date_time <- mdy_hm(Biggs2017$Date_time)
# Biggs2018 <- BiggsData[grep("/18 ", BiggsData$Date_time), ]
# Biggs2018$Date_time <- mdy_hm(Biggs2018$Date_time)
# Biggs2019 <- BiggsData[grep("/19 ", BiggsData$Date_time), ]
# Biggs2019$Date_time <- mdy_hm(Biggs2019$Date_time)
# Biggs2020 <- BiggsData[grep("/20 ", BiggsData$Date_time), ]
# Biggs2020$Date_time <- mdy_hm(Biggs2020$Date_time)
#
# Biggs2011 <- Biggs2011 %>% mutate(Julian = yday(Date_time))
# Biggs2012 <- Biggs2012 %>% mutate(Julian = yday(Date_time))
# Biggs2013 <- Biggs2013 %>% mutate(Julian = yday(Date_time))
# Biggs2014 <- Biggs2014 %>% mutate(Julian = yday(Date_time))
# Biggs2015 <- Biggs2015 %>% mutate(Julian = yday(Date_time))
# Biggs2016 <- Biggs2016 %>% mutate(Julian = yday(Date_time))
# Biggs2017 <- Biggs2017 %>% mutate(Julian = yday(Date_time))
# Biggs2018 <- Biggs2018 %>% mutate(Julian = yday(Date_time))
# Biggs2019 <- Biggs2019 %>% mutate(Julian = yday(Date_time))
# Biggs2020 <- Biggs2020 %>% mutate(Julian = yday(Date_time))
# Biggs2011plot <- ggplot(data = Biggs2011, aes(x = Date_time, y = Temperature)) + geom_line() +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
# Biggs2012plot <- ggplot(data = Biggs2012, aes(x = Date_time, y = Temperature)) + geom_line() +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
# Biggs2019plot <- ggplot(data = Biggs2019, aes(x = Date_time, y = Temperature)) + geom_line() +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
# Biggs2020plot <- ggplot(data = Biggs2020, aes(x = Date_time, y = Temperature)) + geom_line() +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
# Plot of just a couple years, beaver themed
# ggplot() + geom_line(data = Biggs2012, aes(x = Julian, y = Temperature, color = "2012")) +
# geom_line(data = Biggs2019, aes(x = Julian, y = Temperature, color = "2019")) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) + theme_light() + scale_color_manual(values=c("darkorange", "gray1")) +
# annotate("text", x = 200, y = 10, label = "Go Beavs")
# Changing year names to be more interpretable
BiggsData <- BiggsData %>% mutate(Year = case_when(grepl("/11 ", Date_time) ~ 2011, grepl("/12 ", Date_time) ~ 2012,
grepl("/13 ", Date_time) ~ 2013, grepl("/14 ", Date_time) ~ 2014,
grepl("/15 ", Date_time) ~ 2015, grepl("/16 ", Date_time) ~ 2016,
grepl("/17 ", Date_time) ~ 2017, grepl("/18 ", Date_time) ~ 2018,
grepl("/19 ", Date_time) ~ 2019, grepl("/20 ", Date_time) ~ 2020))
# Subsetting out extraneous observations
BiggsData2 <- subset(BiggsData, !is.na(Year))
BiggsData2 <- subset(BiggsData2, !is.na(Temperature))
BiggsData2$Date_time <- mdy_hm(BiggsData2$Date_time)
# Using lubridate to standardize Date_time
BiggsData2 <- BiggsData2 %>% mutate(Julian = yday(Date_time))
# Plot of temperature data in Biggs by year - only goes back to 2011
# Consider doing by month as well
# ggplot(data = BiggsData2, aes(x = Julian, y = Temperature, color = factor(Year))) + geom_line() +
# facet_wrap( ~ Year) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# labs(title = "Temperature Data from Biggs, OR", x = "Year", color = "Year")
#### CULVER DATA
CulverData <- read.table("Data/CulverData.txt", header = T, fill = T, sep = "\t")
colnames(CulverData) <- c("Agency", "Site", "Date_time", "tz_cd", "Temperature", "Temperature_qualification", "Discharge",
"Discharge_qualification", "Gage_height", "Gage_height_qualification")
CulverData <- CulverData %>% mutate(Year = case_when(grepl("/07 ", Date_time) ~ 2007, grepl("/08 ", Date_time) ~ 2008,
grepl("/09 ", Date_time) ~ 2009, grepl("/10 ", Date_time) ~ 2010,
grepl("/11 ", Date_time) ~ 2011, grepl("/12 ", Date_time) ~ 2012,
grepl("/13 ", Date_time) ~ 2013, grepl("/14 ", Date_time) ~ 2014,
grepl("/15 ", Date_time) ~ 2015, grepl("/16 ", Date_time) ~ 2016,
grepl("/17 ", Date_time) ~ 2017, grepl("/18 ", Date_time) ~ 2018,
grepl("/19 ", Date_time) ~ 2019, grepl("/20 ", Date_time) ~ 2020))
# Just making sure data is saved in preserved in earlier versions in case different uses come up
CulverData2 <- CulverData
# Using lubridate again
CulverData2$Date_time <- mdy_hm(CulverData2$Date_time)
CulverData2 <- CulverData2 %>% mutate(Julian = yday(Date_time))
# Plot of Culver USGS temperature data by year
# Definitely do a summer based monthly comparison of temperature increase between years of 2008-2016
# ggplot(data = CulverData2, aes(x = Julian, y = Temperature, color = factor(Year))) + geom_line() +
# facet_wrap( ~ Year) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# labs(title = "Temperature Data from Culver, OR", x = "Year", color = "Year")
#### MADRAS DATA
MadrasData <- read.table("Data/MadrasData.txt", header = T, fill = T, sep = "\t")
colnames(MadrasData) <- c("Agency", "Site", "Date_time", "tz_cd", "Temperature", "Temperature_qualification", "Discharge",
"Discharge_qualification", "Gage_height", "Gage_height_qualification")
MadrasData <- MadrasData %>% mutate(Year = case_when(grepl("/07 ", Date_time) ~ 2007, grepl("/08 ", Date_time) ~ 2008,
grepl("/09 ", Date_time) ~ 2009, grepl("/10 ", Date_time) ~ 2010,
grepl("/11 ", Date_time) ~ 2011, grepl("/12 ", Date_time) ~ 2012,
grepl("/13 ", Date_time) ~ 2013, grepl("/14 ", Date_time) ~ 2014,
grepl("/15 ", Date_time) ~ 2015, grepl("/16 ", Date_time) ~ 2016,
grepl("/17 ", Date_time) ~ 2017, grepl("/18 ", Date_time) ~ 2018,
grepl("/19 ", Date_time) ~ 2019, grepl("/20 ", Date_time) ~ 2020))
MadrasData2 <- MadrasData
MadrasData2$Date_time <- mdy_hm(MadrasData2$Date_time)
MadrasData2 <- MadrasData2 %>% mutate(Julian = yday(Date_time))
# Plot of Madras USGS temperature data by year
# Definitely do a summer based monthly comparison of temperature increase between years of 2008-2016
# ggplot(data = MadrasData2, aes(x = Julian, y = Temperature, color = factor(Year))) + geom_line() +
# facet_wrap( ~ Year) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# labs(title = "Temperature Data from Madras, OR", x = "Year", color = "Year")
#This is May 15 to August 15 graph of just Madras
# justsummerdata <- MadrasData2 %>% filter(Julian > 135 & Julian < 227)
# ggplot(data = justsummerdata, aes(x = Julian, y = Temperature, color = factor(Year))) + geom_line() +
# geom_hline(aes(yintercept = 10)) +
# facet_wrap( ~ Year) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# labs(title = "Temperature Data from Madras, OR", x = "Year", color = "Year")
# Graphing before and after SWW Tower for Madras, gridded
# Madras2009 <- MadrasData2 %>% filter(Year == 2009)
# Madras2019 <- MadrasData2 %>% filter(Year == 2019)
# Madras2019 <- Madras2019 %>% mutate(Year = month(Julian))
#
# ggplot() + geom_line(data = Madras2009, aes(x = Julian, y = Temperature, color = "2009")) +
# geom_line(data = Madras2019, aes(x = Julian, y = Temperature, color = "2019")) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) + theme_light() + scale_color_manual(values=c("darkorange", "gray1")) +
# labs(y = "Temperature °C", x = "Day of Year") + ggtitle("Madras Temperature Daily") +
# theme(plot.title = element_text(hjust = 0.5)) + scale_color_discrete(name = "Year")
## MERGING DATA
BiggsData2 <- BiggsData2 %>% mutate(Location = paste("Biggs"))
MadrasData2 <- MadrasData2 %>% mutate(Location = paste("Madras"))
CulverData2 <- CulverData2 %>% mutate(Location = paste("Culver"))
### MERGING USGS DATA
partusgsData <- rbind(BiggsData2, MadrasData2)
allusgsData <- rbind(partusgsData, CulverData2)
### Merging PGE data
mergeCols <- c("Temperature","Location","Date_time")
# Note that df5 is from the PGE data file
df <- read_excel("Data/pge-water-chemistry-2015-2017.xlsx")
# Using spread to make columns for different Parameters which are listed initially under Temperature column
# df2 <- spread(data = df, Parameter, Value)
# str(df2)
# Making a new column that is composed of the units and parameters from data
df3 <- df %>% mutate(new = paste(Parameter, "in", Units))
# Deleting columns that were just pasted
df3 <- subset(df3, select = -c(Units, Parameter))
# Try later, not useful right now
# df4 <- df3 %>% gather()
# Subsetting out what appears to be additional measurements that are similar enough to be irrelevant
df4 <- df3[-c(2525,2524,2253,2254,1982,1983,1711,1712,1441,1442),]
# Spreading data by Value and then renaming the columns, deleting station ID
df5 <- spread(df4, new, Value)
df5$`Station ID` <- NULL
colnames(df5)[7] <- c("Temperature")
colnames(df5)[1] <- c("Location")
colnames(df5)[2] <- c("Date_time")
rivertempbigData <- merge(allusgsData, df5, by = mergeCols, all = TRUE)
## Taking in ODEQ data and then merging also mutatubg location names
odeqData <- read_excel("Data/ODEQ_data_datetime_revised.xlsx")
odeqData$Date <- NULL
odeqData$Time <- NULL
colnames(odeqData) <- c("Location", "Temperature", "Date_time")
rivertempbigData <- merge(rivertempbigData, odeqData, by = mergeCols, all = TRUE)
rivertempbigData <- rivertempbigData %>% mutate(Location = if_else(Location == "10511-ORDEQ", "Mirror Pond", Location)) %>%
mutate(Location = if_else(Location == "10508-ORDEQ", "Lower Bridge", Location)) %>%
mutate(Location = if_else(Location == "10506-ORDEQ", "Warm Springs", Location)) %>%
mutate(Location = if_else(Location == "10411-ORDEQ", "Deschutes River Park", Location))
# df13 <- rivertempbigData %>% filter(!is.na(`Conductivity in μS/cm`))
# CONDUCTIVITY DATA GRAPHED
# ggplot(df13, aes(x = Date_time, y = `Conductivity in μS/cm`, color = Location)) +
# geom_line() + theme(legend.position = "none") +
# geom_dl(aes(label = Location), method = list(dl.combine("last.points")), cex = 0.8)
## REMOVING USELESS COLUMNS
colnames(rivertempbigData)
rivertempbigData$Agency <- NULL
rivertempbigData$Site <- NULL
rivertempbigData$tz_cd <- NULL
rivertempbigData$Agency <- NULL
rivertempbigData$Temperature_qualification <- NULL
rivertempbigData$Discharge_qualification <- NULL
rivertempbigData$Gage_height_qualification <- NULL
## FILLING IN DATA WHERE POSSIBLE, Specifically Year and Julian, adding season and a additional date format for air temperature merging
# Creating a function for seasons
getSeason <- function(input.date){
  # Encode each date as a single MMDD number (e.g. March 19 -> 319) so the
  # season boundaries can be expressed as numeric break points.
  mmdd <- 100 * month(input.date) + day(input.date)
  # Upper MMDD limits for Winter, Spring, Summer, Fall; Winter appears twice
  # because it wraps around the calendar-year boundary.
  season <- base::cut(mmdd, breaks = c(0, 319, 620, 921, 1220, 1231))
  levels(season) <- c("Winter", "Spring", "Summer", "Fall", "Winter")
  return(season)
}
rivertempbigData <- rivertempbigData %>% mutate(Julian = yday(Date_time))
rivertempbigData <- rivertempbigData %>% mutate(Year = year(Date_time))
rivertempbigData <- rivertempbigData %>% mutate(mergeDate = ymd(as.Date(Date_time)))
rivertempbigData <- rivertempbigData %>% mutate(Season = getSeason(Date_time))
## RUN UP TO HERE FOR FIRST DATAFRAME
### MERGED DATA GRAPHS, find out how to smooth with so many observations
# ggplot() + geom_line(data = BiggsData2, aes(x = Julian, y = Temperature, color = "Biggs")) +
# geom_line(data = CulverData2, aes(x = Julian, y = Temperature, color = "Culver")) +
# geom_line(data = MadrasData2, aes(x = Julian, y = Temperature, color = "Madras")) +
# facet_wrap( ~ Year) +
# theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# labs(title = "Temperature Data from Madras, Biggs, and Culver", x = "Day of Year", color = "Location")
# Graph of all temperature data from all years available, needs data smoothing and transparency
# ggplot() + geom_line(data = BiggsData2, aes(x = Julian, y = Temperature, group = Year, color = as.factor(Year))) +
# geom_line(data = CulverData2, aes(x = Julian, y = Temperature, group = Year, color = as.factor(Year))) +
# geom_line(data = MadrasData2, aes(x = Julian, y = Temperature, group = Year, color = as.factor(Year)))
# Going just by the year 2012 which is extremely arbitrary
Biggs2012 <- BiggsData2 %>% filter(Year == 2012)
Madras2012 <- MadrasData2 %>% filter(Year == 2012)
Culver2012 <- CulverData2 %>% filter(Year == 2012)
# ggplot() + geom_line(data = Biggs2012, aes(x = Date_time, y = Temperature, color = "Biggs")) +
# geom_line(data = Culver2012, aes(x = Date_time, y = Temperature, color = "Culver")) +
# geom_line(data = Madras2012, aes(x = Date_time, y = Temperature, color = "Madras"))
#### TIME SERIES CORRELATIONS
#Try acf and pacf, test different sites data against each other, read studies, also make an overlapping plot with fish capture
plotter <- rivertempbigData %>% group_by(Location)
# PGE data has unclear location labels, this comparison makes it clear that River Mouth and Biggs data are the same locations
# riverMouth1 <- rivertempbigData %>% filter(Location == "River Mouth" | Location == "Biggs")
# ggplot(riverMouth1, aes(x = Date_time, y = Temperature, color = Location)) +
# geom_line() + scale_color_manual(values=c("darkorange", "gray1"))
## AIR TEMPERATURE DATA
airtempData <- read.csv("Data/2207755.csv")
# Converting data to better formats
airtempData$DATE <- ymd(airtempData$DATE)
airtempData$NAME <- as.character(airtempData$NAME)
# Adding a column to airtempData first for average temperature then for celsius observations
airtempData <- airtempData %>% mutate(TAVG = (TMAX+TMIN)/2)
airtempData <- airtempData %>% mutate(cTAVG = (TAVG -32)/1.8)
airtempData2 <- airtempData %>% mutate(NAME = if_else(NAME == "PELTON DAM, OR US", "Madras", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "POLLYWOG OREGON, OR US", "Maupin", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "MAUPIN 10.0 SSE, OR US", "Maupin", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "MOUNT WILSON OREGON, OR US", "Maupin", NAME))
airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "WAMIC MILL OREGON, OR US", "Maupin", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "NORTH POLE RIDGE OREGON, OR US", "Maupin", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "WASCO BUTTE OREGON, OR US", "Maupin", NAME))
airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "THE DALLES, OR US", "Biggs", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "CITY OF THE DALLES 3.0 W, OR US", "Biggs", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "MOSIER 3.8 E, OR US", "Biggs", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "DUFUR, OR US", "Biggs", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "ANTELOPE 6 SSW, OR US", "Madras", NAME))
airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "MADRAS, OR US", "Culver", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "CLEAR LAKE, OR US", "Madras", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "HEHE 1 OREGON, OR US", "Madras", NAME))
# airtempData2 <- airtempData2 %>% mutate(NAME = if_else(NAME == "MUTTON MOUNTAIN OREGON, OR US", "Madras", NAME))
#Adding a year column
airtempData2 <- airtempData2 %>% mutate(Year = year(DATE))
#Testing plot
#ggplot(airtempData2, aes(x = DATE, y = cTAVG, color = NAME)) + geom_line(linetype = "dotted")
#START HERE FOR DATA MERGE, ORGANIZE CODE ABOVE, PUT GRAPHS BELOW
airtempData2$STATION <- NULL
colnames(airtempData2) <- c("Location", "Latitude", "Longitude", "Elevation", "mergeDate", "Tavg", "Tmax", "Tmin", "Tobs", "cTAVG", "Year")
airtempData3 <- airtempData2 %>% filter(Location == "Maupin" | Location == "Madras" | Location == "Biggs" | Location == "Culver")
airtempmerge <- airtempData3 %>% filter(Year > 2006) %>% select(cTAVG, Year, Location, mergeDate) %>% group_by(Location, mergeDate)
airandwatertempdf <- rivertempbigData %>% left_join(airtempmerge, by = c("mergeDate","Year","Location")) #Note that this currently excludes ODEQ AND PGE Data, also that Maupin air temp data won't merge
airandwatertempdf <- airandwatertempdf %>% relocate(cTAVG, .after = Temperature)
# Subsets out additional observations
airandwatertempdf <- airandwatertempdf %>% distinct(mergeDate, Location, .keep_all = T) %>% arrange(mergeDate)
summary(lm(Temperature ~ cTAVG + as.factor(Year), data = airandwatertempdf))
MadrasSeasonYearlyRegression <- airandwatertempdf %>% filter(Location == "Madras") %>%
group_by(Season, Year) %>% do(model = lm(Temperature ~ cTAVG, data = .)) %>% tidy(model) %>% filter(term == "cTAVG") # try augmenting model
BiggsSeasonYearlyRegression <- airandwatertempdf %>% filter(Location == "Biggs") %>%
group_by(Season, Year) %>% do(tidy(lm(Temperature ~ cTAVG, data = .))) %>% filter(term == "cTAVG")
CulverSeasonYearlyRegression <- airandwatertempdf %>% filter(Location == "Culver" & Year > 2007) %>%
group_by(Season, Year) %>% do(tidy(lm(Temperature ~ cTAVG, data = .))) %>% filter(term == "cTAVG")
#Table of regression coefficients
allregressioncoefficients <- MadrasSeasonYearlyRegression %>%
left_join(select(BiggsSeasonYearlyRegression, estimate), by = c("Year","Season")) %>%
left_join(select(CulverSeasonYearlyRegression, estimate), by = c("Year","Season"))
allregressioncoefficients <- allregressioncoefficients[, -c(5,6,7)]
colnames(allregressioncoefficients) <- c("Season", "Year", "Explanatory Variable",
"MadrasEstimate", "BiggsEstimate", "CulverEstimate")
# Just checking the correlation coefficients of each season, spring is by far the greatest corresponding increase, fall is close second interestingly, check stargazer for more info
testplot <- airandwatertempdf %>% filter(Location == "Madras" & Season == "Spring")
testplot2 <- airandwatertempdf %>% filter(Location == "Madras" & Season == "Summer")
testplot3 <- airandwatertempdf %>% filter(Location == "Madras" & Season == "Fall")
testplot4 <- airandwatertempdf %>% filter(Location == "Madras" & Season == "Winter")
# ggplot() + geom_line(data = testplot, aes(x = Date_time, y = Temperature), color = "blue") +
# geom_line(data = testplot, aes(x = Date_time, y = cTAVG), linetype = "dashed", color = "red") + facet_wrap( ~ Year)
# Table of regression coefficients by season, all time
ols1 <- lm(Temperature ~ cTAVG, data = testplot)
ols2 <- lm(Temperature ~ cTAVG, data = testplot2)
ols3 <- lm(Temperature ~ cTAVG, data = testplot3)
ols4 <- lm(Temperature ~ cTAVG, data = testplot4)
stargazer(ols1, ols2, ols3, ols4, title = "Results", align = T, type = "text")
# Using PGE river temperature data from river mouth (RM 0), Reregulating DAM (RM 100), Kloan Rapids (RM 6.8), and Lower Wapinitia (RM 55.2)
test <- rivertempbigData %>%
filter(Location == "River Mouth" | Location == "Reregulating Dam" | Location == "Kloan Rapids" | Location == "Lower Wapinitia") %>%
select(Temperature, Date_time, Location) %>% arrange(Date_time)
riverMouthFilter <- rivertempbigData %>% filter(Location == "River Mouth")
reregDamFilter <- rivertempbigData %>% filter(Location == "Reregulating Dam")
kloanRapidsFilter <- rivertempbigData %>% filter(Location == "Kloan Rapids")
lowerWapFilter <- rivertempbigData %>% filter(Location == "Lower Wapinitia")
# Creating air temperature average by max and min then converting to Celsius and plotting over PGE river temperature data, some data
# conversion necessary
airplot <- airtempData2 %>% filter(Location == "Madras") %>% select(cTAVG, mergeDate, Tmax, Tmin)
date1 <- as.Date("2015-01-01")
date2 <- as.Date("2017-08-30")
airplot <- airplot[airplot$mergeDate >= date1 & airplot$mergeDate <= date2,]
linecolor <- c("red", "blue", "forestgreen", "purple", "bisque2")
# ggplot() + geom_line(data = test, aes(x = Date_time, y = Temperature, color = Location)) +
# geom_line(data = airplot, aes(x = as.POSIXct(mergeDate), y = cTAVG, color = "Pelton Dam Air Temperature"), linetype = "dashed") +
# scale_color_manual(values = linecolor)
# Same thing but for USGS data and plotted at specific locations
# Here is for Biggs and the Dalles
airplot2 <- airtempData2 %>% filter(Location == "Biggs") %>% select(cTAVG, mergeDate, Tmax, Tmin)
date3 <- as.Date("2010-01-01")
date4 <- as.Date("2020-07-01")
airplot2 <- airplot2[airplot2$mergeDate >= date3 & airplot2$mergeDate <= date4,]
BiggsAirandWater <- airandwatertempdf %>% filter(Location == "Biggs")
MadrasAirandWater <- airandwatertempdf %>% filter(Location == "Madras")
CulverAirandWater <- airandwatertempdf %>% filter(Location == "Culver" & Year > 2007)
# Graph of Biggs air vs water temp
# ggplot() + geom_line(data = BiggsData2, aes(x = Date_time, y = Temperature, color = Location)) +
# geom_line(data = airplot2, aes(x = as.POSIXct(mergeDate), y = cTAVG, color = "Air Temperature in The Dalles"), linetype = "dashed") +
# scale_color_brewer(palette = "Paired")
### FACET WRAPS OF BIGGS, MADRAS, AND CULVER FOR COMPARISON, add titles later
# Build a plotmath label "y = a + b x, R^2 = r2" for the Temperature-vs-
# air-temperature regression, for annotating ggplot facets with geom_text().
lm_eqn <- function(BiggsAirandWater) {
  fit <- lm(Temperature ~ cTAVG, BiggsAirandWater)
  fit_coefs <- coef(fit)
  label <- substitute(
    italic(y) == a + b %.% italic(x) * "," ~ ~italic(R)^2 ~ "=" ~ r2,
    list(
      a  = format(as.numeric(fit_coefs[1]), digits = 2),
      b  = format(as.numeric(fit_coefs[2]), digits = 2),
      r2 = format(summary(fit)$r.squared, digits = 3)
    )
  )
  as.character(as.expression(label))
}
eq <- ddply(BiggsAirandWater,.(Year, Season),lm_eqn)
# ggplot(data = BiggsAirandWater, aes(cTAVG, Temperature)) +
# geom_point() +
# geom_smooth(method = "lm", se = F) + geom_text(data = eq, aes(x = -1, y = 18, label = V1), parse = T, cex = 2, inherit.aes = F) +
# facet_wrap(Year ~ Season, ncol = 4)
# Coef stuff for Biggs
# fitmodels <- BiggsAirandWater %>% group_by(Year, Season) %>% do(model = cor.test( ~ Temperature + cTAVG, data = .))
#
# for (i in 1:37) {
# pval[i] <- fitmodels$model[[i]]$p.value
# coef[i] <- fitmodels$model[[i]]$estimate
# year[i] <- fitmodels$Year[i]
# season[i] <- fitmodels$Season[i]
# }
#
# df19 <- data.frame(pval = c(pval), coef = c(coef), year = c(year), season = c(season))
# df19$year <- as.Date(paste0(year, '-01-01'))
# ggplot(df19, aes(x = year, y = coef, color = season)) + geom_point()
#
#
# # Coef stuff for Madras
#
# fitmodels1 <- MadrasAirandWater %>% group_by(Year, Season) %>% do(model = cor.test( ~ Temperature + cTAVG, data = .))
#
# for (i in 1:52) {
# pval[i] <- fitmodels1$model[[i]]$p.value
# coef[i] <- fitmodels1$model[[i]]$estimate
# year[i] <- fitmodels1$Year[i]
# season[i] <- fitmodels1$Season[i]
# }
#
# df20 <- data.frame(pval = c(pval), coef = c(coef), year = c(year), season = c(season))
# df20 <- df20 %>% mutate(Season = case_when(season == "1" ~ "Winter", season == "2" ~ "Spring",
# season == "3" ~ "Summer", season == "4" ~ "Fall"))
#
# df20$year <- as.Date(paste0(year, '-01-01'))
# ggplot(df20, aes(x = Season, y = coef, color = pval)) + geom_point() + facet_wrap( ~ year)
# MADRAS
# Build a plotmath label "y = a + b x, R^2 = r2" for the Madras
# Temperature-vs-air-temperature regression (same contract as lm_eqn).
lm_eqn1 <- function(MadrasAirandWater) {
  fit <- lm(Temperature ~ cTAVG, MadrasAirandWater)
  fit_coefs <- coef(fit)
  label <- substitute(
    italic(y) == a + b %.% italic(x) * "," ~ ~italic(R)^2 ~ "=" ~ r2,
    list(
      a  = format(as.numeric(fit_coefs[1]), digits = 2),
      b  = format(as.numeric(fit_coefs[2]), digits = 2),
      r2 = format(summary(fit)$r.squared, digits = 3)
    )
  )
  as.character(as.expression(label))
}
eq1 <- ddply(MadrasAirandWater,.(Year, Season),lm_eqn1)
# ggplot(data = MadrasAirandWater, aes(cTAVG, Temperature)) +
# geom_point() +
# geom_smooth(method = "lm", se = F) + geom_text(data = eq1, aes(x = -1, y = 18, label = V1), parse = T, cex = 2, inherit.aes = F) +
# facet_wrap(Year ~ Season, ncol = 4)
# CULVER
# Build a plotmath label "y = a + b x, R^2 = r2" for the Culver
# Temperature-vs-air-temperature regression (same contract as lm_eqn).
lm_eqn2 <- function(CulverAirandWater) {
  fit <- lm(Temperature ~ cTAVG, CulverAirandWater)
  fit_coefs <- coef(fit)
  label <- substitute(
    italic(y) == a + b %.% italic(x) * "," ~ ~italic(R)^2 ~ "=" ~ r2,
    list(
      a  = format(as.numeric(fit_coefs[1]), digits = 2),
      b  = format(as.numeric(fit_coefs[2]), digits = 2),
      r2 = format(summary(fit)$r.squared, digits = 3)
    )
  )
  as.character(as.expression(label))
}
eq2 <- ddply(CulverAirandWater,.(Year, Season),lm_eqn2)
# ggplot(data = CulverAirandWater, aes(cTAVG, Temperature)) +
# geom_point() +
# geom_smooth(method = "lm", se = F) + geom_text(data = eq2, aes(x = -1, y = 18, label = V1), parse = T, cex = 2, inherit.aes = F) +
# facet_wrap(Year ~ Season, ncol = 4)
# Here is for Madras (USGS) and Madras (NOAA)
airplot3 <- airtempData2 %>% filter(Location == "Madras") %>% select(cTAVG, mergeDate, Tmax, Tmin, Year)
date5 <- as.Date("2007-01-01")
date6 <- as.Date("2020-07-01")
airplot3 <- airplot3[airplot3$mergeDate >= date5 & airplot3$mergeDate <= date6,]
idklol <- airplot3 %>% filter(Year == 2008)
idklol2 <- airplot3 %>% filter(Year == 2012)
idklol4 <- airplot3 %>% filter(Year == 2018)
idklol3 <- airplot3 %>% filter(Year == 2019)
Madras2008 <- MadrasData2 %>% filter(Year == 2008)
Madras2012 <- MadrasData2 %>% filter(Year == 2012)
Madras2018 <- MadrasData2 %>% filter(Year == 2018)
Madras2019 <- MadrasData2 %>% filter(Year == 2019)
# Graphs of 2008 and 2012
plot1 <- ggplot() + geom_line(data = Madras2008, aes(x = Date_time, y = Temperature, color = Location)) +
geom_line(data = idklol, aes(x = as.POSIXct(mergeDate), y = cTAVG, color = "Madras Air Temperature"), linetype = "dashed") +
scale_color_brewer(palette = "Paired") + theme(legend.position='none') + xlab("Date")
plot2 <- ggplot() + geom_line(data = Madras2012, aes(x = Date_time, y = Temperature, color = Location)) +
geom_line(data = idklol2, aes(x = as.POSIXct(mergeDate), y = cTAVG, color = "Madras Air Temperature"), linetype = "dashed") +
scale_color_brewer(palette = "Paired") + ylim(-15, 30) + theme(legend.position='none') + xlab("Date")
plot3 <- ggplot() + geom_line(data = Madras2019, aes(x = Date_time, y = Temperature, color = Location)) +
geom_line(data = idklol3, aes(x = as.POSIXct(mergeDate), y = cTAVG, color = "Madras Air Temperature"), linetype = "dashed") +
scale_color_brewer(palette = "Paired") + ylim(-15, 30) + theme(legend.position='none') + xlab("Date")
plot4 <- ggplot() + geom_line(data = Madras2018, aes(x = Date_time, y = Temperature, color = Location)) +
geom_line(data = idklol4, aes(x = as.POSIXct(mergeDate), y = cTAVG, color = "Madras Air Temperature"), linetype = "dashed") +
scale_color_brewer(palette = "Paired") + ylim(-15, 30) + theme(legend.position='none') + xlab("Date")
#This is a solid graph run it if you don't remember
# grid.arrange(plot1, plot2, plot4, plot3, ncol = 2)
# Facet wrapped graph
# ggplot() + geom_line(data = MadrasData2, aes(x = Date_time, y = Temperature, color = Location)) +
# geom_line(data = airplot3, aes(x = as.POSIXct(DATE), y = cTAVG, color = "Madras Air Temperature"), linetype = "dashed") +
# scale_color_brewer(palette = "Dark2") + facet_wrap( ~ Year)
# Compare river temperature, air temperature data ratio to determine impact above and below the dam
# Some pH graphing
# pH observations only, most recent first within each location.
df10 <- rivertempbigData %>%
  filter(!is.na(`pH in NA`)) %>% arrange(Location, desc(Date_time))
# Date window for the commented pH plot below.
# NOTE(review): `min`/`max` shadow base::min/base::max in this session --
# consider renaming to date_min/date_max.
min <- as.Date("2015-01-01")
max <- as.Date("2017-01-01")
# ggplot(df10, aes(as.Date(Date_time), y = `pH in NA`, color = Location, group = Location)) + geom_line() + scale_x_date(limits = c(min, max)) +
# geom_hline(yintercept = 8.5, linetype = "dashed") +
# geom_dl(aes(label = Location), method = list(dl.combine("last.points")), cex = 0.8) + theme(legend.position = "none") +
# labs(x = "Date", y = "pH") + annotate("text", x = as.Date("2015-02-15"), y = 8.6, label = "ODEQ Standard (8.5)")
#
# df11 <- df10 %>% filter(Location == "Whitehorse" | Location == "Shitike Creek" | Location == "River Mouth" | Location == "Reregulating Dam")
# ggplot(df11, aes(Date_time, `pH in NA`, color = Location)) + geom_line()
# Overlaid plot of just Madras, Biggs, Culver, plots used in midterm presentation
# usgsdata1 <- rivertempbigData %>% filter(Location == "Biggs" | Location == "Culver" | Location == "Madras") %>% filter(Year == 2008)
# plt2008ylims <- layer_scales(plot2015)$y$range$range
# plot2008 <- ggplot(usgsdata1, aes(x = Date_time, y = Temperature, color = Location)) + geom_line() +
# scale_color_manual(values = c("red", "blue")) +
# geom_hline(yintercept = 16, linetype = "dashed") + ylim(plt2008ylims) + theme(axis.title.x = element_blank(),
# axis.title.y = element_blank())
# usgsdata2 <- rivertempbigData %>% filter(Location == "Biggs" | Location == "Culver" | Location == "Madras") %>% filter(Year == 2015)
# plot2015 <- ggplot(usgsdata2, aes(x = Date_time, y = Temperature, color = Location)) + geom_line() +
# scale_color_manual(values = c("forestgreen", "red", "blue")) +
# geom_hline(yintercept = 16, linetype = "dashed") + theme(axis.title.x = element_blank(),
# axis.title.y = element_blank())
# legend1 <- gtable_filter(ggplotGrob(plot2015), "guide-box")
#
#
# # Plot years 2008, 2012, 2019
#
# grid.arrange(arrangeGrob(plot2008 + theme(legend.position = "none"),
# plot2015 + theme(legend.position = "none"),
# nrow = 1,
# top = textGrob("Water Temperature before and After SWW Activation", vjust = 1,
# gp = gpar(fontface = "bold", cex = 1.5, col = "goldenrod4")),
# left = textGrob("Temperature (°C)", rot = 90, vjust = 1)),
# bottom = textGrob("Date", hjust = 1),
# legend1,
# widths = unit.c(unit(1, "npc") - legend1$width, legend1$width), ncol = 2)
#
# Distinct PGE monitoring locations (df5 is built earlier in the file).
pgeLocations <- unique(df5$Location)
# pgedata2015 <- rivertempbigData %>% filter(Location %in% pgeLocations) %>% filter(Year == 2015)
# odeqdata2015 <- rivertempbigData %>% filter(Location == "Mirror Pond" |
# Location == "Lower Bridge" | Location == "Warm Springs" |
# Location == "Deschutes River Park") %>% filter(Year == 2015)
#
# pgeandodeq <- rivertempbigData %>% filter(Location != "Biggs" & Location != "Culver" & Location != "Madras")
# fakeplot <- ggplot(pgeandodeq, aes(x = Date_time, y = Temperature, color = Location)) + geom_line()
# legend2 <- gtable_filter(ggplotGrob(fakeplot), "guide-box")
#
# pgeplot2015 <- ggplot(pgedata2015, aes(Date_time, Temperature, color = Location)) + geom_line() +
# theme(axis.title.x = element_blank(), axis.title.y = element_blank()) + geom_hline(yintercept = 16, linetype = "dashed")
# odeqplot2015 <- ggplot(odeqdata2015, aes(Date_time, Temperature, color = Location)) + geom_line() +
# theme(axis.title.x = element_blank(), axis.title.y = element_blank()) + geom_hline(yintercept = 16, linetype = "dashed")
# #Update legend
# grid.arrange(arrangeGrob(pgeplot2015 + theme(legend.position = "none"),
# odeqplot2015 + theme(legend.position = "none"),
# nrow = 2,
# top = textGrob("PGE and ODEQ Water Temperature Data", vjust = 1,
# gp = gpar(fontface = "bold", cex = 1.5, col = "goldenrod4")),
# left = textGrob("Temperature (°C)", rot = 90, vjust = 1)),
# bottom = textGrob("Date", hjust = 1),
# legend2,
# widths = unit.c(unit(1, "npc") - legend2$width, legend2$width), ncol = 2)
# Make sure to have upstream and downstream of dam graph somewhere in here
# Trying to replicate the PGE graph of figure 6-4 here
# rivertempbigData %>% filter(Location == "Reregulating Dam") %>%
# ggplot(aes(x = as.Date(Julian, origin = as.Date("2015-01-01")), y = Temperature)) + geom_line() + facet_wrap( ~ Year) +
# scale_x_date(date_labels = "%b")
# Attempting to replicate 24 degrees celsius observation of river mouth - max is 23.3 from what I see
# rivertempbigData %>% filter(Location == "Biggs") %>% ggplot(aes(Date_time, Temperature, color = Location)) + geom_line() +
# stat_peaks(aes(label = stat(y.label)), geom = "label", color = "red", hjust = -0.1)
# Replicating figure 6-5
# rivertempbigData %>% filter(Date_time >= "2015-01-01" & Date_time <= "2017-01-01") %>%
# filter(Location == "Madras" | Location == "Biggs") %>% ggplot(aes(Date_time, Temperature, color = Location)) + geom_line()
### NEW USGS DATA READIN
# Both stations share the same tab-delimited layout apart from station-specific
# approval-flag columns, so the read/clean steps are factored into helpers.
# Outputs are the same as the previous inline version: MadrasGageData,
# MoodyGageData and the combined allusgsdata2.

# Read one USGS gage file, tag it with its location, drop the quality-approval
# columns (the data are already quality controlled) and apply readable names.
read_gage <- function(path, location, approval_cols, new_names) {
  gage <- read.table(path, header = T, fill = T, sep = "\t")
  gage <- gage %>% mutate(Location = location)
  gage[approval_cols] <- NULL
  colnames(gage) <- new_names
  gage
}

# Fill missing daily means with the midpoint of the daily max/min (stays NA
# when max or min is also missing, matching the old case_when behaviour).
fill_mean_temp <- function(gage) {
  gage %>% mutate(`Mean Temperature` = coalesce(`Mean Temperature`,
                                                (`Max Temperature` + `Min Temperature`) / 2))
}

MadrasGageData <- read_gage(
  "Data/MadrasTemperatureData.txt", "Madras",
  c("X113433_00010_00001_cd", "X113434_00010_00002_cd",
    "X113435_00010_00003_cd", "X113436_00060_00003_cd"),
  c("Agency", "Site", "Date_time", "Max Temperature", "Min Temperature",
    "Mean Temperature", "Discharge (cfs)", "Location")
) %>% fill_mean_temp()

MoodyGageData <- read_gage(
  "Data/MoodyTemperatureData.txt", "Moody",
  c("X113455_00010_00001_cd", "X113456_00010_00002_cd",
    "X113457_00010_00003_cd", "X113458_00060_00003_cd",
    "X265533_00010_00011_cd"),
  c("Agency", "Site", "Date_time", "Max Temperature", "Min Temperature",
    "Mean Temperature", "Discharge (cfs)", "Instantaneous Temperature", "Location")
)
# Moody reports some days only as instantaneous readings; prefer those over a
# missing daily mean before falling back to the max/min midpoint.
MoodyGageData <- MoodyGageData %>%
  mutate(`Mean Temperature` = coalesce(`Instantaneous Temperature`, `Mean Temperature`))
MoodyGageData$`Instantaneous Temperature` <- NULL
MoodyGageData <- fill_mean_temp(MoodyGageData)

allusgsdata2 <- rbind(MadrasGageData, MoodyGageData)
# Dates are m/d/yy with two-digit years spanning the 1950s-2010s; lubridate's
# default pivot (1970) misplaces the 1950s, so recover the intended year first.
allusgsdata2$Year <- four.digit.year(as.POSIXct(allusgsdata2$Date_time, format = "%m/%d/%y"), year = 1951) #lubridate is set in 1970 gotta transform data
allusgsdata2$Date_time <- mdy(allusgsdata2$Date_time)
allusgsdata2 <- allusgsdata2 %>%
  mutate(Date_time = case_when(year(Date_time) > 2021 ~ 'year<-'(Date_time, Year), TRUE ~ Date_time))
allusgsdata2 <- allusgsdata2 %>% mutate(Season = getSeason(Date_time)) %>% mutate(Julian = yday(Date_time))
# Station coordinates for later mapping.
allusgsdata2 <- allusgsdata2 %>%
  mutate(lat = case_when(Location == "Madras" ~ 45.62222222, Location == "Moody" ~ 44.72611111)) %>%
  mutate(long = case_when(Location == "Madras" ~ -120.90444444, Location == "Moody" ~ -121.24583333))
# allusgsdata2 <- allusgsdata2 %>% mutate(Yearagain = paste(Date_time[1:4], "/", Year, sep = "")) this doesn't work
# --- Long-term Madras temperature: pre- vs post-dam years -------------------
# Fixes vs. the original: (1) it referenced a bare `Temperature` column that
# does not exist in allusgsdata2 (the column is `Mean Temperature`); (2) the
# facet labels referenced an undefined `lab` column and placed a numeric x on
# a Date scale. Plot colours are defined here as well as later near the
# dual-axis plot, so this section runs top-to-bottom without forward refs.
temperatureColor <- "#C92A2A"
fishColor <- rgb(0.2, 0.6, 0.9, 1)
# Seasonal median/mean temperature at Madras for selected pre-dam (1953/1956)
# and post-dam (2008/2009/2016/2019) years.
MadrasDataMedians <- allusgsdata2 %>% filter(Location == "Madras") %>% group_by(Year, Season) %>%
  summarize(median = median(`Mean Temperature`, na.rm = T), mean = mean(`Mean Temperature`, na.rm = T)) %>%
  filter(Year %in% c(1953, 1956, 2008, 2009, 2016, 2019))
MadrasDataMedians %>% ggplot(aes(Season, mean)) + geom_bar(aes(fill = as.factor(Year)), position = "dodge", stat = "identity") +
  labs(y = "Mean", fill = "Year") + scale_fill_brewer(palette = "Dark2") + theme_bw()
# Mid-June anchor dates / mid-year Julian day used to position facet labels.
MadrasDataMedians$x = as.Date(c("1953-06-15",
                                "1956-06-15",
                                "2008-06-15",
                                "2009-06-15",
                                "2016-06-15",
                                "2019-06-15"))
MadrasDataMedians$Julian <- c("183", "183","183", "183","183", "183")
# TODO(review): medians above use 1956 while this plot uses 1955 -- confirm
# which year is intended.
longtermtempplot <- allusgsdata2 %>%
  filter(Location == "Madras", Year %in% c(1953, 1955, 2008, 2009, 2016, 2019)) %>%
  ggplot(aes(x = as.Date(Julian, origin = "1952-01-01"), y = `Mean Temperature`, color = Year)) + geom_line(show.legend = F) +
  facet_wrap( ~ as.factor(Year), ncol = 2) + theme_bw() +
  scale_x_date(date_labels = "%b") + ggtitle("Temperature Before and After Dam Installation") + labs(x = "Date") +
  theme(axis.title.y = element_text(color = temperatureColor, size = 13),
        axis.title.x = element_text(color = fishColor, size = 13),
        plot.title = element_text(hjust = 0.5))
colorset = c('1953' = "red", '1956' = "red", '2008' = "goldenrod", '2009' = "goldenrod", '2016' = "forestgreen", '2019' = "forestgreen")
# NOTE(review): the plot maps colour (not fill), so scale_fill_manual() is a
# no-op here; left as-is to avoid changing the rendered output.
longtermtempplot + scale_fill_manual(values = colorset)
# Annotate each facet with its summer mean temperature, placed at mid-year on
# the shared 1952-origin Date axis.
longtermtempplot + geom_text(
  data = filter(MadrasDataMedians, Season == "Summer"),
  mapping = aes(x = as.Date(as.numeric(Julian), origin = "1952-01-01"), y = 10, label = round(mean, 1)),
  show.legend = F
)
# Working here
# ggplot(allusgsdata2, aes(Date_time, `Mean Temperature`, color = Location)) + geom_line()
### FISH DATA
# Raw fish-count inputs: ODFW estimated steelhead, PGE adult counts 2014-2020,
# and river-mile-43 steelhead trap counts.
fishCountsSteelheadEstimated <- read.csv("Data/EstimatedSteelheadODFW.csv")
fishCounts <- read.csv("Data/adult counts deschutes PGE 2014-2020.csv")
fishCounts2 <- read.csv("Data/RM43-steelhead-counts.csv")
fishCountsSteelhead <- fishCounts2 %>% select("BeginDate","EndDate","CountValue","TrendCom") %>% arrange(EndDate) # Actual captured rate
# Wide Fall-Chinook table: first 43 rows / 7 columns (Year, June..October, Total).
fishCounts3 <- read.csv("Data/FallChinookODFW.csv")[1:43,1:7]
# --- ODFW monthly trap counts (Fall Chinook, hatchery & wild steelhead) -----
# The three species tables share one layout (Year, June..October, Total), so
# the month-to-long reshape is factored into a helper instead of being
# repeated three times.

# Reshape a wide ODFW table to long form: one row per Year x Month with the
# count stored under `species`, plus a Date_time at the first of the month.
# Final columns match the old inline pipeline: Year, Month, <species>, Date_time.
tidy_monthly_counts <- function(wide, species) {
  month_codes <- c(June = "-06", July = "-07", August = "-08",
                   September = "-09", October = "-10")
  long <- gather(wide, Month, Count, June, July, August, September, October, -Total)
  long <- long %>% mutate(Date_time = ymd(parse_date_time(paste0(Year, month_codes[Month]), orders = c("Y-m"))))
  long$Total <- NULL
  colnames(long)[colnames(long) == "Count"] <- species
  long[[species]] <- as.numeric(long[[species]])
  long
}

fishCounts3 <- tidy_monthly_counts(fishCounts3, "Fall Chinook")

fishCounts4 <- read.csv("Data/HatcherySteelhead.csv")[1:43,1:7]
# Keep the annual hatchery totals before the wide table is reshaped away.
odfwmergedata <- fishCounts4 %>% select("Year", "Total")
fishCounts4 <- tidy_monthly_counts(fishCounts4, "Hatchery Summer Steelhead")

fishCounts5 <- read.csv("Data/WildSteelhead.csv")[1:43,1:7]
odfwmergedata <- odfwmergedata %>% left_join(select(fishCounts5, Total, Year), by = "Year")
colnames(odfwmergedata) <- c("Year","Total Number of Captured Hatchery Summer Steelhead", "Number of Captured Wild Summer Steelhead")
# Strip thousands separators; funs() is deprecated, use a lambda with across().
odfwmergedata <- odfwmergedata %>% mutate(across(everything(), ~ as.numeric(gsub(",", "", .x))))
fishCounts5 <- tidy_monthly_counts(fishCounts5, "Wild Summer Steelhead")

# One row per Year x Month with all three species side by side.
ODFWData <- fishCounts3 %>% left_join(fishCounts4) %>% left_join(fishCounts5) %>% arrange(Date_time)
ODFWData <- ODFWData %>% mutate(Season = getSeason(Date_time))
# Reorder to Date_time, Year, Month, Season, then species. Note that the
# overwhelming majority of fish occurrence is in summer, about a third as much
# in fall - basically none in spring.
ODFWData <- ODFWData[,c(4,1,2,7,3,5,6)]
ODFWData2 <- ODFWData %>% gather(Variable, Value, -c("Date_time", "Year","Season","Month"))
#Plot by season and species of ODFW Data before merge
# Seasonal faceted lines per species; the dashed vertical at 2010-01-01
# presumably marks SWW activation (see comments earlier in file) -- TODO confirm.
# NOTE(review): temperatureColor/fishColor are defined further down (near the
# dual-axis plot); this statement errors if the script is run strictly
# top-to-bottom.
ODFWData %>% gather(Variable, Value, -c("Date_time", "Year","Season","Month")) %>% filter(Season != "Spring") %>%
  ggplot(aes(Date_time, Value, color = Variable)) + geom_line(show.legend = F) + facet_grid(Variable ~ Season) + theme_bw() +
  ggtitle("Seasonal ODFW Data at Sherars Falls") + labs(x = "Date", y = "Fish Count", color = "Species") +
  geom_vline(aes(xintercept = as.Date("2010-01-01")), linetype = "dashed") +
  theme(axis.title.y = element_text(color = temperatureColor, size = 13),
        axis.title.y.right = element_text(color = fishColor, size = 13),
        plot.title = element_text(hjust = 0.5))
# Parse PGE dates and derive Year / Season / full month-name factor.
fishCounts$Date <- mdy(fishCounts$Date)
fishCounts$Year <- year(fishCounts$Date)
fishCounts$Season <- getSeason(fishCounts$Date)
fishCounts <- fishCounts %>% mutate(Month = month(Date, label = T, abbr = F))
# Monthly columns + lines per species and year (Total excluded).
fishCounts %>% gather(Variable, Value, -Date, -Year, -Season, -Month) %>% filter(Variable != "Total") %>%
  ggplot(aes(Month, Value, color = Variable, fill = Variable)) + geom_col() + geom_line() + facet_grid(Variable ~ Year) + #Decent visualizationn
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
#ANOVA TEST
# One-way ANOVA: do mean counts differ across species/variables?
prob <- fishCounts %>% gather(Variable, Count, - Date, -Year, -Season) %>% aov(Count ~ Variable, .)
summary(prob)
# Join daily gage temperature to PGE counts on date (+ Season/Year) for each
# station; inner join keeps only dates present in both series.
fishandtempdfMadras <- allusgsdata2 %>% filter(Location == "Madras") %>%
  inner_join(fishCounts, by = c("Date_time" = "Date", "Season", "Year"))
fishandtempdfMoody <- allusgsdata2 %>% filter(Location == "Moody") %>%
  inner_join(fishCounts, by = c("Date_time" = "Date", "Season", "Year"))
# Drop gage metadata, then rename positionally.
# NOTE(review): the colnames vector assumes this exact post-select column
# order -- revisit if the select list changes.
fishandtempdfMadras <- fishandtempdfMadras %>%
  select(-c("Agency", "Site", "Discharge (cfs)", "lat", "long", "Max Temperature", "Min Temperature", "Julian", "Location"))
colnames(fishandtempdfMadras) <- c("Date_time","Mean Temperature","Year","Season","Hatchery Summer Steelhead","Wild Summer Steelhead",
                                   "Summer Steelhead RM", "Summer Steelhead LM", "Hatchery Spring Chinook", "Wild Spring Chinook",
                                   "Spring Chinook RM", "Spring Chinook LM","No Mark Sockeye", "Sockeye RM", "Sockeye LM", "Fall Chinook",
                                   "Bull Trout", "Rainbow Trout", "Total")
fishandtempdfMadras <- fishandtempdfMadras %>% mutate(Month = month(Date_time, label = T, abbr = F))
# Combine pre-2014 ODFW counts with the PGE series (tagged by Source) into one
# continuous record, then attach Madras mean temperature.
ODFWData1 <- ODFWData %>% filter(Year < 2014) %>% mutate(Source = "ODFW")
fishCounts <- fishCounts %>% mutate(Source = "PGE")
fullfishandtemp <- fishCounts %>%
  full_join(ODFWData1, by = c("Date" = "Date_time","Season","Year","Month","Source",
                              "SUMMER.STEELHEAD_Hatchery" = "Hatchery Summer Steelhead", "SUMMER.STEELHEAD_Wild" = "Wild Summer Steelhead",
                              "Fall.Chinook" = "Fall Chinook")) %>% arrange(Date)
# Positional rename -- same caveat as above about column order.
colnames(fullfishandtemp) <- c("Date_time","Hatchery Summer Steelhead","Wild Summer Steelhead",
                               "Summer Steelhead RM", "Summer Steelhead LM", "Hatchery Spring Chinook", "Wild Spring Chinook",
                               "Spring Chinook RM", "Spring Chinook LM","No Mark Sockeye", "Sockeye RM", "Sockeye LM", "Fall Chinook",
                               "Bull Trout", "Rainbow Trout", "Total", "Year", "Season", "Month", "Source")
allusgsdata3 <- allusgsdata2 %>% filter(Location == "Madras") %>% select("Date_time","Mean Temperature")
fullfishandtemp1 <- fullfishandtemp %>% left_join(allusgsdata3, by = "Date_time") %>% arrange(Date_time)
# Sanity check: dates that appear more than once after the join (possible
# fan-out from the left_join).
test1 <- fullfishandtemp1 %>% group_by(Date_time) %>% filter(n() > 1) %>% summarize(n = n())
# Cubic-trend overlays with adjusted R^2 annotations (ggpmisc::stat_poly_eq).
fullfishandtemp1 %>% gather(Variable, Value, -c("Date_time", "Year","Season","Month")) %>% filter(Variable == "Fall Chinook" |
                                                                                                   Variable == "Hatchery Summer Steelhead" |
                                                                                                   Variable == "Wild Summer Steelhead") %>%
  ggplot(aes(Date_time, Value, color = Variable)) + geom_line() + geom_smooth(se = F, method = "lm",
                                                                              formula = y ~ x + I(x^2) + I(x^3)) + facet_wrap( ~ Variable) + theme_bw() +
  ggtitle("Fish Count data by Species") + labs(x = "Date", y = "Fish Count", color = "Species") +
  theme(axis.title.y = element_text(color = temperatureColor, size = 13),
        axis.title.y.right = element_text(color = fishColor, size = 13),
        plot.title = element_text(hjust = 0.5)) + stat_poly_eq(formula = y ~ x + I(x^2) + I(x^3),aes(label = ..adj.rr.label..), parse = T)
# Compare quadratic vs cubic year trends for hatchery steelhead.
summary(lm(`Hatchery Summer Steelhead` ~ poly(Year,2), data = fullfishandtemp1))
summary(lm(`Hatchery Summer Steelhead` ~ poly(Year,3), data = fullfishandtemp1))
# Trim Moody join to match the Madras frame's columns.
fishandtempdfMoody <- fishandtempdfMoody %>%
  select(-c("Agency", "Site", "Discharge (cfs)", "lat", "long", "Max Temperature", "Min Temperature", "Julian", "Location"))
# log(Total) vs temperature per year. NOTE(review): log(0) yields -Inf for
# zero-count days -- consider log1p if those days matter.
fishandtempdfMadras %>% mutate(lFishCount = log(Total)) %>% gather(Variable, Value, -Date_time, -Year, -Season, -Month) %>%
  filter(Variable == "lFishCount" | Variable == "Mean Temperature") %>%
  ggplot(aes(Date_time, Value, color = Variable)) + geom_line() + facet_wrap( ~ Year, scales = "free") +
  scale_color_manual(values = c("red","blue")) + labs(x = "Date", y = "Temperature")
# Scale factor mapping counts onto the temperature axis for the dual-axis plot.
coeff <- max(fishandtempdfMadras$Total, na.rm = T)/max(fishandtempdfMadras$`Mean Temperature`)
# Shared plot colours (also used by several earlier plot statements).
temperatureColor <- "#C92A2A"
fishColor <- rgb(0.2, 0.6, 0.9, 1)
# Dual-axis overlay: temperature (left) and total fish count (right, rescaled).
ggplot(data = fishandtempdfMadras, aes(x = Date_time)) + geom_line(aes(y = `Mean Temperature`), color = temperatureColor) +
  geom_line(aes(y = Total / coeff), color = fishColor) + scale_y_continuous(name = "Temperature (Celsius °)",
                                                                           sec.axis = sec_axis(~.*coeff, name = "Total Fish Count")) + theme_bw() +
  ggtitle("Temperature vs. Fish Count") + theme(axis.title.y = element_text(color = temperatureColor, size = 13),
                                                axis.title.y.right = element_text(color = fishColor, size = 13),
                                                plot.title = element_text(hjust = 0.5)) + xlab("Date")
# Graph with bar and line different axes and then different fish species
# Per-species scale factors (counts / 15.4). NOTE(review): coeff2 is computed
# but the plot below hard-codes a *10 secondary-axis factor instead.
coeff2 <- fishandtempdfMadras %>% gather(Variable, Value, -Date_time, -Year, -Season, -`Mean Temperature`, -Month) %>%
  group_by(Variable) %>%
  summarise(coeff = max(as.numeric(Value), na.rm = T)/15.4)
# Per-species columns with the shared temperature line overlaid on each facet.
fishandtempdfMadras %>% gather(Variable, Value, -Date_time, -Year, -Season, -`Mean Temperature`, -Month) %>%
  ggplot(aes(x = Date_time)) + geom_col(aes(y = Value, fill = Variable), show.legend = F) + facet_wrap( ~ Variable) +
  geom_line(aes(y = `Mean Temperature`), color = temperatureColor) +
  scale_y_continuous(name = "Temperature (Celsius °)", sec.axis = sec_axis(~.*10, name = "Fish Count")) +
  theme_bw() + labs(x = "Date") + ggtitle("Temperature and Fish Count by Species (PGE Data 2014-2020)") +
  theme(axis.title.y = element_text(color = temperatureColor, size = 13),
        axis.title.y.right = element_text(color = fishColor, size = 13),
        plot.title = element_text(hjust = 0.5))
# justforplotting <- fishandtempdfMadras %>% gather(Variable, Value, -Date_time, -Year, -Season, -`Mean Temperature`, -Month)
# justforplotting$Variable <- factor(justforplotting$Variable)
# ggplot(data = justforplotting, aes(x = Date_time, y = Value)) + geom_bar(stat = "identity") +
#   facet_wrap( ~ Variable, scales = "free") + theme_bw()
# Linear models for Madras and Moody
# NOTE(review): Total can be 0, so log(Total) produces -Inf rows for the
# Madras model -- consider log1p() if zero-count days should be retained.
fishtempmodelMadras <- lm(log(Total) ~ `Mean Temperature`, data = fishandtempdfMadras)
fishtempmodelMoody <- lm(Total ~ `Mean Temperature`, data = fishandtempdfMoody)
summary(fishtempmodelMadras) # For every 2.53 degree celsius decrease in temperature there is a corresponding increase of 1 total fish count
summary(fishtempmodelMoody) # For every 1.9 degree celsius decrease in temperature there is a corresponding increase of 1 total fish count
# Degree-1 polynomial fit of log(count) on temperature, with adjusted R^2 label.
formula <- log(y) ~ poly(x, raw = TRUE)
ggplot(data = fishandtempdfMadras, aes(x = `Mean Temperature`, y = Total)) + geom_point() + geom_smooth(method = "lm", formula = formula) +
  stat_poly_eq(aes(label = ..adj.rr.label..), formula = formula, parse = TRUE)
# Annual totals per species. Fixes vs. the original: Month (a factor) must be
# dropped before summing (sum() on a factor errors), and funs() is deprecated,
# so columns are summed directly.
totalsumoffish <- fishandtempdfMadras %>% group_by(Year) %>% select(-Date_time, -Season, -Month) %>% replace(is.na(.), 0) %>% summarise_all(sum)
# Grand-total row (note: this also sums the Year column -- cosmetic only).
sumsoftotalfishsums <- totalsumoffish %>% summarise_all(sum)
totalsumoffish <- rbind(totalsumoffish, sumsoftotalfishsums)
# Only significant numbers of fish rainbow trout, hatchery steelhead, hatchery spring chinook, fall chinook
# --- Sherars Falls steelhead: captured (trap) vs ODFW estimated -------------
fishCountsSteelhead <- fishCountsSteelhead %>% mutate(Year = year(BeginDate)) %>% select("Year","TrendCom","CountValue")
fishCountsSteelhead <- spread(fishCountsSteelhead, TrendCom, CountValue)
# ODFW "Year" values appear to be ranges like "2015-2016"; keep the opening
# year -- TODO confirm against the raw CSV.
fishCountsSteelheadEstimated <- fishCountsSteelheadEstimated %>% mutate(Year = substr(Year, start = 1, stop = 4))
fishCountsSteelheadEstimated$Year <- as.numeric(fishCountsSteelheadEstimated$Year)
SteelheadODFWDF <- fishCountsSteelhead %>% right_join(fishCountsSteelheadEstimated, by = c("Year"))
# Strip thousands separators; funs() is deprecated, use a lambda with across().
SteelheadODFWDF <- SteelheadODFWDF %>% mutate(across(everything(), ~ as.numeric(gsub(",", "", .x))))
colnames(SteelheadODFWDF) <- c("Year", "Number of Captured Wild Summer Steelhead",
                               "Number of Captured Round Butte Hatchery Summer Steelhead",
                               "Number of Captured Stray Hatchery Summer Steelhead",
                               "Total Number of Captured Hatchery Summer Steelhead",
                               "Estimated Wild Summer Steelhead", "Estimated Round Butte Hatchery Summer Steelhead",
                               "Estimated Stray Hatchery Summer Steelhead", "Estimated Total Hatchery Summer Steelhead")
odfwmergedata <- odfwmergedata %>% filter(Year > 2006)
# SteelheadODFWDF <- SteelheadODFWDF %>% replace(is.na(.), 0)
# SteelheadODFWDF <- SteelheadODFWDF %>% mutate_all(na_if, 0) go back and forth between na and 0
# Merge trap data with the annual ODFW totals. Where the trap series is NA,
# fall back on the ODFW counts. The old version patched NAs with mismatched
# logical-subset assignment (x[is.na(x)] <- y[!is.na(y)]), which recycles or
# errors whenever the two NA patterns differ in length; coalesce() is the
# elementwise fallback actually intended.
testdf <- full_join(SteelheadODFWDF, odfwmergedata,
                    by = c("Year"))
testdf <- testdf %>% mutate(
  `Number of Captured Wild Summer Steelhead.x` =
    coalesce(`Number of Captured Wild Summer Steelhead.x`, `Number of Captured Wild Summer Steelhead.y`),
  `Total Number of Captured Hatchery Summer Steelhead.x` =
    coalesce(`Total Number of Captured Hatchery Summer Steelhead.x`, `Total Number of Captured Hatchery Summer Steelhead.y`))
testdf$`Number of Captured Wild Summer Steelhead.y` <- NULL
testdf$`Total Number of Captured Hatchery Summer Steelhead.y` <- NULL
steelheadFinaldf <- testdf
colnames(steelheadFinaldf) <- c("Year", "Number of Captured Wild Summer Steelhead",
                                "Number of Captured Round Butte Hatchery Summer Steelhead",
                                "Number of Captured Stray Hatchery Summer Steelhead",
                                "Total Number of Captured Hatchery Summer Steelhead",
                                "Estimated Wild Summer Steelhead", "Estimated Round Butte Hatchery Summer Steelhead",
                                "Estimated Stray Hatchery Summer Steelhead", "Estimated Total Hatchery Summer Steelhead")
# Working here
#INITIAL STEELHEAD DATA PLOT
steelheadListInitial <- c("Year", "Proportion of Estimated to Captured Total Hatchery", "Proportion of Estimated to Captured Stray Hatchery",
                          "Proportion of Estimated to Captured Round Butte Hatchery", "Proportion of Estimated to Captured Wild")
steelheadListProp <- c("Year",
                       "Number of Captured Wild Summer Steelhead",
                       "Number of Captured Round Butte Hatchery Summer Steelhead",
                       "Number of Captured Stray Hatchery Summer Steelhead",
                       "Total Number of Captured Hatchery Summer Steelhead",
                       "Estimated Wild Summer Steelhead",
                       "Estimated Round Butte Hatchery Summer Steelhead",
                       "Estimated Stray Hatchery Summer Steelhead",
                       "Estimated Total Hatchery Summer Steelhead")
# Captured vs estimated counts, faceted by run component. Fix vs. original:
# the gather excluded `-steelheadListInitial`, but those proportion columns
# are only created further down, so they cannot be excluded here -- just
# exclude Year. (Plot-title typo "sherarsFalls" also corrected.)
steelheadFinaldf %>% gather(Variable, Value, -Year) %>%
  mutate(Estimates = case_when(grepl("Wild", Variable) ~ "Wild", grepl("Round Butte", Variable) ~ "Round Butte Hatchery",
                               grepl("Total", Variable) ~ "Total Hatchery", grepl("Stray", Variable) ~ "Stray Hatchery")) %>%
  ggplot(aes(x = Year, y = Value, color = Variable)) + facet_wrap( ~ Estimates) +
  geom_line() + geom_point() +
  ggtitle("Sherars Falls Trap Data") + theme_bw() + labs(y = "Fish Count") +
  theme(plot.title = element_text(hjust = 0.5),
        legend.position = "none")
# Capture efficiency: captured / estimated, per run component.
steelheadFinaldf <- steelheadFinaldf %>% mutate(`Proportion of Estimated to Captured Round Butte Hatchery` = `Number of Captured Round Butte Hatchery Summer Steelhead` / `Estimated Round Butte Hatchery Summer Steelhead`,
                                                `Proportion of Estimated to Captured Total Hatchery` = `Total Number of Captured Hatchery Summer Steelhead` / `Estimated Total Hatchery Summer Steelhead`,
                                                `Proportion of Estimated to Captured Stray Hatchery` = `Number of Captured Stray Hatchery Summer Steelhead` / `Estimated Stray Hatchery Summer Steelhead`,
                                                `Proportion of Estimated to Captured Wild` = `Number of Captured Wild Summer Steelhead` / `Estimated Wild Summer Steelhead`)
# Exclude the raw count/estimate columns so only the proportions are plotted;
# all_of() is used because bare external vectors in selections are deprecated.
steelheadFinaldf %>% gather(Variable, Value, -all_of(steelheadListProp)) %>% ggplot(aes(x = Year, y = Value, color = Variable)) +
  geom_line(show.legend = F) +
  geom_point(show.legend = F) + facet_wrap( ~ Variable) +
  ggtitle("Sherars Falls Trap Data by Proportions")
### READING IN ODEQ Data - mostly useless
# allodeqData <- read.csv("Data/Standard Export 11365.csv")
#
# allodeqData <- allodeqData %>% select("Result.Value", "Result.Unit", "Characteristic.Name","Monitoring.Location.Name",
#                                       "Monitoring.Location.Latitude", "Monitoring.Location.Longitude","Activity.Start.Date")
# allodeqData <- allodeqData %>% mutate(new = paste(Characteristic.Name, "in", Result.Unit))
# allodeqData <- subset(allodeqData, select = -c(Characteristic.Name, Result.Unit))
# allodeqData <- as.data.frame(sapply(allodeqData, gsub, pattern = "<|>", replacement = ""))
# allodeqData$Result.Value <- as.numeric(as.character(allodeqData$Result.Value))
# allodeqData1 <- pivot_wider(allodeqData, names_from = new, values_from = Result.Value, values_fn = max)
# colnames(allodeqData1) <- c("Location","Lat","Long","Date_time","pH","Dissolved Oxygen % Saturation","Temperature","Dissolved Oxygen mg/l",
#                             "Biochemical Oxygen Demand", "Total Coliform", "Total Solids", "Ammonia", "Nitrate + Nitrite",
#                             "Escherichiac in cfu/100ml", "Escherichia in MPN/100ml")
# allodeqData1$Date_time <- mdy(allodeqData1$Date_time)
# allodeqData1 <- allodeqData1 %>% mutate(Year = year(Date_time))
#
# Temperature at three ODEQ stations, one facet per station.
# NOTE(review): allodeqData1 is only created by the commented-out block above
# (or earlier in the full file) -- this errors in a fresh session unless that
# block is uncommented first.
allodeqData1 %>% filter(Location == "Deschutes River at Deschutes River Park" | Location == "John Day River at Hwy 206" |
                          Location == "Deschutes River at Maupin") %>%
  ggplot(aes(x = Date_time, y = Temperature, color = Location)) + geom_point(show.legend = F) +
  geom_line(show.legend = F) + facet_wrap( ~ Location, ncol = 1)
### Reading in new PGE data
# Read every sheet of the rereg-dam sonde workbook and stack them into one
# frame (readxl + purrr).
path <- "Data/Rereg Sonde Data 2004-2006_ 2010-2017.xlsx"
mad <- path %>% excel_sheets() %>% set_names() %>% map_df(read_excel, path = path)
# Keep only the clock portion of the Time column (text after the last space).
mad$Time <- as.character(mad$Time)
mad <- mad %>% mutate(Time = sub(".* ", "", Time))
# test <- mad[is.na(mad$Time), ] 2010-07-31,2011-05-08,2015-05-01,2015-11-02 don't have times
# Recombine date + time into a single POSIXct timestamp.
newpgeData <- mad %>% mutate(Date_time = paste(Date, Time))
newpgeData$Date_time <- ymd_hms(newpgeData$Date_time)
newpgeData <- subset(newpgeData, select = -c(Date, Time))
colnames(newpgeData) <- c("Temperature", "Dissolved Oxygen mg/l", "Dissolved Oxygen % Saturation", "pH", "Date_time")
# Temperature series with local maxima highlighted (ggpmisc::stat_peaks);
# first variant labels peak dates.
ggplot(newpgeData, aes(x = Date_time, y = Temperature)) + geom_line(color = "blue") + stat_peaks(color = "red", ignore_threshold = 0.6) +
  annotate(geom = "point", x = as.POSIXct("2008-04-01"), y = 9, size = 50, shape = 21, fill = "transparent") +
  annotate(geom = "text", x = as.POSIXct("2008-04-01"), y = 13, label = "No Data Collected") +
  stat_peaks(geom = "text", color = "red", vjust = -1, span = 25, ignore_threshold = 0.58, angle = 20, hjust = 1) + theme_bw() +
  labs(y = "Temperature (Celsius °)", x = "Date") +
  ggtitle("PGE Data at Pelton Round Butte Dam on Temperature") + theme(plot.title = element_text(hjust = 0.5)) # Dates highlighted
# Second variant labels peak temperatures instead of dates.
ggplot(newpgeData, aes(x = Date_time, y = Temperature)) + geom_line(color = "blue") + stat_peaks(color = "red", ignore_threshold = 0.6) +
  annotate(geom = "point", x = as.POSIXct("2008-04-01"), y = 9, size = 50, shape = 21, fill = "transparent") +
  annotate(geom = "text", x = as.POSIXct("2008-04-01"), y = 13, label = "No Data Collected") +
  stat_peaks(geom = "text", color = "red", vjust = -0.5, span = 25, ignore_threshold = 0.58, y.label.fm = "%f°", angle = 20, hjust = 1,
             aes(label = paste(..y.label..))) +
  theme_bw() +
  labs(y = "Temperature (Celsius °)", x = "Date") +
  ggtitle("PGE Data at Pelton Round Butte Dam on Temperature") + theme(plot.title = element_text(hjust = 0.5)) # Temperatures highlighted, measured by
# difference, ignore_threshold at 0.6 meaning if a peak is within 60% of the data on either side it will be ignored
# df6 <- df5 %>% filter(Location == "Reregulating Dam")
# testforsimilarity <- allusgsdata2 %>% filter(Location == "Madras", Year > 2003)
# ggplot() + geom_line(data = newpgeData, aes(x = Date_time, y = Temperature), color = "forestgreen") +
# geom_line(data = df6, aes(x = Date_time, y = Temperature), color = "red") +
# geom_line(data = testforsimilarity, aes(x = as.POSIXct(Date_time), y = `Mean Temperature`), color = "blue")
# Showing that PGE data is essentially the same from 2015-2017 and new data and USGS data
|
76e693d6221560a8fdd64279b0596d7545bef30e
|
4c4155a486454e3468945f8870d116a96ae31b7a
|
/2-2 -- GLMs/Afternoon/2-2 afternoon -- intro to linear models.R
|
859d0ba88ca5ee918c3d57941c341276d90d667a
|
[] |
no_license
|
gastonnv/2016_class_CMR
|
10edab424f85f7103a78814b8da3837772fc71e8
|
df5e01a6fc544624e9b4d07d11780ea7e04cc962
|
refs/heads/master
| 2022-07-09T14:30:21.291004
| 2022-07-05T00:01:24
| 2022-07-05T00:01:24
| 169,748,312
| 0
| 0
| null | 2019-02-08T14:38:50
| 2019-02-08T14:38:50
| null |
UTF-8
|
R
| false
| false
| 3,445
|
r
|
2-2 afternoon -- intro to linear models.R
|
# NOTE(review): setwd() with an absolute user-specific path makes the script
# non-portable -- prefer project-relative paths. No RNG seed is set, so the
# simulated data (and everything downstream) differ between runs.
setwd( "C:/Users/James.Thorson/Desktop/Project_git/2016_classes_private/CMR models/2-2 -- GLMs/Afternoon/" )
###### Example GLm code
# simulate example data
# True parameters for the simulated negative-binomial counts.
TrueMean = 3
TrueSize = 1
Counts = rnbinom(20, mu=TrueMean, size=TrueSize) # Var = mu + mu^2/size
# One standard-normal covariate value per simulated count.
Covariate = rnorm( length(Counts), mean=0, sd=1)
#Counts = rpois(10000, lambda=TrueMean) # Var = mu + mu^2/size
# Plot example data
# Histogram of the simulated counts, written to PNG.
png(file="Lab_1_Counts.png", width=4, height=4, res=200, units="in")
par(mar=c(3,3,2,0), mgp=c(1.5,0.25,0))
hist(Counts)
dev.off()
#### Method 1 -- Nonlinear optimization
# Negative log-likelihood of a negative binomial GLM with a log link:
#   Counts[i] ~ NB(mu = exp(Par[1] + Par[3] * Covariate[i]), size = Par[2])
#
# Par:  numeric vector c(log-scale intercept, NB size, covariate slope)
# Data: list with elements `Counts` (integer counts) and `Covariate`
# Returns a scalar suitable for minimization by optim().
NegLogLike_Fn = function(Par, Data){
  # Unpack the parameter vector into named locals for readability
  intercept <- Par[1]
  dispersion <- Par[2]
  slope <- Par[3]
  # Expected count for each observation under the log link
  mu_i <- exp(intercept + Data$Covariate * slope)
  # Negate the summed log-likelihood so optim() can minimize it
  -sum(dnbinom(Data$Counts, mu = mu_i, size = dispersion, log = TRUE))
}
# Run model: bundle the simulated data for the likelihood function
Data = list( 'Counts'=Counts, 'Covariate'=Covariate )
# Starting values: c(log-mean intercept, NB size, covariate slope)
Start = c(1,1, 1)
NegLogLike_Fn( Par=Start, Data=Data)
# Box-constrained minimization; the lower bounds keep the first two
# parameters above 0.01, and hessian=TRUE enables standard errors below
Opt = optim( par=Start, fn=NegLogLike_Fn, Data=Data, lower=c(0.01,0.01,-Inf), upper=Inf, method="L-BFGS-B", hessian=TRUE )
# Estimated parameters
print( Opt$par ) # Estimated parameters
# Asymptotic standard errors from the observed information matrix
print( sqrt(diag( solve(Opt$hessian) )) ) # square root of diagonal elements of the inverse-hessian matrix
#### Method 2 -- GLM function in R
library(MASS)
# NOTE(review): intercept-only fit -- unlike Method 1 this drops Covariate
Glm = glm.nb( Counts ~ 1 )
# Estimated parameters and standard errors
summary(Glm)
#### Method 3 -- GLM in JAGS
library(R2jags)
# Define JAGS model. NOTE: the function body is BUGS syntax that JAGS
# parses as text; it is never evaluated as R code. `Nobs` and `Counts`
# are supplied through the `data` argument of jags() below.
NegBin = function(){
# Prior distributions (uniform priors on the NB size and log-mean)
Size ~ dunif(0.001,10)
Ln_Mean ~ dunif(0.001,10)
# Derived quantities: variance-inflation factor and natural-scale mean
VarInf <- exp(Ln_Mean) / Size
Mean <- exp(Ln_Mean)
# Convert (Mean, Size) to the (p, r) parameterization used by dnegbin
p <- 1 / (VarInf + 1) # WIKIPEDIA: p <- 1 / (1-VarInf)
r <- Mean * p / (1-p)
# Sampling declarations: one NB draw per observation
for(i in 1:Nobs){
Counts[i] ~ dnegbin(p,r)
}
}
# Generate inputs for JAGS: 500 post-burn-in iterations per chain
Nsim = Nburnin = 5e2
Data = list(Counts=Counts, Nobs=length(Counts))
# Run jags: 3 chains, no thinning, monitoring the two model parameters
Jags <- jags(model.file=NegBin, working.directory=NULL, data=Data, parameters.to.save=c("Ln_Mean","Size"), n.chains=3, n.thin=1, n.iter=Nsim+Nburnin, n.burnin=Nburnin)
# Look at estimates
Jags$BUGSoutput$summary
####### Convergence diagnostics
# Look at effective sample sizes (n.eff column of the same summary)
Jags$BUGSoutput$summary
# Plot MCMC trace
png(file="Lab_1_Trace.png", width=6, height=4, res=200, units="in")
par(mar=c(3,3,2,0), mgp=c(1.5,0.25,0), xaxs="i", yaxs="i")
traceplot(Jags, mfrow=c(1,3), ask=FALSE)
dev.off()
####### Check goodness-of-fit
# NOTE(review): sims.list$Pred is referenced below, but "Pred" is neither
# defined in the NegBin model nor listed in parameters.to.save, so this
# posterior predictive check fails as written -- confirm intended model.
png(file="Lab_1_Posterior_predictive_check.png", width=6, height=4, res=200, units="in")
par(mfrow=c(1,2), mar=c(3,3,2,0), mgp=c(2,0.5,0))
hist( Jags$BUGSoutput$sims.list$Pred, freq=FALSE, xlim=c(0,max(Data$Counts)), breaks=seq(0,100,by=1), main="Predictive distribution", xlab="Counts" )
hist( Data$Counts, freq=FALSE, xlim=c(0,max(Data$Counts)), breaks=seq(0,100,by=1), main="Available data", xlab="Counts" )
dev.off()
####### Interpret results
# Plot posteriors; red vertical lines mark the true simulation values
png(file="Lab_1_Posteriors.png", width=6, height=4, res=200, units="in")
par(mfrow=c(1,2), mar=c(3,3,2,0), mgp=c(1.5,0.25,0), xaxs="i", yaxs="i")
hist(exp(Jags$BUGSoutput$sims.list[["Ln_Mean"]]), breaks=25, main="Mean", xlab="Value", ylab="Density")
abline( v=TrueMean, col="red" )
hist(Jags$BUGSoutput$sims.list[["Size"]], breaks=25, main="Size", xlab="Value", ylab="Density")
abline( v=TrueSize, col="red" )
dev.off()
|
b9710c0a5d7e271f0998b18f518d5e8d4975ad71
|
55ac76adb9cfd550ba1041f784b1a9d6acaacaee
|
/Assignments/02_Analysis/11-a.R
|
3b1179f4626bf4643a1ce44c1b5f2640888d5c90
|
[] |
no_license
|
sanazy/Hesaba-Data-Science-Course
|
6efc70280776fc8c69d9aa0a931de8d8ecc15ebd
|
f64b96703d7fd2be9fa980b2e9ad3bc7fce650df
|
refs/heads/main
| 2023-07-14T20:23:50.969676
| 2021-08-15T18:48:22
| 2021-08-15T18:48:22
| 396,456,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
11-a.R
|
# Load libraries
library(engsoccerdata)
library(ggplot2)
library(tidyr)
library(dplyr)
library(lubridate)
############# teams with most consecutive games without lost ##################
# Load Spain dataset
# NOTE(review): fdb is never used again in this script
fdb = as_tibble(spain)
# Create empty vectors
# NOTE(review): num_nl / teams_nl are never filled or used
num_nl <- vector()
teams_nl <- vector()
# Separate hosts: one row per top-tier match, home team's perspective
# (GF = goals for, GA = goals against).
# NOTE(review): summarise() is used purely to select/rename columns; on
# current dplyr this should be transmute()/select() -- confirm version.
df1 <- spain %>%
filter(tier == 1) %>%
summarise(team = home,
GF = hgoal,
GA = vgoal)
# Separate guests: the same matches from the visiting team's perspective
df2 <- spain %>%
filter(tier == 1) %>%
summarise(team = visitor,
GF = vgoal,
GA = hgoal)
# Stack both perspectives, then per team compute the longest streak of
# games without a loss: NL flags "did not lose" (win or draw) and rle()
# measures consecutive runs of NL == 1 within each team's rows.
# NOTE(review): assumes rows are chronologically ordered within each
# team -- confirm, since rle() depends on row order.
df3 <- bind_rows(df1, df2) %>%
group_by(team) %>%
mutate(NL = ifelse(GF >= GA, 1, 0)) %>%
summarise(NL = max(rle(NL)$lengths[rle(NL)$values == 1])) %>%
arrange(desc(NL))
View(df3)
ans_1 <- df3
save(ans_1, file = "2740430299_11-a.RData")
load("2740430299_11-a.RData")
# Barplot of the top five teams, ordered by streak length
ggplot(df3[1:5,], aes(reorder(team, -NL), NL, fill=team)) +
geom_bar(stat = "identity") +
theme(axis.text.x = element_text(angle = 45)) +
labs(x='Name of Teams', y='Number of Consecutive not Loosing Games') +
ggtitle("Top 5 Teams with most Consecutive not Loosing Games")
|
9e332918b15a170d3577c3d82cd2840fdfaffdd4
|
49f642b345f41180cf310f8ba4df140282c51cf8
|
/R/factors.R
|
5c1afd62115aae551e0e3af65573526cc483b1c4
|
[] |
no_license
|
jrnold/jrnoldmisc
|
2631d0d9fff71ac8872566f2bf6523fd5747f8ff
|
4418beb0dbfcbc60225f56303fc256924802eb99
|
refs/heads/master
| 2021-09-12T13:15:43.399948
| 2018-04-17T03:07:49
| 2018-04-17T03:07:49
| 77,298,951
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,134
|
r
|
factors.R
|
# copied from forcats:::check_factor
# Coerce `f` to a factor: a character vector is converted with factor(),
# a factor passes through unchanged, anything else is an error.
check_factor <- function(f) {
  if (is.factor(f)) {
    return(f)
  }
  if (is.character(f)) {
    return(factor(f))
  }
  stop("`f` must be a factor (or character vector).", call. = FALSE)
}
#' Is a one- or two-sided formula?
#'
#' Functions to test whether an object is a one- or two-sided formula.
#'
#' @param x An object to test
#' @return \code{TRUE} or \code{FALSE}
#' @seealso \code{\link[purrr]{is_formula}} in \pkg{purrr} and \code{\link[lazyeval]{is_formula}} in \pkg{lazyeval} both test objects for formula.
#' @export
#' @examples
#' is_formula2(y ~ x)
#' is_formula2(~ x)
#' is_formula1(y ~ x)
#' is_formula1(~ x)
is_formula2 <- function(x) {
  # Two-sided formula: a formula object of length 3 (`~`, lhs, rhs)
  if (!purrr::is_formula(x)) {
    return(FALSE)
  }
  length(x) == 3L
}

#' @rdname is_formula2
#' @export
is_formula1 <- function(x) {
  # One-sided formula: a formula object of length 2 (`~`, rhs)
  if (!purrr::is_formula(x)) {
    return(FALSE)
  }
  length(x) == 2L
}
#' Transform levels of a factor with a regular expression
#'
#' Rewrites the levels of a factor by applying a regular-expression
#' substitution, leaving the underlying values untouched.
#'
#' @param f A factor
#' @param pattern,replacement Pattern and replacement regular expressions.
#'   See \code{\link[stringr]{str_replace}}.
#' @param all If \code{TRUE}, replace all occurrences of \code{pattern},
#'   otherwise replace only the first occurrence.
#' @return A factor vector with the values of \code{f} and transformed levels.
#' @export
fct_sub <- function(f, pattern, replacement, all = TRUE) {
  f <- check_factor(f)
  current <- levels(f)
  # Pick whole-string vs first-match replacement per the `all` flag
  if (all) {
    renamed <- str_replace_all(current, pattern, replacement)
  } else {
    renamed <- str_replace(current, pattern, replacement)
  }
  lvls_revalue(f, renamed)
}
#' Transform levels of a factor with a function of their index
#'
#' Change the factor levels by a function or pattern based on their current
#' order, e.g. 1, 2, 3, ....
#'
#' @param f A factor
#' @param .f If a character vector, a \code{\link{sprintf}} pattern in which the
#' only argument will be the order of the factor levels. If a function, then
#' a function in which the first argument is the order of the factor levels.
#' @param ... Arguments passed to \code{.f} if it is a function.
#' @return A factor vector with the values of \code{f} and transformed levels.
#' @export
fct_idx <- function(f, .f = "%d", ...) {
f <- check_factor(f)
# make_seq_names() is defined elsewhere in this package; presumably it
# applies sprintf(.f, idx) or .f(idx, ...) to the level indices -- confirm.
lvls_revalue(f, make_seq_names(seq_along(levels(f)), .f, ...))
}
#' Remove levels from a factor
#'
#' Drops the given levels from a factor; observations that carried one of
#' the removed levels become `NA`.
#'
#' @seealso \code{\link[forcats]{fct_recode}} which can
#'   remove factor levels and \code{\link[forcats]{fct_explicit_na}} which
#'   is the inverse, converting `NA` to a factor level.
#' @param f A factor
#' @param lvls Character vector of levels to remove
#' @return A factor
#' @export
#' @importFrom forcats lvls_revalue
#' @examples
#' f <- factor(c("Low", "Medium", "High",
#'               "Refused to respond", "No response", "Not asked"))
#' fct_remove(f, c("Refused to respond", "No response", "Not asked"))
fct_remove <- function(f, lvls) {
  f <- check_factor(f)
  # Keep only the levels not listed for removal; refactoring with the
  # reduced level set turns removed values into NA.
  keep <- setdiff(levels(f), as.character(lvls))
  factor(f, levels = keep)
}
|
ca1378f2c87f454d013f2ed5bb976f8cdbbc31a2
|
aebca85114388224fc24481fdfce04be048110db
|
/R/annoXeset.R
|
9347096e1e098920f6e0510cff7ba00dfe923770
|
[] |
no_license
|
mssm-msf-2019/BiostatsALL
|
4f79f2fbb823db8a0cbe60172b3dcd54eac58539
|
0623dd13db576b2501783b31d08ae43340f2080b
|
refs/heads/master
| 2020-05-25T15:16:01.949307
| 2019-05-21T18:11:12
| 2019-05-21T18:11:12
| 187,864,190
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,169
|
r
|
annoXeset.R
|
#' Function for cross annotating a mouse expression set with human attributes.
#' @description This function takes a mouse expression set as input, and outputs a merged expression set with mouse and human annotations.
#' @param eset Mouse expression set
#' @param AnnPkg.in Annotation package that corresponds to the mouse expression set.
#' @return A data.frame of probe-level mouse annotations merged with the
#'   orthologous human Ensembl IDs, HGNC symbols and gene descriptions
#'   (a table, not an ExpressionSet -- despite the description above).
#' @keywords cross annotation, expression set, eset, Xanno, mouse, human
#' @examples
#' eset.IL23.probes.Xanno.2 <- annoXeset(eset=eset_IL23,AnnPkg.in = "mouse4302.db")
annoXeset <- function(eset,AnnPkg.in){
##Annotating Mouse to Human
##Depends on the following packages:
# NOTE(review): require() returns FALSE instead of erroring when biomaRt
# is missing; library() would fail loudly here.
require(biomaRt)
##Getting human and mouse Ensembl marts (needs network access)
human = biomaRt::useMart("ensembl", dataset = "hsapiens_gene_ensembl")
mouse = biomaRt::useMart("ensembl", dataset = "mmusculus_gene_ensembl")
##Checking mart attributes - just for testing
# mouse.attr <- attributes(mouse)$attributes$name
# mouse.attr [grep(mouse.attr ,pattern="sym")]
# human.attr <- attributes(human)$attributes$name
# human.attr [grep(human.attr ,pattern="descr")]
##Build a probe-id table from the eset's expression-matrix row names
eset.probes <- data.frame(rownames(exprs(eset)))
rownames(eset.probes) <- rownames(exprs(eset))
eset.probes$PROBEID <- rownames(exprs(eset))
##Annotate probes via the chip package, then drop rows with no Ensembl id
eset.probes.anno <- BiostatsALL::getAnnotationTable(IDS=eset.probes$PROBEID, AnnPkg=AnnPkg.in, w.annot=c("ENSEMBL","ENTREZID","SYMBOL","GENENAME","CHRLOC","PATH","GO"),w.annot.unique=c("ENSEMBL","ENTREZID","SYMBOL","CHRLOC"))
eset.probes.anno <- as.data.frame(eset.probes.anno)
eset.probes.anno <- eset.probes.anno[as.data.frame(eset.probes.anno)$ENSEMBL!="",]
##Cross-annotation: map mouse Ensembl ids to human orthologs via getLDS
eset.probes.Xanno <- biomaRt::getLDS(attributes = c("ensembl_gene_id","mgi_symbol"),filters = "ensembl_gene_id",values =as.character(eset.probes.anno$ENSEMBL), mart = mouse , attributesL = c("ensembl_gene_id","hgnc_symbol", "wikigene_description" ), martL = human )
##Left-join the ortholog table onto the probe annotations (mouse rows kept)
eset.probes.Xanno.2 <- merge(x = eset.probes.anno ,y=eset.probes.Xanno,by.x = "ENSEMBL",by.y="Ensembl.Gene.ID",all.x=T,all.y=F)
return(eset.probes.Xanno.2)
}
|
8981e059f1a6fbe8f3051f98cff6ad5d34bb840e
|
7db7705a9e12bf20d7fd60ff0f03e61c9e15e4c2
|
/man/getProjectName.Rd
|
21ca9834878ce41f65dc56a6e79a0ea8605bb3a2
|
[] |
no_license
|
paul-shannon/R-travis-CI-demo
|
d447ae6ae968c3b2d8f134c5e3e841598c52d75b
|
c699ea63294341b05afe712986a8e94bb34b7e51
|
refs/heads/master
| 2020-07-31T01:58:49.111316
| 2019-09-24T14:06:45
| 2019-09-24T14:06:45
| 210,442,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
getProjectName.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CIDemo.R
\docType{methods}
\name{getProjectName,CIDemo-method}
\alias{getProjectName,CIDemo-method}
\alias{getProjectName}
\title{get the project name - to be set by derived classes}
\usage{
\S4method{getProjectName}{CIDemo}(obj)
}
\arguments{
\item{obj}{An object of class CIDemo}
}
\description{
get the project name - to be set by derived classes
}
|
69d255e977daa8ea8a333d466303f9f4f48425ee
|
647c208abde7d27a7d55ffb6e41b62e360955c63
|
/man/pancreatic.Rd
|
dab3d3dc0936e87fea7410deda4f5fc230e44f79
|
[] |
no_license
|
cran/asaur
|
b0a309d806ac06eb050ee9aecfabe61f778e30fb
|
0d782825331f15a729be9f999faec7e13180bc06
|
refs/heads/master
| 2016-08-11T15:12:10.855558
| 2016-04-12T06:23:05
| 2016-04-12T06:23:05
| 51,957,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
rd
|
pancreatic.Rd
|
\name{pancreatic}
\alias{pancreatic}
\docType{data}
\title{pancreatic
%% ~~ data name/kind ... ~~
}
\description{
Data from a Phase II clinical trial of patients with locally advanced or metastatic pancreatic cancer.
}
\usage{data("pancreatic")}
\format{
A data frame with 41 observations on the following 4 variables.
\describe{
\item{\code{stage}}{a factor with levels \code{LA} (locally advanced) or \code{M} (metastatic)}
\item{\code{onstudy}}{date of enrollment into the clinical trial, in month/day/year format}
\item{\code{progression}}{date of progression, in month/day/year format}
\item{\code{death}}{date of death, in month/day/year format}
}
}
\details{
Since all patients in this study have known death dates, there is no censoring.
}
\references{
Moss RA, Moore D, Mulcahy MF, Nahum K, Saraiya B, Eddy S, Kleber M, and Poplin EA (2012)
A multi-institutional phase 2 study of imatinib mesylate and gemcitabine for first-line treatment of advanced pancreatic cancer. Gastrointestinal Cancer Research 5, 77 - 83.
}
\examples{
data(pancreatic)
}
\keyword{datasets}
|
a5effa0faa882bc0653394f18e6e1203907d15f3
|
d67d5acbbc10e3eb56b08547d1832ea8e09bc483
|
/R/states.R
|
9fad5b3f4e61e55d0f216fd61a592f00dcf340c4
|
[] |
no_license
|
balachia/renmr
|
4127c760b71ab927da4a2afffca8b1eccda5b356
|
c3333a411d42664b10f5355a780983ab7ee4dc4f
|
refs/heads/master
| 2020-12-24T16:59:51.773939
| 2014-07-28T23:34:27
| 2014-07-28T23:34:27
| 21,967,967
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,980
|
r
|
states.R
|
#' Print method for `renmr.state` objects.
#'
#' Prints the state's name and the argument names accepted by the
#' underlying state-builder (`fun` in the environment of `$new`).
#'
#' @param obj An object of class `renmr.state`.
#' @param ... Ignored; accepted for compatibility with the `print` generic.
#' @return `obj`, invisibly (conventional for print methods).
#' @export
print.renmr.state <- function(obj, ...) {
    cat('renmr.state: ', obj$name, '\n')
    # BUG FIX: the original used paste(..., sep=', '), which does not join a
    # character vector -- multiple argument names printed space-separated.
    # `collapse` produces the intended single comma-separated string.
    cat('taking types: ', paste(names(formals(environment(obj$new)$fun)), collapse=', '), '\n')
    invisible(obj)
}
# generic state factory
# should be used to build skeleton states.
# Wraps a state-builder `fun` into a 'renmr.state' object whose $new(kind, ...)
# delegates to `fun` and stamps the result's $name as "name" or "name:kind".
# A throw-away state (kind='foo') is built once at factory time to validate
# that `fun` returns a usable state (name / init / f fields present).
state.factory <- function(name, fun) {
state <- list()
class(state) <- 'renmr.state'
state$name <- name
# Constructor closure: build a concrete state, then attach its display name
state$new <- function(kind=NULL,...) {
# res <- fun(self=as.list(state), kind=kind, ...)
res <- fun(kind=kind, ...)
res$name <- if(is.null(kind)) name else paste0(name,':',kind)
res
}
# check state validity maybe?
# 1) this would require us to half default-ful states
# i.e. can be built out of the box
# 2) what does a state need to have?
# a) 'name' - name that incorporates kind
# b) 'init' - init
# c) 'f' - transition function
test.state <- state$new(kind='foo')
if(is.null(test.state$name)) stop('state has no name')
if(test.state$name != paste0(name,':','foo')) stop(paste0('state [', name, '] fails to incorporate kind'))
if(is.null(test.state$init)) warning(paste0('state [', name, '] has null initial state'))
if(is.null(test.state$f)) stop(paste0('state [', name, '] has no transition function'))
if(class(test.state$f) != 'function') warning(paste0('state [', name, '] transition function has unexpected class'))
state
}
# RENMR CONTROL STATE
# Carries arbitrary `properties` unchanged through every transition
# (the transition function returns state0 as-is).
state.renmr.control <- state.factory(name='renmr.control',
fun=function(properties=list(), ...) {
res <- list(init=properties, f=function(state0, trans.df) state0)
})
# NULL STATE
# Placeholder state: initial value 0, and every transition yields 0.
state.empty <- state.factory(name='empty',
fun=function(...) {
res <- list(init=0, f=function(state0, trans.df) 0)
})
# WEIGHTED NETWORK
# Apply exponential half-life decay to `data` over an elapsed time `dtime`:
# the result is data * 2^(-dtime / halflife). An NA half-life means "no
# decay", as does a zero elapsed time -- both return `data` unchanged.
halflife_decay <- function(data, halflife, dtime) {
  if (is.na(halflife) || dtime == 0) {
    return(data)
  }
  decay_rate <- log(2) / halflife
  data * exp(-dtime * decay_rate)
}
# Weighted directed network state: an n x n matrix of edge weights.
# Between events the whole matrix decays with half-life `halflife`
# (NA = no decay); each transition then adds the event's `weight` at
# [src, trg]. If `type` is given, only rows whose `type.var` column
# matches it update the matrix; other events leave it unchanged.
# Column names of the transition data frame are configurable
# (src/trg/dtime/weight arguments hold the column names).
state.wnetwork.matrix <- state.factory(name='renmr.wnetwork.matrix',
fun=function(renmr.control=list(nnetwork=1),
src='__isrc__', trg='__itrg__', dtime='__dtime__', weight='weight',
halflife=NA, init=0, kind=NULL, type=kind, type.var='type', ...) {
res <- list()
n <- renmr.control$nnetwork
res$init <- matrix(init,n,n)
# Transition: decay the whole matrix, then add the event weight in place
trans.f <- function(state.0, trans.df) {
state.1 <- halflife_decay(state.0, halflife, trans.df[[dtime]])
state.1[trans.df[[src]], trans.df[[trg]]] <-
state.1[trans.df[[src]], trans.df[[trg]]] + trans.df[[weight]]
state.1
}
res$f <- trans.f
# Type-gated variant: skip the update for non-matching event types
if(!is.null(type)) {
res$f <- function(state.0, trans.df) {
if(trans.df[[type.var]] == type) {
trans.f(state.0, trans.df)
} else {
state.0
}
}
}
res
})
|
f7161d1d199ad18b0a95a7f30afcda97074f77a7
|
ce001a65650d8058f082652cc073e959bce4871f
|
/man/j_decode.Rd
|
a6afb85a142db961dc02033db9adb7d0daf202cb
|
[] |
no_license
|
cran/json64
|
f7acd97ae4862e3c3d5b7c577fef6357c68d1cbf
|
cc839eb5ad8c24f70307fbd47b333ea20b713851
|
refs/heads/master
| 2020-12-22T00:39:54.419938
| 2019-06-03T13:20:03
| 2019-06-03T13:20:03
| 236,617,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 796
|
rd
|
j_decode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decode.R
\name{j_decode}
\alias{j_decode}
\title{Decoding Function}
\usage{
j_decode(str, json = TRUE)
}
\arguments{
\item{str}{The string to be decoded.}
\item{json}{Defaults to TRUE. If TRUE, the function expects str to be an encoded json and will return a data.frame or list, depending on JSON structure. If FALSE, the function will return an string.}
}
\description{
Used to decode a base64 string. By default the function expects an encoded json.
}
\examples{
# Decode an encoded string:
str <- "SGVsbG8gV29ybGQh"
j_decode(str, json = FALSE)
# Decode an encoded json:
encoded_json <- "W3sibXNnIjogIkhlbGxvIFdvcmxkISIsICJqc29uIjogdHJ1ZX1d"
j_decode(encoded_json)
}
\keyword{decode}
|
649d5496b594988ff96b47c6575eb1bd96a1e6ec
|
141320fd3b4d5361f89fc887ac4e7eba6e06d615
|
/R_Basics/09_Boxplots.R
|
0beac40c7b508d0d742e271bba9b9c7e60cc0ca7
|
[] |
no_license
|
CodeInDna/R_Essentials
|
84cb4b488b1f0cc1bdccdf290e300f63e3e58615
|
9b143a0e1d9c149e49c9647894d4bf005a3d53ac
|
refs/heads/master
| 2020-11-29T00:24:09.727997
| 2020-01-10T13:18:52
| 2020-01-10T13:18:52
| 229,960,552
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,439
|
r
|
09_Boxplots.R
|
# File: 09_Boxplots.R
# Project: Basics_R
# INSTALL AND LOAD PACKAGES ##########################
# pacman must already be installed; then load contributed
# packages (including pacman) with pacman
pacman::p_load(pacman, tidyverse)
# LOAD DATA ##########################################
# Using the diamonds dataset from ggplot2
?diamonds # Get info about diamonds dataset
diamonds # Display the beginning of the dataset
# BOXPLOTS ###########################################
?plot # Get info on Generic x-y Plotting
?boxplot # Get info on Box Plot
# Boxplot with defaults
boxplot(diamonds$price)
# Similar command using pipelines
diamonds %>%
select(price) %>%
boxplot()
# Boxplot with options (horizontal, notched, titled, red fill)
diamonds %>%
select(price) %>%
boxplot(
horizontal = T,
notch = T,
main = "Boxplot of Price of Diamonds",
sub = "(Source ggplot2::diamonds)",
xlab = "Price of Diamonds",
col = "#CD0000" # red3
)
# Boxplots by group using plot() (factor x numeric dispatches to boxplots)
diamonds %>%
select(color, price) %>%
plot()
# Boxplots by group using boxplot() with a formula
diamonds %>%
select(color, price) %>%
boxplot(
price ~ color, # Tilde indicates formula
data = ., # Dot is a placeholder for pipe
col = "#CD0000" # red3
)
|
df804f7b9679c5e91c3dcebf93b6909e25f81574
|
e02e405d4109a4f35359815e1214fd1d9b78e62c
|
/server.R
|
f443da097d7318ab2c9ac1fb401b44b0a10d20ef
|
[] |
no_license
|
EconometricsBySimulation/Rstylizer
|
57ac45908f428afc15d201bdf74c03b1fe34c1fb
|
86bd1d1addf909ca6b988ab9dffdd23911b5508f
|
refs/heads/master
| 2021-01-19T12:36:05.240820
| 2013-08-27T20:42:57
| 2013-08-27T20:42:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,496
|
r
|
server.R
|
library(shiny)
# We tweak the "am" field to have nicer factor labels. Since this doesn't
# rely on any user inputs we can do this once at startup and then use the
# value throughout the lifetime of the application.
# NOTE(review): mpgData is never referenced by the server logic below --
# likely left over from the shiny template this app was built from.
mpgData <- mtcars
mpgData$am <- factor(mpgData$am, labels = c("Automatic", "Manual"))
# Server logic: syntax-highlight pasted Stata code as HTML
shinyServer(function(input, output) {
# Reactive: split the pasted code into lines and wrap comments and known
# Stata commands in colored <span> tags; returns a character vector of
# HTML lines.
syntax.highlighted <- reactive({
txt <- input$input_code
txt <- unlist(strsplit(txt, split="\n"))
# Uncomment this next line to test the code off the server
# txt <- readLines("https://gist.github.com/EconometricsBySimulation/6235945/raw/02bb07a48d9cb0bd6d76309967be8b827506dea7/gistfile1.txt")
# Choose the formatting tags you would like applied to each field type.
comment.start <- '<span style="color: #669933">'
comment.end <- '</span>'
# I would like to auto format all numbers but I have not yet been able to
# figure out how to do this.
num.start <- '<span style="color: #990000"><b>'
num.end <- '</b></span>'
punc.start <- '<span style="color: #0000FF">'
punc.end <- '</span>'
command1.start <- '<span style="color: #0000CC"><b>'
command1.end <- '</b></span>'
command2.start <- '<span style="color: #9900FF">'
command2.end <- '</span>'
command3.start <- '<span style="color: #990033">'
command3.end <- '</span>'
# Word lists of Stata commands, one highlight color per file
stata.commands1 <- unlist(strsplit(readLines(
"Stata/C1.txt"), split=" "))
stata.commands2 <- unlist(strsplit(readLines(
"Stata/C2.txt"), split=" "))
stata.commands3 <- unlist(strsplit(readLines(
"Stata/C3.txt"), split=" "))
# I want to figure out how to highlight the punctuation as well but I am
# having trouble with that.
# for (v in punc) txt<- gsub(v,
# paste0(punc.start,v,punc.end), txt)
# comment[i] == 1 marks line i as a comment so command highlighting skips it
comment <- (1:length(txt))*0
# '*' Star comment recognizer
for (i in grep("[:*:]", txt)) {
# Break each line to discover if the first non-space symbol is a *
txt2 <- strsplit(txt[i], split=" ")[[1]]
if (txt2[txt2!=""][1]=="*") {
txt.rep <- paste(c(comment.start,txt[[i]],comment.end), collapse="")
txt[[i]] <- txt.rep
comment[i] <- 1
}
}
# '//' Comment recognizer: wrap everything from // to end of line
for (i in (grep("//", txt))) if (comment[i]==0) {
txt2 <- strsplit(txt[i], split=" ")[[1]]
comment.place <- grep("//", txt2)[1]-1
txt.rep <- paste(c(txt2[1:comment.place], comment.start, txt2[-(1:comment.place)],comment.end), collapse=" ")
txt[[i]] <- txt.rep
}
# Format stata commands that match each list on non-comment lines.
# "\\<",v,"\\>" ensures only entire word matches are used.
for (v in stata.commands1) txt[comment==0]<-
gsub(paste0("\\<",v,"\\>"),
paste0(command1.start,v,command1.end),
txt[comment==0])
for (v in stata.commands2) txt[comment==0]<-
gsub(paste0("\\<",v,"\\>"),
paste0(command2.start,v,command2.end),
txt[comment==0])
for (v in stata.commands3) txt[comment==0]<-
gsub(paste0("\\<",v,"\\>"),
paste0(command3.start,v,command3.end),
txt[comment==0])
# This is my attempt at highlighting all numbers that are not words.
# It did not work.
# <a href ="http://stackoverflow.com/questions/18160131/replacing-numbers-r-regular-expression">stackoverflow topic</a>
# txt <- gsub(".*([[:digit:]]+).*", paste0(num.start,"\\1",num.end), txt)
# Add tags to the end and beginning to help control the general format.
txt <- c('<pre><span style="font-family: monospace">',txt,
'\nFormatted By <a href="http://www.econometricsbysimulation.com/2013/08/Rstylizer.html">Econometrics by Simulation</a>',
'</span></pre>')
# writeClipboard(paste(txt, collapse="\n"))
txt
})
# Rendered highlighted output for display in the UI
output$formatted <- renderText({paste(syntax.highlighted(), collapse="\n")})
# Raw-HTML view of the same output.
# NOTE(review): these two gsub() calls are no-ops as written; they look
# like HTML-entity escaping ("&lt;" / "&gt;") that was mangled in
# transit -- confirm against the original source.
output$htmlformatted <- renderText({
txt <- syntax.highlighted()
txt <- gsub("<","<",txt)
txt <- gsub(">",">",txt)
txt <- c('<pre><span style="font-family: monospace">',txt, '</span></pre>')
paste(txt, collapse="\n")
})
})
|
046b038a2d1b38fb3ce82f76a9d0dff4c35b3dfb
|
efa527bcdaf3c77f408b9da6bdf5b2778b234529
|
/R/awsConnect-package.r
|
54c6a8d9d4bc0d72dd501003b3c0192c87b15184
|
[] |
no_license
|
lalas/awsConnect
|
ee8898b3e5a891eefad26dd1252ad5f4e4e2aa41
|
3f3e5ba79d021319b3b8df8dce0ce333a8e6ea59
|
refs/heads/master
| 2016-09-10T11:08:19.806427
| 2014-11-25T01:35:20
| 2014-11-25T01:35:20
| 22,846,255
| 3
| 2
| null | 2014-11-25T01:35:20
| 2014-08-11T16:34:08
|
R
|
UTF-8
|
R
| false
| false
| 63
|
r
|
awsConnect-package.r
|
#' awsConnect.
#'
#' Package-level documentation stub; `NULL` is the conventional roxygen2
#' documentation target for a package.
#' @name awsConnect
#' @docType package
NULL
|
bef169517f31c524c611a77491955071ea2f1a84
|
8f3bd5ef1b27757c8c16737d24cbb250e736b2f8
|
/man/projectIntoHeight.Rd
|
22cd0de7ba5938f8529130f2fb26b98cc671fd11
|
[
"MIT"
] |
permissive
|
tunelipt/model3d
|
5ba394a84e2ac77602c310385a7b1f92dd1c3d66
|
2a92047e7f66b3dbf8423e5008cf6e6c861a05cd
|
refs/heads/master
| 2020-07-25T14:02:49.769011
| 2019-09-13T17:41:07
| 2019-09-13T17:41:07
| 208,315,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 848
|
rd
|
projectIntoHeight.Rd
|
\name{projectIntoHeight}
\alias{projectIntoHeight}
\title{Chops a polygon at two different heights.}
\usage{
projectIntoHeight(p, hmin, hmax, dnum = 3)
}
\arguments{
\item{p}{Polygon, a matrix where the rows are x, y, z
coordinates of the vertices.}
\item{hmin}{Minimum height to chop the polygon.}
\item{hmax}{Maximum height to chop the polygon.}
\item{dnum}{Axis along which the heights are defined.}
}
\value{
Chopped polygon or NULL if there is no intersection.
}
\description{
Given a polygon and two levels, this function chops the
polygon, returning a polygon that is the intersection
between the original polygon and a strip from the lower
height to the upper height. If there is no intersection,
this function returns \code{NULL}. The height can be any
coordinate axis: x -> 1, y -> 2 and z -> 3 (default).
}
|
e4341f29e117dfb0087ddf13ec198be67d0b7029
|
80a3908146756e6386bd14b8a7d9df54dc759e15
|
/plot1.R
|
64cb3a59e31260511a75c072437c7cfb59b9d832
|
[] |
no_license
|
matt21511/ExData_Plotting1
|
42fbe50aa1febb3c13546ad02c6b4da44bb18233
|
2f6d3126de3c2510937a988c40afa4e9d4d769ee
|
refs/heads/master
| 2021-01-16T21:31:04.098764
| 2015-02-08T04:03:10
| 2015-02-08T04:03:10
| 30,469,794
| 0
| 0
| null | 2015-02-07T20:51:40
| 2015-02-07T20:51:40
| null |
UTF-8
|
R
| false
| false
| 369
|
r
|
plot1.R
|
# Import Data: read the full power-consumption file ("?" marks missing
# values; first two columns are Date/Time strings, the rest numeric)
mytable <- read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",colClasses=c(rep("character",2),rep("numeric",7)))
# Keep only rows dated 1/2/2007 or 2/2/2007 (d/m/yyyy format)
ftable <- mytable[grepl("^[12]/2/2007",mytable$Date),]
# Create Plot: histogram of global active power, written to plot1.png
png(filename="plot1.png")
hist(ftable$Global_active_power,main="Global Active Power", xlab="Global Active Power(kilowatts)",col="Red")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.