blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
188c6f08f11c4ddcbcc8b7ec3f4c14e9399b9b20
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/morse/examples/ppc.reproFitTT.Rd.R
|
d48338d4beec6965d67f8be9b1f4e8bee4038d5f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
ppc.reproFitTT.Rd.R
|
## Extracted help-page example for morse::ppc.reproFitTT (posterior
## predictive check plot for reproduction-toxicity fits).
## The ##D-prefixed lines are the \dontrun{} portion of the original
## example: the MCMC fit is slow, so it is not executed by default.
library(morse)
### Name: ppc.reproFitTT
### Title: Posterior predictive check plot for 'reproFitTT' objects
### Aliases: ppc.reproFitTT
### ** Examples
# (1) Load the data
data(cadmium1)
# (2) Create an object of class "reproData"
dataset <- reproData(cadmium1)
## Not run:
##D # (3) Run the reproFitTT function with the log-logistic gamma-Poisson model
##D out <- reproFitTT(dataset, stoc.part = "gammapoisson",
##D ecx = c(5, 10, 15, 20, 30, 50, 80), quiet = TRUE)
##D
##D # (4) Plot observed versus predicted values
##D ppc(out)
## End(Not run)
|
d72ae9150e0844d8f197957857cc55be2f6ef845
|
38b161f15f0d6ca60e386ffbb1e37f91806d944a
|
/man/pptx_content_dimensions.Rd
|
9b9547c1d3d0acd97575ab3b5a7d87f2da4c0731
|
[
"MIT"
] |
permissive
|
MusculusMus/pptxtemplates
|
51c871077023ec5afaa77481e4ba8496502b933c
|
fab50ccdc9ec1caa5494dd307e5781397b057b4e
|
refs/heads/master
| 2023-07-02T18:54:10.146636
| 2021-08-07T22:40:12
| 2021-08-07T22:40:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 395
|
rd
|
pptx_content_dimensions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pptx_content_dimensions.R
\name{pptx_content_dimensions}
\alias{pptx_content_dimensions}
\title{Extract Content Dimension from pptx document}
\usage{
pptx_content_dimensions(file)
}
\arguments{
\item{file}{Filepath to pptx document}
}
\value{
Named list
}
\description{
Extract Content Dimension from pptx document
}
|
7916d909bab6c03b12996cd21aadd0d0394d4de0
|
e1434311fdd51d20e15eb3ae26aa261e712b58f3
|
/man/coreOTUModuleUI.Rd
|
42e332a82c57416843975c9fd0aa465253b38b5a
|
[] |
no_license
|
tseanlu/PathoStat
|
9b88c1ebd0d893e8581e5d664999ceb32d3880f1
|
f346fbaff12faba2e2f603c7019be51a4bb56815
|
refs/heads/master
| 2021-04-28T08:10:40.365224
| 2018-03-06T18:23:26
| 2018-03-06T18:23:26
| 117,146,929
| 1
| 0
| null | 2018-01-11T19:59:05
| 2018-01-11T19:59:05
| null |
UTF-8
|
R
| false
| true
| 1,124
|
rd
|
coreOTUModuleUI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coreOTUModule.R
\name{coreOTUModuleUI}
\alias{coreOTUModuleUI}
\title{UI function for Core OTU Module}
\usage{
coreOTUModuleUI(id, label = "Core OTUs")
}
\arguments{
\item{id}{Namespace for module}
\item{label}{Tab label}
}
\value{
A \code{\link[shiny]{tabPanel}} that can be included within a
\code{\link[shiny]{tabsetPanel}}.
}
\description{
This function creates the UI for the Core OTU tab. The tab panel can be
included within a tabsetPanel, thus providing a simple way to add or remove
this module from the Shiny app. The first argument, \code{id}, is the ID to
be used for the namespace \emph{and} must match the \code{id} argument
provided to \code{\link{coreOTUModule}}.
}
\examples{
shiny::mainPanel(
shiny::tabsetPanel(
coreOTUModuleUI("coreOTUModule")
)
)
}
\seealso{
\code{\link{coreOTUModule}} for the server function,
\code{\link[shiny]{tabPanel}} for the UI component returned by this
function, or \url{http://shiny.rstudio.com/articles/modules.html} for
more information about Shiny modules.
}
|
6f5575b7065db076d9a23187c05b2bd7e2341dd1
|
b4210f37c0f781a5caf535700d58f937e4cb1a99
|
/server.R
|
0b241dbb2fffff321f8d0672d40a257191d7bdfd
|
[] |
no_license
|
michaelpboyle/DevDataProducts
|
7bfcedbbbcded8aec20ac688033cb7ef5536ef7f
|
1dd9340b1bef92a189f67c1f0d8cccefc5b18630
|
refs/heads/master
| 2020-04-06T04:34:01.974414
| 2015-07-12T02:11:09
| 2015-07-12T02:11:09
| 38,947,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,258
|
r
|
server.R
|
## server.R — Shiny backend for exploring the built-in "cars" data set.
## Renders a histogram of stopping distances for a user-selected speed
## range (input$range, a two-element slider), plus text summaries.

## Load "Shiny" package
library(shiny)

## Load "cars" dataset
data(cars)

## Helper: rows of `cars` whose speed lies inside the user-selected
## range. The original repeated this subset expression three times.
speed_subset <- function(range) {
  subset(
    cars,
    speed >= as.numeric(range[1]) & speed <= as.numeric(range[2])
  )
}

## Initialize "Shiny Server"
shinyServer(
  function(input, output) {

    ## Histogram of stopping distances at the selected speed range,
    ## with median (magenta) and mean (blue) reference lines.
    output$myplot <- renderPlot({
      dists <- speed_subset(input$range)$dist
      hist(
        dists,
        xlab = "Stopping Distance (Feet)",
        col = "yellow",
        main = "Stopping Distances At Selected Speed Range",
        breaks = 8,
        xlim = c(0, 125),
        ylim = c(0, 20)
      )
      abline(v = median(dists), col = "magenta", lwd = 8)
      abline(v = mean(dists), col = "blue", lwd = 3)
      ## BUG FIX: the original called legend("topright", lty, lwd = ...)
      ## with a bare `lty` symbol; no object named `lty` exists, so the
      ## call errored at render time. Line types must be a named argument.
      ## abline() draws solid lines by default, so lty = 1 matches.
      legend(
        "topright",
        legend = c("Median", "Mean"),
        lty = c(1, 1),
        lwd = c(8, 3),
        col = c("magenta", "blue")
      )
    })

    ## Render text for display in the user interface.
    output$myLow <- renderText(paste("From:", input$range[1], "MPH"))
    output$myHigh <- renderText(paste("To: ", input$range[2], "MPH"))
    output$myObs <- renderText(paste(
      "Total # of Observations:",
      length(speed_subset(input$range)$dist)
    ))
  }
)
|
173dfe97182672b070cbf6bd0c60106a3c919cb3
|
ceeff04aac64c2f6d3a4b54c3c4af9eabf08efb3
|
/man/getPosterior.Rd
|
ba8775033b1eac5a1b2aa220a7d51d2336da4dbc
|
[
"MIT"
] |
permissive
|
suleimank/bmsr
|
d1c7a0b616ed06fb52975c71de9e7f960336d5fc
|
e69a764b3cf9f7076e28169f10ba22f49cfadde8
|
refs/heads/master
| 2021-07-02T14:46:51.027450
| 2021-03-07T02:32:52
| 2021-03-07T02:32:52
| 344,309,622
| 0
| 0
|
MIT
| 2021-03-05T22:48:58
| 2021-03-04T01:08:31
|
C++
|
UTF-8
|
R
| false
| true
| 472
|
rd
|
getPosterior.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bmsr.R
\name{getPosterior}
\alias{getPosterior}
\title{baseline function to get posterior}
\usage{
getPosterior(file = NULL, out)
}
\arguments{
\item{file}{is the stan file name containing the stan code.}
\item{out}{is trained STAN model.}
}
\value{
post is a list containing posterior of all model weights.
}
\description{
\code{getPosterior} extracts the posterior values from model output.
}
|
77a7373d48a95385720736b963e0669cf4ace0c8
|
ee7a448d0cdfe0478a82ba713e1cc6a5c9e0db4a
|
/03_analysis/02_main-analyses/99_run-all/02_run_demographics.R
|
17fde754a69301331bced9b5245d008e93f97e6d
|
[
"CC-BY-4.0"
] |
permissive
|
gpwilliams/levenik
|
ce46b00534b564280ff4f1023e69c2082d842344
|
2eeeb6a6a96849e408f1152ac3391b6caaffedf5
|
refs/heads/master
| 2021-08-06T08:39:03.528594
| 2021-07-28T15:46:01
| 2021-07-28T15:46:01
| 165,866,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 262
|
r
|
02_run_demographics.R
|
# Run Demographics ----
# Prepares data and saves demographic output.
# NOTE(review): this script relies on `source_files()`,
# `data_preparation_source_files`, `demographics` and
# `demographics_output_path` all being defined by whichever driver
# script sources this file — confirm against the 99_run-all pipeline.
message("Preparing data.")
# run and save demographics to a list
# (sourcing the preparation files is expected to create `demographics`)
source_files(data_preparation_source_files)
# save demographics
saveRDS(
demographics,
file = demographics_output_path
)
|
0348eebd8918fdf2696149dae7de79d6567cd71a
|
9ba2a529bb7f60b0841e5236bedbcbcab3fd69ea
|
/preprocess_1.R
|
204c3b3a55c00c7893833b8dc53d289655039db6
|
[] |
no_license
|
effat/csc-591
|
0504d0bd911b80bf36698328630468565d750540
|
f3cb24d9dfe9690eb3c683cf4f6bb085f3cd610e
|
refs/heads/master
| 2021-01-20T10:32:17.417844
| 2019-11-19T22:39:50
| 2019-11-19T22:39:50
| 101,643,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,943
|
r
|
preprocess_1.R
|
## preprocess_1.R
## Cleans two DataShop exports (student-problem and student-step tables):
## NA values in each column are filled by zoo spline interpolation,
## columns that are entirely NA are zeroed, and the cleaned tables are
## written out as CSV files under `working_dir`.

library(zoo)

working_dir<-"\\afs\\unity.ncsu.edu\\users\\e\\efarhan\\csc-591"

### .txt files downloaded from Datashop
file1<-"student_problem.txt"
file2<-"student_step.txt"

### concatenate path location to filename
## NOTE(review): these full paths are constructed but never used — the
## read.table() calls below read `file1`/`file2` from the current working
## directory instead. Confirm which location is intended before changing.
input_stdProb<-paste(working_dir,"\\",file1, sep="")
input_stdStep<-paste(working_dir,"\\", file2,sep="")

### read files
std_prb<-read.table(file1,sep="\t",header=T, check.names = FALSE, na.strings=c(""," ",".","NA"))
std_step<-read.table(file2,sep="\t",header=T, check.names = FALSE, na.strings=c(""," ",".","NA"))

## Fill missing values in every column of `df`:
##  - a column that is entirely NA is replaced by zeroes;
##  - otherwise NAs are replaced via zoo::na.spline() interpolation.
## Returns the cleaned data frame.
## (The original duplicated this loop for each table, and the two copies
## had drifted apart: the second copy's all-NA branch had lost its
## progress message and its braces.)
fill_missing <- function(df) {
  for (Var in names(df)) {
    missing <- sum(is.na(df[, Var]))
    if (missing > 0) {
      if (missing == nrow(df)) {
        df[, Var] <- rep(0, nrow(df))
        cat(" all NA \n")
      } else {
        df[, Var] <- na.spline(df[, Var])
        cat(" yey", Var, "\n")
      }
    }
  }
  df
}

## polynomial interpolation of missing values in both files
std_prb <- fill_missing(std_prb)
std_step <- fill_missing(std_step)

###write files as csv
std_prb_clean<-as.data.frame(std_prb)
csvFile1_name<-"student_prb_clean.csv"
csvFile1<-paste(working_dir,"\\",csvFile1_name, sep="")
write.csv(std_prb_clean, csvFile1)

std_step_clean<-as.data.frame(std_step)
csvFile2_name<-"student_step_clean.csv"
csvFile2<-paste(working_dir,"\\",csvFile2_name, sep="")
write.csv(std_step_clean, csvFile2)
|
b0abb9639adf7ce3eb54ad6759a1994602677ca4
|
b00ec2c60f0eb44104ad1ccf22f8794e5082fdc0
|
/paper-1.R
|
bca02bc3a44fcf2b458fb72984fa3d7a21c634ca
|
[
"MIT"
] |
permissive
|
dwwood1981/NOAA-migration
|
77224370ed3547ae962f68e8f4ef9b8c3fde5807
|
e5aa0dc59639ba4e1d86de7bc51bdd530a12cf2a
|
refs/heads/master
| 2021-04-28T21:31:50.567555
| 2016-09-04T21:21:57
| 2016-09-04T21:21:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
paper-1.R
|
# ---- Start --------------------------------------------------------------
# Load precomputed inputs saved earlier in the pipeline as .rds files:
# net-migration estimates and NOAA storm-event records.
net_migration <- readRDS("1-Organization/Migration/netmigration.rds")
noaa_event <- readRDS("0-Data/NOAA/events.rds")
# ---- Manipulation -------------------------------------------------------
|
8de53dd0c4cc404f97073f18e4c3da061647d770
|
be77b7f49c0e3abbd0da77dc419e88d66a8492a7
|
/man/cluster_map.Rd
|
7fd6ad62e8ae5aed2dc6672ae82a4b3244ff2449
|
[] |
no_license
|
kaneplusplus/basket
|
daba90e9a16ab116845143654dd3a9c7551c1257
|
a665c37a79a193dbc8af33fb40f750b12abefaf4
|
refs/heads/master
| 2023-07-19T00:32:23.013485
| 2023-07-17T15:42:01
| 2023-07-17T15:42:01
| 152,809,160
| 6
| 2
| null | 2020-02-06T03:35:32
| 2018-10-12T21:24:35
|
R
|
UTF-8
|
R
| false
| true
| 770
|
rd
|
cluster_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessors.r
\name{cluster_map}
\alias{cluster_map}
\title{Get the Clusterwise Maximum A Posteriori Probability Matrix}
\usage{
cluster_map(x)
}
\arguments{
\item{x}{either an exchangeability model or basket object.}
}
\description{
MEM analyses include the maximum a posteriori exchangeability
probability (MAP) of included arms indicating whether
two arms in the trial are exchangeable. This function returns the matrix
of those relationships.
}
\examples{
\donttest{
# Create an MEM analysis of the Vemurafenib trial data.
data(vemu_wide)
mem_analysis <- mem_exact(
vemu_wide$responders,
vemu_wide$evaluable,
vemu_wide$baskets
)
# Get the cluster MAPs.
cluster_map(mem_analysis)
}
}
|
b878bc70c010ba5bca03dc7f5f5f1d4079c5999f
|
64a3cb5a0bb2b325e98a3785f630b9e710b7cf95
|
/Cases/IV National City Bank/code scaffold.R
|
9690b4da7a72275b73db156ebe418822423061fd
|
[] |
no_license
|
shorton278/Harvard-Data-Mining
|
9d471b8969fa1e5a765175de0da8d411f13966de
|
e994f4461039418c02be97872fb9b4941cccc9c9
|
refs/heads/master
| 2022-12-08T18:34:40.545750
| 2020-09-04T19:07:04
| 2020-09-04T19:07:04
| 295,265,592
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,747
|
r
|
code scaffold.R
|
#' Case II Supplemental
#' TK
#' 4-30
#' NOTE(review): this is an intentionally incomplete teaching scaffold —
#' the bare `...` placeholders are to be filled in by the student and the
#' script will not run as-is.
# Libs
library(dplyr)
library(vtreat)
library(caret)
# Wd
setwd("/cloud/project/cases/National City Bank/training")
# Raw data, need to add others
currentData <- read.csv('CurrentCustomerMktgResults.csv')
newDataSource <- read.csv('householdVehicleData.csv')
# Perform a join, need to add other data sets
joinData <- left_join(currentData, newDataSource, by = c('HHuniqueID'))
joinData <- left_join(joinData, ...)
joinData <- left_join(joinData, ...)
# This is a classification problem so ensure R knows Y isn't 0/1 as integers
# ("Y_AccetpedOffer" is presumably spelled to match the source data's
# column header — do not "fix" it without checking the CSV)
joinData$Y_AccetpedOffer <- as.factor(joinData$Y_AccetpedOffer)
## SAMPLE: Partition schema
set.seed(1234)
idx <- ...
trainData <- ...
testData <- ...
## EXPLORE: EDA, perform your EDA
## MODIFY: Vtreat, need to declare xVars & name of Y var
xVars <- c('DaysPassed', 'Communication', 'Outcome', ...)
yVar <- '...'
plan <- designTreatmentsC(..., xVars, ..., 1)
# Apply the rules to the set
treatedTrain <- prepare(..., trainData)
treatedTest <- prepare(plan, ...)
## MODEL: caret etc.
fit <- train(Y_AccetpedOffer ~., data = ..., method = ...)
## ASSESS: Predict & calculate the KPI appropriate for classification
trainingPreds <- predict(..., ...)
testingPreds <- predict(..., ...)
## NOW TO GET PROSPECTIVE CUSTOMER RESULTS
# 1. Load Raw Data
prospects <- read.csv('/cloud/project/cases/National City Bank/ProspectiveCustomers.csv')
# 2. Join with external data
# 3. Apply a treatment plan
# 4. Make predictions
prospectPreds <- predict(..., treatedProspects, type= 'prob')
# 5. Join probabilities back to ID
prospectsResults <- cbind(prospects$HHuniqueID, ...)
# 6. Identify the top 100 "success" class probabilities from prospectsResults
# End
|
366f90d313b35383353acf2b0d1b45b9c339d300
|
966ea3714f1b7d3d63bb3c38b4d59d8c06814f92
|
/04_spring_2015_project/project_submission/lab4-utils.R
|
091ba2457b8d2f3aa1234c51213457229c541b34
|
[] |
no_license
|
alexherseg/analytics_projects
|
96066a8fc1d347e1f1ead3652cc55b594facf83f
|
cf65494a03d6665755bd056d13b3f677673f51a3
|
refs/heads/master
| 2020-07-03T03:12:22.462425
| 2016-12-04T23:28:33
| 2016-12-04T23:28:33
| 74,202,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 445
|
r
|
lab4-utils.R
|
# Lab 4 Utilities
# Plot GA networks
# Just a regular plot, but with men blue and women pink
# Assumes "sex" attribute
# Plot GA networks.
# Just a regular plot, but with men blue and women pink.
# Assumes the graph carries a "sex" vertex attribute ("F" = female) and,
# when names = TRUE, a "vertex.names" vertex attribute for the labels.
#
# gr    : an igraph object
# names : if TRUE, label vertices with V(gr)$vertex.names
gaplot <- function(gr, names=TRUE)
{
  # BUG FIX: the original called vcount(ga.gr), silently reading a global
  # object `ga.gr` instead of the `gr` argument — so for any other graph
  # the blank-label vector had the wrong length (or the call failed).
  nlist <- rep("", vcount(gr))
  if (names)
  {
    nlist <- V(gr)$vertex.names
  }
  plot(gr, vertex.color=c("#8888FF","pink")[1+(V(gr)$sex=="F")],
       vertex.label=nlist,
       # vertex.label.size=.75,
       vertex.size=15)
}
# Inverse logit (logistic) function: maps the real line onto (0, 1).
# Vectorized over x.
# Written as 1 / (1 + exp(-x)) rather than exp(x) / (1 + exp(x)); the two
# forms are algebraically equal, but the original overflowed for large
# positive x (exp(x) -> Inf gives Inf/Inf = NaN), whereas this form
# degrades gracefully to 1.
invlogit <- function(x) {
  1 / (1 + exp(-x))
}
|
f4a49ba8a1b3dc4b1103cc32fe7ea3adf8d40b71
|
af901bc01d668ecd411549625208b07024df3ffd
|
/man/is_dictionaryish.Rd
|
96ba7adb6ffbd24f398b9e186cacd5f2ee5674a3
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
r-lib/rlang
|
2784186a4dafb2fde7357c79514b3761803d0e66
|
c55f6027928d3104ed449e591e8a225fcaf55e13
|
refs/heads/main
| 2023-09-06T03:23:47.522921
| 2023-06-07T17:01:51
| 2023-06-07T17:01:51
| 73,098,312
| 355
| 128
|
NOASSERTION
| 2023-08-31T13:11:13
| 2016-11-07T16:28:57
|
R
|
UTF-8
|
R
| false
| true
| 349
|
rd
|
is_dictionaryish.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attr.R
\name{is_dictionaryish}
\alias{is_dictionaryish}
\title{Is a vector uniquely named?}
\usage{
is_dictionaryish(x)
}
\arguments{
\item{x}{A vector.}
}
\description{
Like \code{\link[=is_named]{is_named()}} but also checks that names are unique.
}
\keyword{internal}
|
e4de7f376feb8f448c3544307e7f1080dfde8580
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rstackdeque/examples/without_front.rdeque.Rd.R
|
2f6afd12b480de99eead972917acaa430a7a48e3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 346
|
r
|
without_front.rdeque.Rd.R
|
## Extracted help-page example for rstackdeque::without_front.rdeque —
## returning a copy of a double-ended queue minus its front element.
library(rstackdeque)
### Name: without_front.rdeque
### Title: Return a version of an rdeque without the front element
### Aliases: without_front.rdeque
### ** Examples
# Build a deque by repeatedly inserting at the front; after these three
# inserts the front element is "c".
d <- rdeque()
d <- insert_front(d, "a")
d <- insert_front(d, "b")
d <- insert_front(d, "c")
# Each without_front() call returns a new deque; the final print(d)
# is there to show the original `d` is left unchanged.
d2 <- without_front(d)
print(d2)
d3 <- without_front(d)
print(d3)
print(d)
|
b71787e428fc6b3142716fc5e8357c1d5a94037e
|
afb8f2a156447f5bedcb3b2b31931ab2f72d4396
|
/venn_exemple.R
|
623c34c9823d5e5735b83ae5df2cbc982b132f5a
|
[] |
no_license
|
ESHAMATHUR/Summer_KEYS
|
5b479505903abd2365443a225cf0f10283378166
|
87567db86919d78d7748c92b582b55186c008407
|
refs/heads/master
| 2022-11-07T00:02:19.542565
| 2020-07-08T21:52:08
| 2020-07-08T21:52:08
| 275,210,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
venn_exemple.R
|
## venn_exemple.R — two-set Venn diagram comparing prophage calls made by
## the Vibrant and VirSorter tools, written to ven_all_prophages.png.
library(dplyr)
library(tidyverse)
library(stringr)
library(VennDiagram)
library(RColorBrewer)
#+++++++++++++++++++++++++
# Import data
# and cleanup
#+++++++++++++++++++++++++
file <- "../Raw_Data/comparing_vibrant_virsorter.csv"
comp_data <- read_csv(file)
#+++++++++++++++++++++++++
# Venn diagram
# VirSorter/Vibrant
#+++++++++++++++++++++++++
# color choice
myCol <- c("#B3E2CD", "#FDCDAC")
#figure
venn.diagram(
# Each set is the vector of contig IDs for which the corresponding
# tool produced a (non-NA) name in the comparison table.
x = list(
comp_data %>% filter(!is.na(name_vibrant)) %>% select(contig_id) %>% unlist() ,
comp_data %>% filter(!is.na(name_virsorter)) %>% select(contig_id) %>% unlist()
),
category.names = c("Vibrant", "VirSorter"),
filename = "ven_all_prophages.png",
output = TRUE ,
# Output features
imagetype="png" ,
height = 700 ,
width = 700 ,
resolution = 250,
compression = "lzw",
# Circles
lwd = 2,
lty = 'blank',
fill = myCol,
# Numbers
cex = .6,
fontface = "bold",
fontfamily = "sans",
# Set names
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer",
cat.fontfamily = "sans"
)
|
efffb84943eb0608359384ea55754eb018b70d40
|
d164e285eda74b1463decaa356756c23215534ee
|
/R/act_raw.R
|
96cf7477ffeffe28117cfb2341f5db6d54fccd2f
|
[] |
no_license
|
duju211/heartbeats
|
3ad97088498dcb4d125b8deece448f69485c42d3
|
4c0fbbff3e28861f6b021e983e23ee7f094ea0db
|
refs/heads/master
| 2023-05-04T17:29:12.688272
| 2021-05-23T12:50:44
| 2021-05-23T12:50:44
| 324,873,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
act_raw.R
|
# Fetch the raw activity table "df_act" from a pinned GitHub board and
# return only the rows where `has_heartrate` is TRUE, with cleaned
# (snake_case) column names.
# NOTE(review): relies on pins (board_register_github / pin_get /
# board_disconnect), dplyr (filter, %>%) and janitor (clean_names) being
# attached by the caller — confirm against the package/script setup.
act_raw <- function() {
# register, read the pin, then release the board connection
board_register_github(repo = "duju211/strava_act", branch = "master")
df_act <- pin_get("df_act", board = "github")
board_disconnect("github")
df_act %>%
filter(has_heartrate) %>%
clean_names()
}
|
9c4b0938e28409d01e38201648992805e04ffc1b
|
c698dbe65e7522a7e4103d8e92bde5bcef7734a4
|
/app.R
|
5af390a9b59308e950e2b7d1f7757de674d908c7
|
[] |
no_license
|
fionazhang94/Dissertation
|
96c669c77fa2e5e61bb37c9c0071355f4b83140e
|
2541a831e616afe59314c384f91ca7c5dc558f28
|
refs/heads/master
| 2020-06-12T14:19:37.282955
| 2019-08-21T14:35:11
| 2019-08-21T14:35:11
| 194,327,234
| 0
| 0
| null | 2019-07-30T19:31:57
| 2019-06-28T19:56:53
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,444
|
r
|
app.R
|
## app.R — single-file Shiny app: k-means clustering of the Pima Indians
## diabetes data, with a scatter plot of the chosen variables plus an
## elbow (within-group sum of squares) plot.
library(shiny)
library(dplyr)
library(broom)
library(readr)
# https://www.kaggle.com/uciml/pima-indians-diabetes-database/downloads/pima-indians-diabetes-database.zip/1
diabetes <- read_csv("diabetes.csv")
## UI: dropdowns for the two plotting variables and a numeric input for
## the number of clusters (2..5).
ui <- shinyUI(
pageWithSidebar(
headerPanel('diabetes k-means clustering'),
sidebarPanel(
selectInput('xcol', 'X Variable', names(diabetes),selected=names(diabetes)[[2]]),
selectInput('ycol', 'Y Variable', names(diabetes),
selected=names(diabetes)[[6]]),
numericInput('clusters', 'Cluster count', 2,
min = 2, max = 5)
),
mainPanel(
plotOutput("plot1")
)
)
)
server <- shinyServer(function(input, output, session) {
# the two selected columns, re-evaluated whenever the inputs change
selectedData <- reactive({
diabetes[, c(input$xcol, input$ycol)]
})
# k-means fit on the selected columns with the requested cluster count
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
# elbow curve: total within-cluster SS for k = 1..15
# (recomputed on every render — presumably acceptable for this data
# size; could be cached in a reactive if it becomes slow)
wss <- rep(0,15)
for (i in 1:15) wss[i] <- sum(kmeans(selectedData(),centers=i)$withinss)
palette(c("#F781BF","#FFFF33", "#A65628","#FF7F00"))
# stack the two plots vertically in one output
par(mar = c(5.1, 4.1, 0, 1),mfcol=c(2,1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
# mark the fitted cluster centers
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
plot(1:15, wss[1:15],
type="b",
xlab="Number of Clusters",
ylab="Within groups sum of squares")
})
})
# Run the application
shinyApp(ui = ui, server = server)
|
283c7b99c342d3c4d4ad8b8b8d53960c05dd0eb4
|
a061885a00a2c99e6c830c9f98cc5594b6aed1da
|
/plot2.R
|
61af93cc84ff8e95bebeb1f7553f3c43ecc78806
|
[] |
no_license
|
chamathka25/ExData_Plotting1
|
f9b5a12090762dc8daf2cbc042b9919e1c88ee83
|
3d92f586073d5e3bd8ac89d79e7a9a06883a127d
|
refs/heads/master
| 2022-11-22T20:03:02.042958
| 2020-07-23T08:13:27
| 2020-07-23T08:13:27
| 281,833,247
| 0
| 0
| null | 2020-07-23T02:44:38
| 2020-07-23T02:44:37
| null |
UTF-8
|
R
| false
| false
| 581
|
r
|
plot2.R
|
## plot2.R — line plot of Global_active_power over 2007-02-01..02 from the
## UCI "Individual household electric power consumption" data set, saved
## to plot2.png.
mydata=read.table("./household_power_consumption.txt",sep=";",header = T)
## Keep only the two target days (Date is still d/m/Y text at this point).
data=subset(mydata,mydata$Date=="1/2/2007" | mydata$Date=="2/2/2007")
View(data)
names(data)
data$Date=as.Date(data$Date,format ="%d/%m/%Y")
data$Time=strptime(data$Time,format ="%H:%M:%S")
dim(data)
## Re-attach the correct calendar date to each time-of-day value; with one
## reading per minute this assumes exactly 1440 rows per day (2880 total).
## BUG FIX: the original indexed the second day as 1440:2880, overwriting
## row 1440 (the last minute of Feb 1) with a Feb 2 date; the second day
## starts at row 1441.
data[1:1440,"Time"]<-format(data[1:1440,"Time"],"2007-02-01 %H:%M:%S")
data[1441:2880,"Time"]<-format(data[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
plot(data$Time,as.numeric(data$Global_active_power),type="l",xlab=" ",
ylab="Global Active power(kilowatts)")
## copy the screen device to a PNG file, then close it
dev.copy(png,file="plot2.png")
dev.off()
|
b2bd308bbf21bae1e7b39bc2fe433f5aafe926c3
|
570abc2b93f05cbce92d95f6b9bffbe48708bb6c
|
/R_source/p_j.R
|
4521d2219bfbb982a64389a44e3c48fd5fbeef98
|
[] |
no_license
|
Danhisco/artigo_mestrado
|
ed871edab0e89e28a5569668a670ea43d3f548e3
|
1ff761608ea7312c533ef216b51fd67fb8268b00
|
refs/heads/master
| 2023-02-03T00:15:42.720220
| 2023-01-30T02:13:25
| 2023-01-30T02:13:25
| 121,779,367
| 0
| 1
| null | 2018-11-01T11:05:29
| 2018-02-16T17:28:00
|
HTML
|
UTF-8
|
R
| false
| false
| 1,066
|
r
|
p_j.R
|
## p_j.R — histograms of community size (J = df_resultados$N) and
## exploratory plots of the KS statistic against forest succession and
## forest cover ("cobertura").
library(ggplot2)
library(gridExtra)
## NOTE(review): the code below also uses inner_join()/arrange() (dplyr)
## and the %<>% / %>% pipes (magrittr) without loading those packages
## here — confirm the calling session attaches them.
load("/home/danilo/Documents/dissertacao/dados/resultados_DaniloPMori.Rdata")
## Side-by-side histograms of N and ln(N), written to a PDF.
pdf(file="~/Desktop/p_J.pdf")
par(mfrow=c(1,2))
hist(df_resultados$N, # N
col="chartreuse4",
border="black",
prob = TRUE,
xlab = "indivíduos",
main = "J",
ylim=c(0, 7.748e-4 ),
breaks=32)
hist(log(df_resultados$N), # log(N)
col="chartreuse4",
border="black",
prob = TRUE,
xlab = "ln(indivíduos)",
main = "ln(J)",
breaks=26)
dev.off()
## Join site covariates with the (deduplicated) per-site results.
df_ae <- inner_join(df_ref[,c(1,5,9:10,12:15)], unique(df_resultados[,c(1,4,17,2)]), by = "SiteCode")
df_ae$cluster_medio %<>% as.character() %>% as.numeric()
df_ae %<>% arrange(cobertura, cluster_medio)
df_ae$cluster_medio %<>% factor
ggplot(df_ae, aes(x=forest_succession,y=KS)) + geom_boxplot() + geom_jitter(aes(colour=cluster_medio))
ggplot(df_ae, aes(x=cobertura,y=KS, group=forest_succession)) + geom_point(aes(colour=forest_succession)) + geom_smooth(method="lm",se=F,aes(colour=forest_succession) )
## BUG FIX: the original called .plot(...), which does not exist; the
## intent was clearly the base-graphics formula method of plot().
plot(KS ~ forest_succession, df_ae)
|
df0a9b9d2bc278e233a5f792ca83b30d8587af89
|
29e74b9b3a5a3228d5de27090f5b0e6728d77ee7
|
/70207_Lab7.R
|
1657d43a686939999dcfd037802beabf41a1743c
|
[] |
no_license
|
typark99/TeachingLab_ProbabilityStatistics
|
c6e9f64122d809d51ace55b3618330ee8e9cef2d
|
9bd8d2397c73bfa73916a1304aaebdc310eb1334
|
refs/heads/master
| 2022-11-22T11:26:39.672726
| 2020-07-26T15:57:23
| 2020-07-26T15:57:23
| 282,683,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,205
|
r
|
70207_Lab7.R
|
#####################################
# File: Lab7.R
# Author: Taeyong Park
# Summary: Comparing Three or More Means (one-way ANOVA)
######################################
#####################
#
# Population Means
#
#####################
# Chemitech compares three methods used to produce filtration systems.
# To this end, Chemitech measures the number of filtration systems produced per week.
# H0: The three population means are the same.
# Import the data.
chemitech = read.csv("Chemitech.csv")
colnames(chemitech)
# See the data
View(chemitech)
# Because the three groups (variables) are separate,
# use the stack function to transform the data structure
# (wide, one column per method -> long, values + group indicator "ind").
# The aov() function requires stacked data.
stacked.chemitech = stack(chemitech)
View(stacked.chemitech)
# Use the aov function to run an Anova model
# (values explained by group membership).
anova.chemitech = aov(values ~ ind, data=stacked.chemitech)
summary(anova.chemitech)
# In the output, 260 is the Between-groups estimate
# and 28.33 is the Within-groups estimate.
######################
# Exercise 1
######################
# The Consumer Reports Restaurant Customer Satisfaction Survey
# studies full-service restaurant chains.
# One of the variables in the study is meal price, the average amount paid per
# person for dinner and drinks, minus the tip.
# The GrandStrand.csv data show the meal prices obtained from 24 restaurants
# in the Grand Strand section in a city of the US.
# Use .05 significance level to test if there is a significant
# difference among the mean meal price for the three types of restaurants.
# Answer the following questions:
# 1. What is the between-groups estimate of population variance?
# 2. What is the within-groups estimate of population variance?
# 3. What is the F statistic?
# 4. What is the p value?
# 5. What is your conclusion about the difference among the mean meal prices for the three types of restaurants?
######################
# Exercise 2
######################
# 1. Test for the mean difference between the price for Italian and that for Seafood
# 2. Test for the mean difference between the price for Italian and that for Steakhouse
# 3. Test for the mean difference between the price for Seafood and that for Steakhouse
|
77ee8da649283c2f9f08620c5a00d9b23f8387e2
|
094f81c31a3cfd560b24280e476d5af4fb52b9e3
|
/R/findviolation.R
|
cbd1f1ec5fdf5a95b82c97a23e6a58e304d9cfd2
|
[
"MIT"
] |
permissive
|
PJOssenbruggen/Basic
|
6c2343dcb135cb364d059160925ded5cb43b5455
|
1885fa40d3318cc554b4dd80154b263baef19ac4
|
refs/heads/master
| 2021-01-25T11:57:19.583401
| 2019-01-04T13:03:32
| 2019-01-04T13:03:32
| 123,449,454
| 0
| 0
| null | 2018-03-05T12:26:55
| 2018-03-01T14:56:48
|
R
|
UTF-8
|
R
| false
| false
| 1,512
|
r
|
findviolation.R
|
#' \code{findviolation} determines if a zone violates the safe headway rule.
#'
#' @return \code{findviolation} fills the cell of the \code{dfcrit} table.
#' @param tstart start time, a number
#' @param tend end time, a number
#' @param tend.0 end time for over the long time range, a number
#' @param df1 leading vehicle, a matrix
#' @param df2 following vehicle, a matrix
#' @param delt time-step, a number
#' @param leff vehicle length, a number
#' @usage findviolation(tstart, tend, tend.0, df1, df2, delt, leff)
# #' @examples
# #' findviolation(tstart, tend, tend.0, df1, df2, delt, leff)
#' @export
findviolation <- function(tstart, tend, tend.0, df1, df2, delt, leff) {
# Prepend a time column covering the full [0, tend.0] range, then trim
# both trajectories to the analysis window [tstart, tend].
t <- seq(0,tend.0,delt)
df1 <- cbind(t, df1)
df2 <- cbind(t, df2)
tseq <- seq(tstart,tend,delt)
df1 <- df1[df1[,1] >= tstart & df1[,1] <= tend,]
df2 <- df2[df2[,1] >= tstart & df2[,1] <= tend,]
tlen <- length(tseq)
# X: violation indicator per step (1 = headway below safe); safe: the
# minimum safe headway from hsafe() (defined elsewhere in the package).
X <- safe <- rep(NA, tlen)
# Single-row subsets drop to plain vectors, so the window-of-one case
# must be handled separately from the matrix case.
if(is.matrix(df1) == TRUE) {
# column 3 is position; headway = leader position - follower position
hdwy <- df1[,3] - df2[,3]
# column 2 is presumably speed (fed to hsafe with the vehicle length)
# — TODO confirm column layout against the callers building df1/df2
for(i in 1:tlen) safe[i] <- hsafe(df2[i,2], leff)
for(i in 1:tlen) if(hdwy[i] >= safe[i]) X[i] <- 0 else X[i] <- 1
} else {
hdwy <- df1[3] - df2[3]
safe <- hsafe(df2[2], leff)
if(hdwy >= safe) X <- 0 else X <- 1
}
# extent: margin above the safe headway (most negative = worst).
extent <- hdwy - safe
df <- data.frame(X, safe, hdwy, extent, t = tseq)
# tcrit: earliest time at which the margin is at its minimum,
# replicated so it can sit alongside every row of the result.
tcrit <- min(as.numeric(df[df[,4] == min(extent),5]))
tcrit <- rep(tcrit, length(tseq))
df <- cbind(df, tcrit)
return(df)
}
|
a28bdd6a007768be78439117762210edfc5ac4e4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/rtypeform/tests/testthat/test_get_all_typeforms.R
|
784f3834216617d3a5b67777637ef1b20e61562e
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 130
|
r
|
test_get_all_typeforms.R
|
# Check that get_all_typeforms() returns a two-column result.
# skip_on_cran() suggests the call reaches an external service, so the
# test only runs in local/CI environments.
test_that("Testing get_all_typeforms", {
  skip_on_cran()
  all_forms <- get_all_typeforms()
  expect_equal(ncol(all_forms), 2)
})
|
32da3b6aa796ab350d3833b3ffe02e1e8dfff191
|
97bdb95da4059299469a8aea98c8ab10762993e0
|
/R/nvd-class.R
|
5ee7c7c36b638c8005dc55e1c56f7986ebec364c
|
[] |
no_license
|
cran/nevada
|
1924beaa3985652ae21777e067859d07df7b6c3b
|
b6724e527026e31fda1e5e56ec967effa184673f
|
refs/heads/master
| 2023-08-17T22:30:18.581173
| 2021-09-25T05:40:02
| 2021-09-25T05:40:02
| 410,309,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,044
|
r
|
nvd-class.R
|
#' Network-Valued Data Constructor
#'
#' This is the constructor for objects of class \code{nvd}.
#'
#' @param model A string specifying the model to be used for sampling networks
#'   (current choices are: \code{"sbm"}, \code{"k_regular"}, \code{"gnp"},
#'   \code{"smallworld"}, \code{"pa"}, \code{"poisson"} and \code{"binomial"}).
#'   Default is \code{"smallworld"}.
#' @param n An integer specifying the sample size (default: \code{0L}).
#' @param pref.matrix A matrix giving the Bernoulli rates for the SBM generator
#'   (see \code{\link[igraph]{sample_sbm}} for details). Default is \code{NULL}.
#'   It is required for \code{model == "sbm"}.
#' @param lambda A numeric value specifying the mean value for the Poisson
#'   generator. Default is \code{NULL}. It is required for \code{model ==
#'   "poisson"}.
#' @param size An integer value specifying the number of trials for the binomial
#'   distribution. Default is \code{NULL}. It is required for \code{model ==
#'   "binomial"}.
#' @param prob A numeric value specifying the probability of success of each
#'   trial for the binomial distribution. Default is \code{NULL}. It is required
#'   for \code{model == "binomial"}.
#'
#' @return A \code{nvd} object which is a list of \code{\link[igraph]{igraph}}
#'   objects.
#' @export
#'
#' @examples
#' nvd(n = 10L)
nvd <- function(model = "smallworld",
                n = 0L,
                pref.matrix = NULL,
                lambda = NULL,
                size = NULL,
                prob = NULL) {
  model <- match.arg(
    model,
    c("sbm", "k_regular", "gnp", "smallworld", "pa", "poisson", "binomial")
  )
  # Argument validation. FIX: these scalar `if` conditions now use the
  # short-circuit operators && / || instead of the vectorized & / |; the
  # vectorized forms are the wrong idiom for scalar conditions and error
  # outright on R >= 4.3 when either side has length > 1.
  if (model == "sbm" && is.null(pref.matrix))
    stop("The pref.matrix argument should be specified to use the SBM generator.")
  if (model == "poisson" && is.null(lambda))
    stop("The lambda argument should be specified to use the Poisson generator.")
  if (model == "binomial" && (is.null(size) || is.null(prob)))
    stop("The size and prob arguments should be specified to use the Binomial generator.")
  # Draw n independent 25-node networks from the requested generator and
  # flag the resulting list as an nvd object.
  obj <- replicate(n, switch(
    model,
    "sbm" = igraph::sample_sbm(n = 25L, pref.matrix = pref.matrix, block.sizes = c(12L, 1L, 12L)),
    "k_regular" = igraph::sample_k_regular(no.of.nodes = 25L, k = 8L),
    "gnp" = igraph::sample_gnp(n = 25L, p = 1/3),
    "smallworld" = igraph::sample_smallworld(dim = 1L, size = 25L, nei = 4L, p = 0.15),
    "pa" = igraph::sample_pa(n = 25L, power = 2L, m = 4L, directed = FALSE),
    "poisson" = rpois_network(lambda = lambda, n = 25L),
    "binomial" = rbinom_network(size = size, prob = prob, n = 25L)
  ), simplify = FALSE)
  as_nvd(obj)
}
#' Coercion to Network-Valued Data Object
#'
#' Flags a list of \code{\link[igraph]{igraph}} objects as an
#' \code{\link{nvd}} object as defined in this package.
#'
#' @param obj A list of \code{\link[igraph]{igraph}} objects.
#'
#' @return An \code{\link{nvd}} object.
#' @export
#'
#' @examples
#' as_nvd(nvd("smallworld", 10))
as_nvd <- function(obj) {
  if (!is.list(obj))
    cli::cli_abort("Input should be a list.")
  # Every element must be an igraph object; stop scanning at the first
  # offender.
  all_igraph <- TRUE
  for (element in obj) {
    if (!igraph::is_igraph(element)) {
      all_igraph <- FALSE
      break
    }
  }
  if (!all_igraph)
    cli::cli_abort("List elements should be igraph objects.")
  # Tag the list with the nvd class while keeping it a plain list underneath.
  structure(obj, class = c("nvd", "list"))
}
# Returns TRUE if `obj` has been flagged as network-valued data (class "nvd").
is_nvd <- function(obj) {
  # inherits() is the idiomatic S3 class test; it is equivalent to
  # `"nvd" %in% class(obj)` for S3 objects but also behaves correctly for
  # S4 objects and implicit classes.
  inherits(obj, "nvd")
}
#' Two-Sample Stochastic Block Model Generator
#'
#' This function generates two samples of networks according to the stochastic
#' block model (SBM). This is essentially a wrapper around
#' \code{\link[igraph]{sample_sbm}} which allows to sample a single network from
#' the SBM.
#'
#' @param n Integer scalar giving the sample size.
#' @param nv Integer scalar giving the number of vertices of the generated
#'   networks, common to all networks in both samples.
#' @param p1 The matrix giving the Bernoulli rates for the 1st sample. This is a
#'   KxK matrix, where K is the number of groups. The probability of creating an
#'   edge between vertices from groups i and j is given by element (i,j). For
#'   undirected graphs, this matrix must be symmetric.
#' @param b1 Numeric vector giving the number of vertices in each group for the
#'   first sample. The sum of the vector must match the number of vertices.
#' @param p2 The matrix giving the Bernoulli rates for the 2nd sample (default:
#'   same as 1st sample). This is a KxK matrix, where K is the number of groups.
#'   The probability of creating an edge between vertices from groups i and j is
#'   given by element (i,j). For undirected graphs, this matrix must be
#'   symmetric.
#' @param b2 Numeric vector giving the number of vertices in each group for the
#'   second sample (default: same as 1st sample). The sum of the vector must
#'   match the number of vertices.
#' @param seed The seed for the random number generator (default: \code{NULL},
#'   in which case the RNG state is left untouched).
#'
#' @return A length-2 list containing the two samples stored as
#'   \code{\link{nvd}} objects.
#' @export
#'
#' @examples
#' n <- 10
#' p1 <- matrix(
#'   data = c(0.1, 0.4, 0.1, 0.4,
#'            0.4, 0.4, 0.1, 0.4,
#'            0.1, 0.1, 0.4, 0.4,
#'            0.4, 0.4, 0.4, 0.4),
#'   nrow = 4,
#'   ncol = 4,
#'   byrow = TRUE
#' )
#' p2 <- matrix(
#'   data = c(0.1, 0.4, 0.4, 0.4,
#'            0.4, 0.4, 0.4, 0.4,
#'            0.4, 0.4, 0.1, 0.1,
#'            0.4, 0.4, 0.1, 0.4),
#'   nrow = 4,
#'   ncol = 4,
#'   byrow = TRUE
#' )
#' sim <- sample2_sbm(n, 68, p1, c(17, 17, 17, 17), p2, seed = 1234)
sample2_sbm <- function(n, nv, p1, b1, p2 = p1, b2 = b1, seed = NULL) {
  # Only touch the RNG when the caller supplies a seed: withr::local_seed()
  # errors on NULL, so the previous code failed with the default argument.
  # local_seed() restores the prior RNG state when the function exits.
  if (!is.null(seed)) withr::local_seed(seed)
  # purrr::rerun() is deprecated (removed in purrr 1.0.0); draw the n pairs
  # with map() instead, then transpose into two parallel samples x and y.
  sim <- seq_len(n) %>%
    purrr::map(~ list(
      x = igraph::sample_sbm(nv, p1, b1),
      y = igraph::sample_sbm(nv, p2, b2)
    )) %>%
    purrr::transpose() %>%
    purrr::map(as_nvd)
  # Return visibly (the original ended on an assignment, which returns the
  # value invisibly).
  sim
}
#' Fréchet Mean of Network-Valued Data
#'
#' This function computes the sample Fréchet mean from an observed sample of
#' network-valued random variables according to a specified matrix
#' representation. It currently only supports the Euclidean geometry i.e. the
#' sample Fréchet mean is obtained as the argmin of the sum of squared Frobenius
#' distances.
#'
#' @param x An \code{\link{nvd}} object.
#' @param weights A numeric vector specifying weights for each observation
#'   (default: equally weighted). Passing \code{NULL} explicitly also yields
#'   equal weighting.
#' @param representation A string specifying the graph representation to be
#'   used. Choices are adjacency, laplacian, modularity, graphon. Default is
#'   adjacency.
#' @param ... Other argument to be parsed to the \code{\link[base]{mean}}
#'   function.
#'
#' @return The mean network in the chosen matrix representation assuming
#'   Euclidean geometry for now.
#' @export
#'
#' @examples
#' d <- nvd(n = 10L)
#' mean(d)
mean.nvd <- function(x, weights = rep(1, length(x)), representation = "adjacency", ...) {
  # Convert each network to its matrix representation. NOTE(review): the
  # `weights` default is a lazily evaluated promise, so it is computed below
  # AFTER this reassignment of `x`; this is only equivalent to weighting the
  # original sample if repr_nvd() preserves the sample size -- confirm.
  x <- repr_nvd(x, representation = representation)
  # Treat an explicit `weights = NULL` as equal weighting.
  if (is.null(weights)) weights <- rep(1, length(x))
  # Weighted Euclidean mean computed in compiled code (mean_nvd_impl).
  x <- mean_nvd_impl(x, weights)
  # Wrap the resulting matrix via the as_* helper matching the chosen
  # representation (presumably this tags the matrix with the representation
  # attribute that var_nvd() later reads -- verify against those helpers).
  switch(
    representation,
    adjacency = as_adjacency(x),
    laplacian = as_laplacian(x),
    modularity = as_modularity(x),
    graphon = as_graphon(x)
  )
}
#' Fréchet Variance of Network-Valued Data Around a Given Network
#'
#' This function computes the Fréchet variance around a specified network from
#' an observed sample of network-valued random variables according to a
#' specified distance. In most cases, the user is willing to compute the sample
#' variance, in which case the Fréchet variance has to be evaluated w.r.t. the
#' sample Fréchet mean. In this case, it is important that the user indicates
#' the same distance as the one (s)he used to separately compute the sample
#' Fréchet mean. This function can also be used as is as the function to be
#' minimized in order to find the Fréchet mean for a given distance.
#'
#' @param x An \code{\link{nvd}} object listing a sample of networks.
#' @param x0 A network already in matrix representation around which to
#'   calculate variance (usually the Fréchet mean but not necessarily). Note
#'   that the chosen matrix representation is extracted from this parameter.
#' @param weights A numeric vector specifying weights for each observation
#'   (default: equally weighted).
#' @param distance A string specifying the distance to be used. Possible choices
#'   are: hamming, frobenius, spectral or root-euclidean. Default is frobenius.
#'   When the Fréchet mean is used as \code{x0} parameter, the distance should
#'   match the one used to compute the mean. This is not currently checked.
#'
#' @return A positive scalar value evaluating the amount of variability of the
#'   sample around the specified network.
#' @export
#'
#' @examples
#' d <- nvd(n = 10L)
#' m <- mean(d)
#' var_nvd(x = d, x0 = m, distance = "frobenius")
var_nvd <- function(x, x0, weights = rep(1, length(x)), distance = "frobenius") {
  if (!is_nvd(x))
    stop("The input x should be of class nvd.")
  if (!is.matrix(x0))
    stop("The input x0 should be of class matrix.")
  representation <- attributes(x0)$representation
  # When the attribute is absent, attributes(x0)$representation is NULL and
  # `NULL == ""` yields logical(0), which made the original `if` fail with an
  # obscure "argument is of length zero" error instead of the intended
  # message. Handle the missing and empty cases explicitly.
  if (is.null(representation) || representation == "")
    stop("The input x0 matrix should have an attribute named representation.")
  # Resolve the distance name to its compiled implementation once instead of
  # duplicating the accumulation loop for each distance; an unknown name now
  # fails loudly instead of silently producing numeric(0).
  dist_impl <- switch(
    distance,
    hamming = dist_hamming_impl,
    frobenius = dist_frobenius_impl,
    spectral = dist_spectral_impl,
    "root-euclidean" = dist_root_euclidean_impl,
    stop("Unsupported distance: ", distance)
  )
  # Convert each network to the representation dictated by x0.
  x <- purrr::map(x, format_input, representation = representation)
  # Weighted sum of squared distances to x0.
  ssd <- purrr::reduce2(x, weights, function(.v, .x, .y) {
    .v + .y * dist_impl(.x, x0)^2
  }, .init = 0)
  ssd / sum(weights)
}
#' Fréchet Variance of Network-Valued Data from Inter-Point Distances
#'
#' Computes the Fréchet variance using only the pairwise distances between
#' observations. Because no mean needs to be represented explicitly, any
#' combination of matrix representation and distance can be used.
#'
#' @param x An \code{\link{nvd}} object listing a sample of networks.
#' @param representation A string specifying the graph representation to be
#'   used. Choices are adjacency, laplacian, modularity, graphon. Default is
#'   adjacency.
#' @param distance A string specifying the distance to be used. Possible choices
#'   are: hamming, frobenius, spectral or root-euclidean. Default is frobenius.
#'
#' @return A positive scalar value evaluating the variance based on inter-point
#'   distances.
#' @export
#'
#' @examples
#' d <- nvd(n = 10L)
#' var2_nvd(x = d, representation = "graphon", distance = "frobenius")
var2_nvd <- function(x, representation = "adjacency", distance = "frobenius") {
  # Guard: only objects flagged as network-valued data are accepted.
  if (!is_nvd(x))
    stop("The input x should be of class nvd.")
  # Convert to matrices, then delegate to the compiled implementation.
  matrix_list <- repr_nvd(x, representation = representation)
  var_nvd_impl(matrix_list, distance)
}
|
1597b529a0ed888f1751fdc7d175ef116cc7090a
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/paws/man/configservice_put_organization_config_rule.Rd
|
b0e8395ec7d68f15b5dee9b20dd6b02c5de7a2d4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 3,426
|
rd
|
configservice_put_organization_config_rule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configservice_operations.R
\name{configservice_put_organization_config_rule}
\alias{configservice_put_organization_config_rule}
\title{Adds or updates organization config rule for your entire organization
evaluating whether your AWS resources comply with your desired
configurations}
\usage{
configservice_put_organization_config_rule(OrganizationConfigRuleName,
OrganizationManagedRuleMetadata, OrganizationCustomRuleMetadata,
ExcludedAccounts)
}
\arguments{
\item{OrganizationConfigRuleName}{[required] The name that you assign to an organization config rule.}
\item{OrganizationManagedRuleMetadata}{An \code{OrganizationManagedRuleMetadata} object.}
\item{OrganizationCustomRuleMetadata}{An \code{OrganizationCustomRuleMetadata} object.}
\item{ExcludedAccounts}{A comma-separated list of accounts that you want to exclude from an
organization config rule.}
}
\description{
Adds or updates organization config rule for your entire organization
evaluating whether your AWS resources comply with your desired
configurations. Only a master account can create or update an
organization config rule.
}
\details{
This API enables organization service access through the
\code{EnableAWSServiceAccess} action and creates a service linked role
\code{AWSServiceRoleForConfigMultiAccountSetup} in the master account of your
organization. The service linked role is created only when the role does
not exist in the master account. AWS Config verifies the existence of
role with \code{GetRole} action.
You can use this action to create both custom AWS Config rules and AWS
managed Config rules. If you are adding a new custom AWS Config rule,
you must first create AWS Lambda function in the master account that the
rule invokes to evaluate your resources. When you use the
\code{PutOrganizationConfigRule} action to add the rule to AWS Config, you
must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to
the function. If you are adding an AWS managed Config rule, specify the
rule\'s identifier for the \code{RuleIdentifier} key.
The maximum number of organization config rules that AWS Config supports
is 150.
Specify either \code{OrganizationCustomRuleMetadata} or
\code{OrganizationManagedRuleMetadata}.
}
\section{Request syntax}{
\preformatted{svc$put_organization_config_rule(
OrganizationConfigRuleName = "string",
OrganizationManagedRuleMetadata = list(
Description = "string",
RuleIdentifier = "string",
InputParameters = "string",
MaximumExecutionFrequency = "One_Hour"|"Three_Hours"|"Six_Hours"|"Twelve_Hours"|"TwentyFour_Hours",
ResourceTypesScope = list(
"string"
),
ResourceIdScope = "string",
TagKeyScope = "string",
TagValueScope = "string"
),
OrganizationCustomRuleMetadata = list(
Description = "string",
LambdaFunctionArn = "string",
OrganizationConfigRuleTriggerTypes = list(
"ConfigurationItemChangeNotification"|"OversizedConfigurationItemChangeNotification"|"ScheduledNotification"
),
InputParameters = "string",
MaximumExecutionFrequency = "One_Hour"|"Three_Hours"|"Six_Hours"|"Twelve_Hours"|"TwentyFour_Hours",
ResourceTypesScope = list(
"string"
),
ResourceIdScope = "string",
TagKeyScope = "string",
TagValueScope = "string"
),
ExcludedAccounts = list(
"string"
)
)
}
}
\keyword{internal}
|
e94f56b1c1f22fbae5e0d0b33ccade2816492eb6
|
6d83839f46b1a625e71507442030f7d281a4218c
|
/inst/developer/function_ideas/thoughts on a quick string syntax.R
|
f4a3486b63fdf6d02b2e99af4e55f03e1444855c
|
[] |
no_license
|
guhjy/umx
|
06be5268bdeca6d2edbe7c72a1d720dd1f02445f
|
4e1f524d909696625c82dc2f4045c8d964c5f4d5
|
refs/heads/master
| 2020-04-08T14:23:15.782049
| 2018-11-27T21:28:00
| 2018-11-27T21:28:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 862
|
r
|
thoughts on a quick string syntax.R
|
# Fit a confirmatory factor analysis specified in lavaan syntax.
HS.model <- ' visual =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed =~ x7 + x8 + x9 '
fit <- cfa(HS.model,
           data = HolzingerSwineford1939,
           group = "school")
summary(fit)
# NOTE: the three note lines below were previously bare prose (no leading #),
# which made this script fail to parse; they must be comments.
# To convert lavaan to OpenMx
# 1. replace "=~" with "->"
# 2. add the black-box elements "visual <-> visual"
HS.model <- " visual -> x1@1 + x2 + x3
textual -> x4@1 + x5 + x6
speed -> x7@1 + x8 + x9
# Added silently by lavaan (also the @1 above)
visual <-> visual
textual<-> textual
speed <-> speed
x1 <-> x1
x2 <-> x2
x3 <-> x3
x4 <-> x4
x5 <-> x5
x6 <-> x6
x7 <-> x7
x8 <-> x8
x9 <-> x9
"
fit <- cfa(HS.model,
           data = HolzingerSwineford1939,
           group = "school")
summary(fit)
|
2a088a303054c5325033fc587f0585bb3c1ee995
|
88931c8cf916f9e8bacd99c65c1442e21e34e903
|
/scripts/six_mnase_heatmaps.R
|
c09a0b5c0905bbf4c2638729c2518ba3e9f43147
|
[] |
no_license
|
james-chuang/dissertation
|
cdb91652f9842da5ae75d72f2600c11dceb78721
|
b44d9a88cd934c1862b415b5a3961afc8ce78ec6
|
refs/heads/master
| 2020-06-14T07:03:10.846181
| 2019-07-29T21:18:32
| 2019-07-29T21:18:32
| 194,939,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,804
|
r
|
six_mnase_heatmaps.R
|
# Build a four-panel figure -- a NET-seq heatmap, an MNase-seq dyad heatmap,
# and two nucleosome-quantification heatmaps (occupancy and fuzziness
# log2 fold-changes) -- and write it to a PDF.
# NOTE(review): relies on tidyverse verbs (read_tsv, filter, mutate, ...) and
# on `theme_heatmap` / `coolwarm` objects sourced from `theme_spec`; none of
# these are defined in this file -- confirm they are provided by the theme
# script or attached packages.
main = function(theme_spec,
                netseq_data, mnase_data, quant_data, annotation_path,
                fig_width, fig_height, assay,
                pdf_out){
    source(theme_spec)
    library(cowplot)
    # Samples averaged for the MNase-seq panel.
    sample_ids = c("WT-37C-1", "spt6-1004-37C-1", "spt6-1004-37C-2")
    # Maximum distance (kb) shown downstream of the +1 dyad.
    max_length = 1
    # Quantiles at which the color scales saturate (values above are squished).
    mnase_cutoff = 0.95
    netseq_cutoff = 0.93
    # Mean WT signal per gene/position for the first panel; the `group` string
    # is plotmath (phantom() pads the strip label for alignment).
    netseq_df = read_tsv(netseq_data,
                         col_names=c('group', 'sample', 'annotation', 'index', 'position', 'signal')) %>%
        filter(group=="WT-37C" & between(position,
                                         ifelse(assay=="NET-seq", -0.1, -0.3),
                                         ifelse(assay=="NET-seq", 0.5, 0.1))) %>%
        group_by(group, index, position) %>%
        summarise(signal = mean(signal)) %>%
        ungroup() %>%
        mutate(group = "phantom(log[2](p/T)) ~ WT ~ phantom(log[2](p/T))")
    # Mean MNase-seq signal per genotype/gene/position, restricted to the
    # selected samples and to positions within max_length of the +1 dyad.
    mnase_df = read_tsv(mnase_data,
                        col_names=c('group', 'sample', 'annotation', 'index', 'position', 'signal')) %>%
        filter(position <= max_length & sample %in% sample_ids) %>%
        group_by(group, index, position) %>%
        summarise(signal = mean(signal)) %>%
        ungroup() %>%
        mutate(group = ordered(group,
                               levels = c("WT-37C", "spt6-1004-37C"),
                               # labels = c("\"WT\"", "italic(\"spt6-1004\")")))
                               labels = c("phantom(g[2](p/T)) ~ WT ~ phantom(g[2](p/T))",
                                          "phantom(g[2](p/T)) ~ italic(\"spt6-1004\") ~ phantom(g[2](p/T))")))
    # Panel 1: per-gene NET-seq signal heatmap (missing cells filled with 0).
    netseq_plot = ggplot(data = netseq_df %>%
                             complete(group, index, position, fill=list(signal=0)),
                         aes(x=position, y=index, fill=signal)) +
        geom_raster() +
        scale_x_continuous(breaks = c(0, 0.5),
                           labels = function(x){case_when(x==0 ~ "TSS",
                                                          x==0.5 ~ paste(x, "kb"),
                                                          TRUE ~ as.character(x))},
                           expand = c(0.025, 0)) +
        scale_y_reverse(breaks = function(x){seq(min(x)+500, max(x)-500, 500)},
                        name = paste(n_distinct(netseq_df[["index"]]), "nonoverlapping coding genes"),
                        expand = c(0, 30)) +
        scale_fill_viridis(option="inferno",
                           limits = c(NA, quantile(netseq_df[["signal"]], probs=netseq_cutoff)),
                           oob=scales::squish,
                           breaks = scales::pretty_breaks(n=2),
                           name = assay,
                           guide=guide_colorbar(title.position="top",
                                                title.hjust=1,
                                                barwidth=unit(1.3, "cm"),
                                                barheight=0.3)) +
        facet_grid(.~group, labeller=label_parsed) +
        theme_heatmap +
        theme(strip.text.x = element_text(size=10,
                                          margin=margin(6,0,0,0,"pt"),
                                          angle=0,
                                          color="black",
                                          face="plain",
                                          family="FreeSans",
                                          vjust=0.5,
                                          hjust=0.5),
              panel.grid.major.x = element_line(color="black"),
              panel.grid.major.y = element_line(color="black"),
              legend.box.margin = margin(0, 0, -5, 0, "pt"),
              plot.margin = margin(0,6,0,0,"pt" ))
    # Panel 2: per-gene MNase-seq dyad heatmap, faceted by genotype.
    mnase_plot = ggplot(data = mnase_df %>%
                            complete(group, index, position, fill=list(signal=0)),
                        aes(x=position, y=index, fill=signal)) +
        geom_raster() +
        scale_x_continuous(breaks = scales::pretty_breaks(n=3),
                           labels = function(x){case_when(x==0 ~ "+1 dyad",
                                                          x==max_length ~ paste(x, "kb"),
                                                          TRUE ~ as.character(x))},
                           expand = c(0, 0.025)) +
        scale_y_reverse(breaks = function(x){seq(min(x)+500, max(x)-500, 500)},
                        expand = c(0, 30), name=NULL) +
        scale_fill_viridis(option="inferno",
                           limits = c(NA, quantile(mnase_df[["signal"]], probs=mnase_cutoff)),
                           oob=scales::squish,
                           breaks = scales::pretty_breaks(n=3),
                           name = "MNase-seq dyad signal",
                           guide=guide_colorbar(title.position="top",
                                                barwidth=8, barheight=0.3, title.hjust=0.5)) +
        facet_grid(.~group, labeller=label_parsed) +
        theme_heatmap +
        theme(strip.text.x = element_text(size=10,
                                          margin=margin(6,0,0,0,"pt"),
                                          angle=0,
                                          color="black",
                                          face="plain",
                                          family="FreeSans",
                                          vjust=0.5,
                                          hjust=0.5),
              panel.grid.major.x = element_line(color="black"),
              panel.grid.minor.x = element_line(color="black"),
              panel.grid.major.y = element_line(color="black"),
              legend.box.margin = margin(0, 0, -5, 0, "pt"),
              plot.margin = margin(0,6,0,0,"pt" ))
    # Per-nucleosome quantification joined to the gene annotation; nucleosome
    # coordinates are re-expressed relative to each feature, strand-aware.
    quant_df = read_tsv(quant_data,
                        col_types = "ciicdcciiiiiiidddddddddddddddic") %>%
        left_join(read_tsv(annotation_path,
                           col_names = c('chrom', 'start', 'end', 'feat_name', 'score', 'feat_strand')) %>%
                      select(-score) %>%
                      mutate(annotation="nonoverlapping coding genes"),
                  by=c("feat_chrom"="chrom", "feat_name", "feat_strand", "annotation")) %>%
        mutate(feat_start=start, feat_end=end) %>%
        select(-c(start, end, nuc_chrom, overlap)) %>%
        mutate_at(vars(nuc_start, nuc_end, nuc_center, ctrl_summit_loc, cond_summit_loc, diff_summit_loc),
                  ~if_else(feat_strand=="+", .-feat_start, feat_end-.)) %>%
        group_by(annotation) %>%
        mutate(anno_labeled = paste(n_distinct(feat_name), annotation)) %>%
        ungroup() %>% mutate(annotation=anno_labeled) %>% select(-anno_labeled) %>%
        mutate(cond_ctrl_dist = cond_summit_loc-ctrl_summit_loc,
               annotation = fct_inorder(annotation, ordered=TRUE),
               index = as.integer(fct_inorder(feat_name, ordered=TRUE))) %>%
        mutate(direction=factor(as.integer(sign(cond_ctrl_dist)),
                                levels=c(-1, 0, 1),
                                labels=c("-", "no change", "+")))
    # Panel 4: fuzziness log2 fold-change per nucleosome (100 bp tiles).
    fuzz_plot = ggplot(data = quant_df %>%
                           filter(nuc_center-50>=-400 &
                                      nuc_center+50<=1000 &
                                      nuc_center <= feat_end-feat_start) %>%
                           mutate(label = "log[2](italic(\"spt6-1004\")/WT)"),
                       aes(x=nuc_center, y=index, width=100, fill=fuzziness_lfc)) +
        annotate(geom="rect", xmin=-400, xmax=1007, ymin=0, ymax=max(quant_df[["index"]]),
                 fill="white", linetype="blank") +
        geom_tile(linetype="blank") +
        scale_x_continuous(breaks = scales::pretty_breaks(n=3),
                           labels = function(x){case_when(x==0 ~ "+1 dyad",
                                                          x==max_length*1e3 ~ paste(x/1e3, "kb"),
                                                          TRUE ~ as.character(x/1000))},
                           expand = c(0, 25)) +
        scale_y_reverse(breaks = function(x){seq(min(x)+500, max(x)-500, 500)},
                        expand = c(0, 30), name=NULL) +
        # scale_fill_gradientn(colors = coolwarm(100), limits = c(-0.7, 0.7),
        scale_fill_gradientn(colors = coolwarm, limits = c(-0.7, 0.7),
                             oob=scales::squish,
                             breaks = scales::pretty_breaks(n=2),
                             name = "fuzziness",
                             guide=guide_colorbar(title.position="top",
                                                  barwidth=8, barheight=0.3, title.hjust=0.5)) +
        facet_grid(.~label, labeller = label_parsed) +
        theme_heatmap +
        theme(strip.text.x = element_text(size=10,
                                          margin=margin(6,0,0,0,"pt"),
                                          angle=0,
                                          color="black",
                                          face="plain",
                                          family="FreeSans",
                                          vjust=0.5,
                                          hjust=0.5),
              legend.box.margin = margin(0, 0, -5, 0, "pt"),
              panel.grid.major.x = element_line(color="grey50"),
              panel.grid.minor.x = element_line(color="grey50"),
              panel.grid.major.y = element_line(color="grey70"),
              plot.margin = margin(0,6,0,0,"pt" ))
    # Panel 3: occupancy (summit) log2 fold-change per nucleosome.
    # NOTE(review): this plot duplicates fuzz_plot except for the fill column,
    # limits and legend title -- a shared helper would remove the duplication.
    occ_plot = ggplot(data = quant_df %>%
                          filter(nuc_center-50>=-400 &
                                     nuc_center+50<=1000 &
                                     nuc_center <= feat_end-feat_start) %>%
                          mutate(label = "log[2](italic(\"spt6-1004\")/WT)"),
                      aes(x=nuc_center, y=index, width=100, fill=summit_lfc)) +
        annotate(geom="rect", xmin=-400, xmax=1007, ymin=0, ymax=max(quant_df[["index"]]),
                 fill="white", linetype="blank") +
        geom_tile(linetype="blank") +
        scale_x_continuous(breaks = scales::pretty_breaks(n=3),
                           labels = function(x){case_when(x==0 ~ "+1 dyad",
                                                          x==max_length*1e3 ~ paste(x/1e3, "kb"),
                                                          TRUE ~ as.character(x/1000))},
                           expand = c(0, 25)) +
        scale_y_reverse(breaks = function(x){seq(min(x)+500, max(x)-500, 500)},
                        expand = c(0, 30), name=NULL) +
        # scale_fill_gradientn(colors = coolwarm(100), limits = c(-2, 2),
        scale_fill_gradientn(colors = coolwarm, limits = c(-2, 2),
                             oob=scales::squish,
                             breaks = scales::pretty_breaks(n=3),
                             name = "occupancy",
                             guide=guide_colorbar(title.position="top",
                                                  barwidth=8, barheight=0.3, title.hjust=0.5)) +
        facet_grid(.~label, labeller = label_parsed) +
        theme_heatmap +
        theme(strip.text.x = element_text(size=10,
                                          margin=margin(6,0,0,0,"pt"),
                                          angle=0,
                                          color="black",
                                          face="plain",
                                          family="FreeSans",
                                          vjust=0.5,
                                          hjust=0.5),
              legend.box.margin = margin(0, 0, -5, 0, "pt"),
              panel.grid.major.x = element_line(color="grey50"),
              panel.grid.minor.x = element_line(color="grey50"),
              panel.grid.major.y = element_line(color="grey70"),
              plot.margin = margin(0,6,0,0,"pt" ))
    # Assemble the four panels side by side and write the PDF.
    fig_four_b = plot_grid(netseq_plot, mnase_plot, occ_plot, fuzz_plot, align="h", axis="tb", nrow=1,
                           rel_widths = c(0.2, 1, 0.5, 0.5))
    ggplot2::ggsave(pdf_out,
                    plot=fig_four_b,
                    width=fig_width,
                    height=fig_height,
                    units="in",
                    device=cairo_pdf)
}
# Entry point when executed by Snakemake: inputs, parameters and the output
# path are pulled from the global `snakemake` S4 object that the workflow
# injects into the script environment.
main(theme_spec = snakemake@input[["theme"]],
     netseq_data = snakemake@input[["netseq_data"]],
     mnase_data = snakemake@input[["mnase_data"]],
     quant_data = snakemake@input[["quant_data"]],
     annotation_path = snakemake@input[["annotation"]],
     fig_width = snakemake@params[["width"]],
     fig_height = snakemake@params[["height"]],
     assay = snakemake@params[["assay"]],
     pdf_out = snakemake@output[["pdf"]])
|
4952009335f5b3cfaf1acc63a0bd9add7c3c5b36
|
dc3114f71d124f090616e1475d7a2f0f3cc58e67
|
/study1_analysis.R
|
9b60a53d975694802edb7416090100d9c80f3b78
|
[] |
no_license
|
stevenfelix/R_Code_Samples
|
c2e30461c9e8bb51502b190c12b389da3d7e2634
|
e61ef57ebd359a995f6a8cfd0adef042bc5fc6d5
|
refs/heads/master
| 2021-01-16T19:36:48.316588
| 2017-08-13T14:17:50
| 2017-08-13T14:17:50
| 100,180,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,153
|
r
|
study1_analysis.R
|
# Sample analyses from an MTurk survey study
# Steven Felix
#
# Description: These are excerpts from my script used to produce analyses for a manuscript.
#
# NOTE(review): rm(list = ls()) and setwd() in a script are anti-patterns --
# they clobber the user's workspace and hard-code a machine-specific path.
# Consider removing rm()/setwd() and using here::here() or relative paths.
rm(list = ls())
search()
# packages ----------------------------------------------------------------
library(effects)
library(psych)
library(car)
library(dplyr)
library(magrittr)
library(corrplot)
library(arm)
# Opening data ------------------------------------------------------------
setwd("~/Dropbox/Research/Dissertation Project/Study 1 - PC Correlates/Data/Rdata")
# load() creates `final` in the global environment; rename it to `data`.
load("finalSurveyData.Rdata")
data <- final
rm(final)
# Build a publication-style correlation table from psych::corr.test() output:
# correlations annotated with significance stars, stacked above the p-value
# and n matrices. Returns a character matrix.
cor.table <- function(x){
  tables <- corr.test(x)
  rtable <- tables$r
  #rtable[!tables$p <= .05] <- NA
  # Extract all four significance strata from the NUMERIC matrix first; the
  # assignments below coerce rtable to character, so the order of these
  # extractions relative to the assignments is essential.
  nostar <- rtable[tables$p > .05]
  onestar <- rtable[tables$p <= .05 & tables$p > .01]
  twostar <- rtable[tables$p <= .01 & tables$p > .001]
  threestar <- rtable[tables$p <= .001]
  # Replace each stratum with its formatted value plus stars.
  rtable[tables$p <= .05 & tables$p > .01] <- paste(format(onestar, digits = 2),"*",sep="")
  rtable[tables$p <= .01 & tables$p > .001] <- paste(format(twostar, digits = 2),"**",sep="")
  rtable[tables$p <= .001] <- paste(format(threestar, digits = 2),"***",sep="")
  rtable[tables$p > .05] <- format(nostar, digits = 2)
  # Blank the (trivial) diagonal of both the r and p matrices.
  diag(rtable) <- NA
  ptable <- tables$p
  ptable <- format(ptable, digits = 3)
  diag(ptable) <- NA
  ntable <- tables$n
  # Suffix row names so the stacked blocks are distinguishable.
  row.names(ptable) <- paste(row.names(ptable),"p",sep="_")
  # corr.test() returns a scalar n when there are no missing data; only a
  # matrix n gets renamed rows.
  if(is.matrix(ntable)){
    row.names(ntable) <- paste(row.names(ntable),"n",sep="_")
  }
  # Stack r / separator / p / n into one matrix.
  rptables <-rbind(rtable,NA,ptable,ntable)
  return(rptables)
}
# Sample Descriptives ------------------------------------------------------------
datanames <- names(data)[order(names(data))]
# descriptives (mean, median, range...) for numeric variables
library(psych)
# Build a logical mask of numeric columns.
# NOTE(review): growing `tab` with c() in a loop; vapply(data, is.numeric,
# logical(1)) would be the idiomatic equivalent.
tab = c()
for(name in names(data)){
  x <- is.numeric(data[,name])
  tab = c(tab, x)
}
table1 <- describe(data[,tab])
table1 <- as.data.frame(table1)
table1
library(xlsx) # output to excel
write.xlsx(table1,
           file = "/Users/samf526/Dropbox/Research/Dissertation Project/Manuscripts/Whats in a PC rating/table1.xlsx", showNA = FALSE)
detach(package:xlsx)
# who did people choose for most important category?
table(data$ImpPerson) # absolute frequencies
as.data.frame(format(table(data$ImpPerson) / sum(table(data$ImpPerson)), digits = 2)) # relative freq.
# Sample Correlations --------------------------------------------------------------
# vector of PC variable names
PC.var.names1 <- c("PC_Partner_1","PC_Mother_1","PC_Father_1","PC_Friend_1","PC_Other_1")
# Correlation table for all data
tab <- corr.test(data[,c(PC.var.names1)]) # holm adjust
tab
# Sample Correlation Plots ------------------------------------------------------------------
## Plots: PC and individual variables
# NOTE(review): ::: accesses unexported internals; corr.test and corrplot are
# exported, so :: would suffice here.
rMatrix <- psych:::corr.test(y = data[,c("PC_Mother_1","PC_Father_1","PC_Partner_1","PC_Friend_1","PC_Other_1")],
                             x = data[,c("DAS.fs","SPS_total", "SPANE_neg", "SPANE_pos")],
                             adjust = "holm")
rMatrix2 <- rMatrix$r
row.names(rMatrix2) <- c("Dysfunctional Attitudes", "Perceived Support","Negative Moods", "Positive Moods")
colnames(rMatrix2) <- c("Mother","Father","Partner", "Friend", "Other")
# Grayscale palette: darker = larger |r|; nonsignificant cells left blank.
col3 <- colorRampPalette(c("black","grey","white","grey","black"))
x11()
cogTable <- corrplot:::corrplot(rMatrix2, method = "shade", col = col3(20),
                                addCoef.col = "black", tl.srt = 60, tl.offset = .8,
                                p.mat = rMatrix$p, tl.col = "black", insig = 'blank',
                                addshade = 'all',cl.pos = "n", title = "Figure 1")
# Sample Backwards Regression ----------------------------------------------------------
# biggest Partner PC correlations:
# -SPANE_neg (.34)
# -PNRQ-neg (.44)
# -AC-neg (.43)
# -CSI - (-.36)
# -PC_Average_nonpartner (.48)
# -PC Mother - (.37)
# -PC Other - (.41)
# -PC Friend - (.42)
library(MASS)
# data with no NAs
dataComp <- data[complete.cases(data[,c("PC_Partner_1", "PC_Average_1_nonpartner","PNRQ_neg", "ACS_neg","SPANE_neg", "CSI", "SPS_total")]),]
# backwards regression
mod <- lm(PC_Partner_1 ~ PC_Average_1_nonpartner + PNRQ_neg + ACS_neg + CSI +SPANE_neg + SPS_total,
          data = dataComp)
step <- stepAIC(mod, direction = "both")
step$anova
# final model
mod <- lm(scale(PC_Partner_1) ~ scale(PC_Average_1_nonpartner) + scale(PNRQ_neg) + scale(ACS_neg), data = data)
summary(mod)
# check residuals
res <- rstandard(mod)
plot(res ~ mod$fitted.values, pch = 20, xlab = "Fitted Values", ylab = "Standardized Residuals")
mline <- lm(res ~ mod$fitted.values)
summary(mline)
abline(a = mline$coefficients[1], b = mline$coefficients[2]) # good!
summary(mod) # strongest predictors
# Standardized regression coefficients
library(lm.beta)
betas <- lm.beta(mod)
as.data.frame(betas$standardized.coefficients) # no standard errors
# re-run regression with standardized variables, for Standard Errors
Stmod <- lm(scale(PC_Partner_1) ~ scale(PC_Average_1_nonpartner) + scale(PNRQ_neg) + scale(ACS_neg), data = data)
summary(Stmod)
|
457c2141739d908e82ea83290e3a3473ad4d4294
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/mlflow/R/mlflow/R/model-crate.R
|
56689eb1cfc310929bff06159c9aa0fc28d41fce
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
R
| false
| false
| 738
|
r
|
model-crate.R
|
#' @rdname mlflow_save_model
#' @export
mlflow_save_model.crate <- function(model, path, model_spec=list(), ...) {
  # Start from a clean output directory.
  if (dir.exists(path)) {
    unlink(path, recursive = TRUE)
  }
  dir.create(path)
  # Persist the crate as a raw serialization wrapped in an RDS file; the
  # loader mirrors this with unserialize(readRDS(...)).
  saveRDS(serialize(model, NULL), file.path(path, "crate.bin"))
  # Register the crate flavor in the model specification and write it out.
  crate_flavor <- list(
    crate = list(
      version = "0.1.0",
      model = "crate.bin"
    )
  )
  model_spec$flavors <- append(model_spec$flavors, crate_flavor)
  mlflow_write_model_spec(path, model_spec)
  model_spec
}
#' @export
mlflow_load_flavor.mlflow_flavor_crate <- function(flavor, model_path) {
  # The crate is stored as a raw serialization inside an RDS container, so
  # loading requires readRDS() followed by unserialize(). `flavor` is unused;
  # it only drives S3 dispatch.
  crate_file <- file.path(model_path, "crate.bin")
  unserialize(readRDS(crate_file))
}
#' @export
mlflow_predict.crate <- function(model, data, ...) {
  # A crate is simply a function: invoke it on the data, forwarding any
  # extra arguments positionally/by name via do.call().
  call_args <- list(data, ...)
  do.call(model, call_args)
}
|
79ce96ec02d44ea07d1947a96e5e5fad7a9d1c3f
|
71cc117affb4cf023ee3c807a0a23846b18b7469
|
/R/scf_scaffold.R
|
375aae8f95239b8d843786c9a7adc97f31dddb46
|
[
"MIT"
] |
permissive
|
petermeissner/scaffold
|
479d8bc164df9a29e6b038f3249b104253449a96
|
0e130d16400dbf75da3005fd688d5cf72e6dcd66
|
refs/heads/master
| 2020-06-09T02:56:43.628044
| 2020-02-19T21:19:37
| 2020-02-19T21:19:37
| 193,357,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 898
|
r
|
scf_scaffold.R
|
#' general scaffolding function
#'
#' Copies a scaffold template tree shipped inside an installed package to a
#' destination directory, preserving the directory structure.
#'
#' @param scf path to scaffold from
#' @param path_to path to scaffold to; defaults to a directory named after
#'   the scaffold (\code{basename(scf)})
#' @param package the package to look for the scaffolding
#' @param overwrite whether existing destination files may be overwritten
#'   (passed to \code{fs::file_copy()}); defaults to \code{FALSE}
#'
#' @export
#'
#' @examples \dontrun{
#'
#' scf_scaffold("shiny_material", path_to = ".")
#'
#' }
#'
scf_scaffold <-
  function(scf, path_to = NULL, package = "scaffold", overwrite = FALSE){
    # check path
    if ( is.null(path_to) ){
      path_to <- basename(scf)
    }
    # Locate the scaffold template inside the installed package.
    pkg_path <- system.file(package = package)
    scf_path <- fs::path(pkg_path, "scaffold", scf)
    # Enumerate the template files and derive source/destination path pairs.
    files <- list.files(scf_path, recursive = TRUE)
    paths_from <- fs::path(scf_path, files)
    paths_to <- fs::path(path_to, files)
    # Recreate the directory structure, then copy file by file.
    fs::dir_create(fs::path_dir(paths_to), recurse = TRUE)
    fs::file_copy(
      path = paths_from,
      new_path = paths_to,
      overwrite = overwrite
    )
  }
|
1e19d98f6dfcef6bcc258a09da7aa9797daa73f4
|
819f05d261404015a3e95649a83975622b145c16
|
/4.11.R
|
1bd156d3d026b1280e6d92c3ffe3978c180b8c9d
|
[] |
no_license
|
fengzenggithub/R-Simulation-by-Ross
|
1036a4d6bbdbafaf09f0b3023e3f0bb05e7c0076
|
3d9fd8cfaffbb7ff23f9a377294d07703faaa474
|
refs/heads/master
| 2023-03-16T06:50:05.439459
| 2017-03-05T04:53:44
| 2017-03-05T04:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,546
|
r
|
4.11.R
|
# This problem is good because it shows that intuitively plausible solutions
# may not work!
# Simulation parameters: subsets of size r from {1, ..., n}, required to
# contain at least one element of {1, ..., k}; N Monte Carlo replications.
# (Use <- for assignment per R convention.)
n <- 10
k <- 4
r <- 3
N <- 100000
# The most efficient way would be to generate a random element from {1, ..., k},
# and then generate a subset of size r-1 from the rest of the elements...
# This does not work as in fact this would increase the probability of sets
# having more than 1 element among {1, ..., k}, see results
# Naive (and biased) generator: pick one element uniformly from {1, ..., k},
# then complete the subset with r-1 further elements drawn without
# replacement from the remaining n-1 values.
RandomSubsetWith1K_wrong <- function(r, n, k) {
  first <- floor(runif(1) * k) + 1
  remaining <- (1:n)[-first]
  rest <- sample(remaining, r - 1, replace = FALSE)
  sort(c(first, rest))
}
# Estimate the distribution of the naive generator over N replications and
# print the relative frequency of each distinct subset.
result <- replicate(N, RandomSubsetWith1K_wrong(r, n, k))
df <- data.frame(t(result), count=1/N)
aggregate(count ~ ., df, sum)
# The probability that i from 1 to k is the smallest element of the subset
# is proportional to (n-i)!/(n-i-r+1)!, and for i=1 it is equal to
# p1 = 1 / [ n / r - C(n - k, r) / C(n - 1, r - 1) ],
# where, for i>=1,
# p(i+1) = (n-i-r+1) / (n-i) * pi
# Accordingly, we can generate the smallest element, and then a subset
# with r-1 elements greater than this element
# This should work (we do not check the condition r + k < n)
RandomSubsetWith1K <- function (r, n, k) {
  # Inverse-transform sampling of the smallest element j.
  j <- 1
  pj <- 1 / ( n / r - choose(n - k, r) / choose(n - 1, r - 1) )
  Fj <- pj
  U <- runif(1)
  while (U >= Fj) {
    pj <- (n - j - r + 1) / (n - j) * pj
    Fj <- Fj + pj
    j <- j + 1
  }
  # With r == 1 there is nothing left to draw (and (j+1):n would count down
  # when j == n, producing an invalid candidate set).
  if (r == 1) return(j)
  # Complete the subset with r-1 elements above j. The original used
  # sample((j+1):n, r - 1), which hits the sample() footgun: a length-1
  # vector x is treated as 1:x, silently drawing from the wrong set when
  # exactly one candidate remains. Indexing via sample.int() is safe.
  candidates <- seq.int(j + 1, n)
  rest <- candidates[sample.int(length(candidates), r - 1)]
  c(j, sort(rest))
}
# Estimate the distribution of the corrected generator over N replications
# and print the relative frequency of each distinct subset.
result <- replicate(N, RandomSubsetWith1K(r, n, k))
df <- data.frame(t(result), count=1/N)
aggregate(count ~ ., df, sum)
|
a1a20206fbeb62e21a2b803ec0087de2c2b43b22
|
44e7ec2d56b0d973f35f0a372beba2460d1f93d6
|
/man/power_lm_app.Rd
|
6653f07f495a3d3a41f450a93de5c4812b690370
|
[
"CC-BY-4.0"
] |
permissive
|
biostats-r/biostats.tutorials
|
02471032f76acdbf34caef5839cb60bcebe0c6f6
|
6bafcf0f2a5f811c23ef557cdce577b6ee4f5a27
|
refs/heads/main
| 2023-08-29T16:41:01.340658
| 2021-10-12T13:21:00
| 2021-10-12T13:21:00
| 301,679,132
| 1
| 2
|
CC-BY-4.0
| 2023-09-05T15:39:49
| 2020-10-06T09:40:13
|
R
|
UTF-8
|
R
| false
| true
| 237
|
rd
|
power_lm_app.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_lm_app.R
\name{power_lm_app}
\alias{power_lm_app}
\title{Simulations of power in lm}
\usage{
power_lm_app()
}
\description{
Simulations of power in lm
}
|
684558e7a6df6cb45aa52655aa3d811fcf1458f6
|
c148b02c89314ebbf164a4daac5f9d406985148e
|
/man/JointRegBC.default.Rd
|
df4041de844678c129b2f7fd21ee0a6ce7ab5dfa
|
[] |
no_license
|
cran/JointRegBC
|
8b02625a240cdbc4c2a8909dc7dfc09cb8019456
|
997655e70a18e6e995218f9123c06d896aa1c34b
|
refs/heads/master
| 2016-09-06T01:35:49.434863
| 2013-06-13T00:00:00
| 2013-06-13T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,786
|
rd
|
JointRegBC.default.Rd
|
\name{JointRegBC.default}
\alias{JointRegBC.default}
\title{Joint Modelling of Mixed Correlated Binary and
Continuous Responses : A Latent Variable Approach.}
\description{
A joint regression model for mixed correlated binary and continuous responses is presented. In this model binary response can be dependent on the continuous response. With this model, the dependence between responses can be taken into account by the correlation between errors in the models for binary and continuous responses.}
\usage{
\method{JointRegBC}{default}(ini = NA, X, y, z, p, q, ...)}
\arguments{
\item{ini}{Initial values}
\item{X}{Design matrix}
\item{z}{Continuous responses}
\item{y}{Binary responses}
\item{p}{Order of dimension of Binary responses}
\item{q}{Order of dimension of continuous responses}
\item{\dots}{Other arguments}}
\details{Models for JointRegBC are specified symbolically. A typical model has the form response1 ~ terms and response2 ~ terms where response1and response2 are the (numeric) binary and
continuous responses vector and terms is a series of terms which specifies a linear predictor for responses. A terms specification of the form first + second indicates all the terms in first together with all the terms in second with duplicates removed. A specification of the form first:second indicates the set of terms obtained by taking the interactions of all terms in first with all terms in second. The specification first*second indicates the cross of first and second. This is the same as first + second + first:second.}
\value{
\item{Binary response}{Coefficient of ordinal response}
\item{Continuous Response}{Coefficient of continuous response}
\item{Variance of Countinuous Response}{Variance of continuous response}
\item{Correlation}{Coefficient of continuous response}
\item{Hessian}{Hessian matrix}
\item{convergence}{An integer code. 0 indicates successful convergence.}
\item{objective}{-loglikelihood.}
}
\references{
Bahrami Samani, E. and Tahmasebinejad. Zh.(2011). Joint Modelling of Mixed Correlated Nominal, Ordinal and
Continuous Responses. Journal of Statistical Research. 45(1):37-47.
}
\author{
Ehsan Bahrami Samani and Zhale Tahmasebinejad
}
\note{
Supportted by Shahid Beheshti University
}
\seealso{
\code{\link{nlminb}},\code{\link{fdHess},\link{clogit}}
}
\examples{
function (ini = NA, X, y, z, p, q, ...)
{
options(warn = -1)
f <- function(ini, X, y, z, p, q) {
X = cbind(1, X)
y <- as.vector(y)
z <- as.vector(z)
ini <- as.vector(ini)
X <- as.matrix(X)
n = nrow(X)
muz = muy = muygivenzx = q2 = q1 = l1 = l2 = l3 = muygivenzx = as.vector(0)
sez <- ini[p + q + 2]
seygivenzx <- (1 - (ini[p + q + 1])^2)
mz=matrix(0,n,p)
my=matrix(0,n,q)
for(i in 1:n){
for(j in 1:p){
mz[i,j]=ini[1:p][[j]]*X[i, ][[j]]
}}
for(i in 1:n){
for(k in 1:q){
my[i,k]=ini[(p + 1):(p + q)][[k]]*X[i, -1][[k]]
}}
for (i in 1:n) {
muz[i] <- sum(mz[i,])
muy[i] <- sum(my[i,])
muygivenzx[i] <- muy[i] + (ini[p + q + 1] * (z[i] -
muz[i]))/sez
q1[i] <- ( - muygivenzx[i])/sqrt(seygivenzx)
l1[i] <- log(pnorm(q1[i])) + log(dnorm(z[i], muz[i],
sez))
l2[i] <- log(1 - pnorm(q1[i])) + log(dnorm(z[i],
muz[i], sez))
}
data0 <- cbind(y, l1)
data1 <- cbind(y, l2)
data0[data0[, 1] == 1, 2] <- 0
data1[data1[, 1] == 0, 2] <- 0
t0 <- sum(data0[, 2])
t1 <- sum(data1[, 2])
t <- c(t0, t1)
Tfinal <- sum(t)
return(-Tfinal)
}
n = nlminb(ini, f, X = X, y = y, z = z, p = p, q = q, lower = c(rep(-Inf,
p+q), -0.999, 0), upper = c(rep(Inf,
p+q), 0.999, Inf), hessian = T)
h = fdHess(n$par, f, z = z, y = y, X, p, q)
h1 = h$Hessian
ih = ginv(h1)
se = sqrt(abs(diag(ih)))
n$Hessian <- h1
n$p <- p
n$q <- q
n$se <- as.vector(se)
n$call <- match.call()
class(n) <- "JointRegBC"
object = n
Co.Re <- data.frame(Parameter = object$par[1:p], S.E = object$se[1:p],
`Confidence Interval` = paste("(", round(object$par[1:p] -
2 * object$se[1:p], 3), ",", round(object$par[1:p] +
2 * object$se[1:p], 3), ")", sep = ""))
Binary.Re <- data.frame(Parameter = object$par[(p + 1):(p + q)],
S.E = object$se[(p + 1):(p + q)], `Confidence Interval` = paste("(",
round(object$par[(p + 1):(p + q)] - 2 * object$se[(p +
1):(p + q)], 3), ",", round(object$par[(p + 1):(p +
q)] + 2 * object$se[(p + 1):(p + q)], 3), ")",
sep = ""))
Cor <- data.frame(Parameter = object$par[p + q + 1], S.E = object$se[p +
q + 1], `Confidence Interval` = paste("(", round(object$par[p +
q + 1] - 2 * object$se[p + q + 1], 3), ",", round(object$par[p +
q + 1] + 2 * object$se[p + q + 1], 3), ")", sep = ""))
Var <- data.frame(Parameter = object$par[p + q + 2], S.E = object$se[p +
q + 2], `Confidence Interval` = paste("(", round(object$par[p +
q + 2] - 2 * object$se[p + q + 2], 3), ",", round(object$par[p +
q + 2] + 2 * object$se[p + q + 2], 3), ")", sep = ""))
res <- list(call = object$call, `Continuos Response` = Co.Re,
`Variance Of Countinous Response` = Var, `Binary Response` = Binary.Re,
Correlation = Cor)
res$Hessian <- h1
res$convergence <- n$convergence
res$objective<- n$objective
res$call <- match.call()
class(res) <- "JointRegBC"
res
}
}
\keyword{regression}
|
badcda8516c00ed3ddd3230fb4cc0c75e0c82ca5
|
20eb3d806fd691a1c7300d3c2695214bafa0674c
|
/data/download.R
|
8bd64459eadf09b667e913de658a99713e5df40a
|
[] |
no_license
|
chl781/-Global-Historical-Climatology-Time-Series-Analysis
|
5e93cb9efc5e9ecab0f70274924480b1cde40cf5
|
1175b67a39617d26b70a485f8ca91dffdc1e0c92
|
refs/heads/master
| 2020-09-28T06:30:35.839136
| 2019-12-11T05:17:37
| 2019-12-11T05:17:37
| 226,712,969
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 211
|
r
|
download.R
|
# Download the daily GHCN-D record for each of the 50 stations listed in
# best50.csv (column `names` holds the station identifiers).
stations <- read.csv("best50.csv")
stations$names <- as.character(stations$names)
library(HelpersMG)
# Fetch each station file from the NOAA archive with HelpersMG::wget().
ghcn_base <- "https://www.ncei.noaa.gov/data/global-historical-climatology-network-daily/access/"
for (station_id in stations$names[1:50]) {
  wget(paste0(ghcn_base, station_id, ".csv"))
}
|
4a16e408ff0018460866e0c9c8d1694884e6d9a6
|
a5d82208381cf0c1a03e9a07379a2c4a9f719995
|
/R/load.pathway.definition.R
|
0f4c6a73fb0b3bfb0def25299a4661e843c14ef9
|
[
"MIT"
] |
permissive
|
yfyang86/ARTP3
|
f5eefab5e6457918f40074daa992f8b5d4c1115f
|
51bef5f4ef30511eb1be477989d659489a1b25a2
|
refs/heads/master
| 2020-12-28T20:09:32.514738
| 2015-11-02T20:10:25
| 2015-11-02T20:10:25
| 45,478,605
| 0
| 0
| null | 2015-11-03T16:13:22
| 2015-11-03T16:13:22
| null |
UTF-8
|
R
| false
| false
| 1,804
|
r
|
load.pathway.definition.R
|
# Load and validate a pathway definition.
#
# `pathway` may be a file path (read with read.table), a matrix, or a data
# frame; it must provide the columns SNP, Gene and Chr.  The definition is
# filtered according to `options$selected.snps` / `options$excluded.genes`,
# de-duplicated, sorted by chromosome, gene and SNP, and checked so that no
# gene is assigned to more than one chromosome.  Returns the cleaned
# data frame; stops with an informative message on any problem.
load.pathway.definition <- function(pathway, options){
  msg <- paste("Loading definition of pathway:", date())
  if(options$print) message(msg)

  if(is.character(pathway)){
    # `pathway` is a file path: try to read it as a table
    loaded <- try(pd <- read.table(pathway, header = TRUE, as.is = TRUE), silent = TRUE)
    if(error.try(loaded)){
      stop(paste0("Cannot load ", pathway))
    }
    if(nrow(pd) == 0){
      stop(paste0("File below is empty: \n", pathway))
    }
    pathway <- pd
    rm(pd)
    gc()
  }else if(is.matrix(pathway)){
    pathway <- as.data.frame(pathway)
  }

  # The three required columns must all be present
  required.cols <- c("SNP", "Gene", "Chr")
  present <- required.cols %in% colnames(pathway)
  if(!all(present)){
    stop(paste("Columns below were not found in pathway definition:\n", paste(required.cols[!present], collapse = " ")))
  }
  pathway <- pathway[, required.cols]

  # Optional whitelist of SNPs
  if(!is.null(options$selected.snps)){
    pathway <- pathway[pathway$SNP %in% options$selected.snps, , drop = FALSE]
    if(nrow(pathway) == 0){
      stop("No SNP is left if only use SNPs specified in options$selected.snps")
    }
  }

  # Optional blacklist of genes
  if(!is.null(options$excluded.genes)){
    pathway <- pathway[!(pathway$Gene %in% options$excluded.genes), , drop = FALSE]
    if(nrow(pathway) == 0){
      stop("No SNP is left after removing genes specified by the users")
    }
  }

  # Remove duplicated rows, then order by chromosome, gene and SNP
  pathway <- pathway[!duplicated(pathway), ]
  pathway <- pathway[order(pathway$Chr, pathway$Gene, pathway$SNP), ]

  # Sanity check: each gene may appear on one chromosome only
  gene.by.chr <- table(pathway$Gene, pathway$Chr)
  multi.chr <- apply(gene.by.chr, 1, function(x){sum(x > 0) > 1})
  if(any(multi.chr)){
    dup.genes <- rownames(gene.by.chr)[multi.chr]
    stop(paste(c('The follow gene(s) are included in more than one chromosome:\n', dup.genes), collapse = ' ', sep = ''))
  }

  pathway
}
|
23116fad4586deac392a4c9000cb048f1c56b09b
|
f1798de3f2c8d3df5bbbb9b21c9fd3b603ae720c
|
/accessory_scripts/duplicate_participant_xref_w_dbs.r
|
7810fcb1e856c8339fb26e6c21b46bb15991d5da
|
[] |
no_license
|
genomicsengland/af_letter_distribution_dataset
|
5625d2c0188364882a45482aefd553dc6e959481
|
86c4497647262da216f9be1bcbb595b4a38ba985
|
refs/heads/master
| 2023-05-29T03:14:29.924984
| 2021-06-11T15:44:57
| 2021-06-11T15:44:57
| 371,753,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,491
|
r
|
duplicate_participant_xref_w_dbs.r
|
#-- check that duplicate participants are genuine duplicates
# Cross-references the metrics DB's duplicate-participant pairs against the
# DBS (NHS tracing) results, flagging pairs whose traced NHS numbers differ.
rm(list = objects())  # clears the workspace (avoid in shared/production code)
options(stringsAsFactors = FALSE,
scipen = 200)
library(wrangleR)
library(tidyverse)
library(DBI)
# Connection profile (host/port/credentials); getprofile() presumably comes
# from wrangleR -- TODO confirm.
p <- getprofile("indx_con")
metrics_con <- dbConnect(RPostgres::Postgres(),
dbname = "metrics",
host = p$host,
port = p$port,
user = p$user,
password = p$password)
dbs_con <- dbConnect(RPostgres::Postgres(),
dbname = "cohorts",
host = p$host,
port = p$port,
user = p$user,
password = p$password)
# Pairs of participant IDs flagged as duplicates of each other.
dups <- dbGetQuery(metrics_con, "
select participant_id as participant_id_x,
duplicated_participant_id as participant_id_y
from dict.vw_duplicate_participants
;")
# DBS tracing results: NHS number, demographics, and whether the record was
# successfully traced (record types 20/30/33/40).
dbs <- dbGetQuery(dbs_con, "
select local_pid as participant_id
,trace_result_new_nhs_number as nhs_number
,returned_first_forename as forename
,returned_surname as surname
,returned_date_of_birth as dob
,record_type in (20,30,33,40) as traced
from dbs.batch_trace_return
;")
# Quick duplicate checks on the tracing table itself (printed to console).
table(duplicated(dbs$participant_id))
table(duplicated(dbs$nhs_number[dbs$nhs_number != ""]))
# Attach tracing info for both members of each duplicate pair (left joins;
# unmatched IDs produce NA columns).
d <- merge(dups, dbs, by.x = "participant_id_x", by.y = "participant_id",
all.x = TRUE)
d <- merge(d, dbs, by.x = "participant_id_y", by.y = "participant_id",
all.x = TRUE, suffixes = c("_x", "_y"))
# A pair "mismatches" when both sides were traced but to different NHS
# numbers.  NOTE(review): `!=` on a missing nhs_number yields NA, so
# d$mismatch can be NA and d[d$mismatch,] will then include NA rows.
d$mismatch <- (d$nhs_number_x != d$nhs_number_y) & d$traced_x & d$traced_y
# dtv() is not defined here -- presumably a viewer helper from wrangleR;
# TODO confirm.
dtv(d[d$mismatch,])
|
7330bc5ef2e26632d8fd6c3a6f9c0555a26ecd0c
|
c87286e29bf62160dc5869913ecef6eceedb8ee9
|
/Mehr Objekt Erkennung/ui.R
|
03ad206da1b7e07b843f83fd97884628e65bd901
|
[] |
no_license
|
hjynick/Man-ver-Erkennung
|
8f7a8b090c71150de147df5de50bc1326ff9b191
|
2f44b85a55aa721a09ab93934a7ae0aa00c955f7
|
refs/heads/master
| 2020-04-04T14:55:35.956832
| 2018-11-04T17:11:37
| 2018-11-04T17:11:37
| 156,018,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,716
|
r
|
ui.R
|
# Shiny UI (shinydashboard): an animated timeline slider drives a leaflet
# map plus several manoeuvre-recognition result panels.
library(shiny)
library(shinydashboard)

shinyUI(
  dashboardPage(
    skin = "red",
    dashboardHeader(title = "Mehr Object"),
    dashboardSidebar(
      # Timeline over the recorded trip (unix-epoch seconds) with animated
      # playback controls.
      sliderInput(
        "slider2",
        label = h3("Timeline"),
        min = 1509096323.025,
        max = 1509096382.89,
        step = 0.04,
        value = 1509096323.01,
        animate = animationOptions(
          loop = TRUE,
          interval = 2,
          playButton = icon("play", "fa-3x"),
          pauseButton = icon("pause", "fa-3x")
        )
      )
    ),
    dashboardBody(
      fluidRow(
        # Left half: the map.
        column(
          6,
          box(
            title = "Global_Map",
            collapsible = TRUE,
            width = "100%",
            height = 800,
            leafletOutput("map1", height = 800)
          )
        ),
        # Right half: result panels.
        column(
          6,
          box(
            title = "Distance",
            collapsible = TRUE,
            width = "100%",
            tableOutput("Click_text")
          ),
          box(
            title = "Lane Change Recognition",
            valueBoxOutput("sw", width = "100%"),
            height = "100%",
            width = "100%"
          ),
          # Title text kept exactly as the original (including its spelling).
          box(
            title = "Longituide Recogniton",
            width = "100%",
            plotOutput("longi", width = 800)
          ),
          box(
            title = "All",
            width = "100%",
            plotOutput("manue", width = 800)
          ),
          box(
            title = "Submit",
            width = "100%",
            actionButton("submit", "Submit")
          )
        )
      )
    )
  )
)
|
eabaf69bf696c44dc39769ff172b229a0f14e5e3
|
0790b99473d0c669412e6c5d8d81765e11a23fc5
|
/code/demographics/education.R
|
c1909bd83ae52006b485cdc19af83eaba01b9de1
|
[] |
no_license
|
katiemlyon/nvs-report
|
cf3d4227a88d3dd05bda0b9addadaa3468004f86
|
972b2ad2e182fd5b1abb99d623991364ab8763c9
|
refs/heads/master
| 2020-04-30T10:51:38.900385
| 2019-04-12T16:04:39
| 2019-04-12T16:04:39
| 176,787,597
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,541
|
r
|
education.R
|
# Exploratory analysis of the SCHOOL (years of education) variable from the
# 2018 NVS survey.  NOTE(review): several statements below are broken or
# reference objects not created in this script; see inline notes.
library(ggplot2)
#load functions
# round_df() is expected to come from one of these sourced helper files.
source("code/functions/calc_pct.R")
source("code/functions/round_df.R")
#read data
nvs2018 <- read.csv("data/nvs2018.csv")
###########################################
# Education
###########################################
# Quick console summaries of the raw SCHOOL column.
str(nvs2018$SCHOOL)
range(nvs2018$SCHOOL, na.rm=TRUE)
table(nvs2018$SCHOOL)
#education <- subset(nvs2018, select = c(SCHOOL))
education <- nvs2018$SCHOOL
education <- education[!is.na(education)]
# calculate mean education level
educ = mean(education)
educ
# round percent to whole number
educ <- round_df(educ, 0)
educ
# specify education levels for output
# Maps the (scalar) mean years of schooling onto a descriptive label.
educLevel <- NA
educLevel[educ < 12] <- "Less than HS"
educLevel[educ >= 12 & educ <= 15] <- "High School/Some College"
educLevel[educ >= 16 & educ <= 17] <- "College"
educLevel[educ >= 18] <- "Graduate School"
educLevel
#edlevels <- c('High School', 'College')
# recode each factor and explicitly set the levels
# NOTE(review): `education` is a vector at this point, so education[,i]
# will error; this loop looks copied from data-frame code.
for(i in seq_along(education)) {
education[,i] <- factor(education[,i])
}
# NOTE(review): likert() requires the likert package, which is not loaded
# in this script.
edProp <- likert(education)
edProp
edTitle <- "Education"
edTable <- summary(edProp)
str(edTable)
###########
education <- nvs2018$SCHOOL
# NOTE(review): subset(education, ...) is applied to a vector that has no
# SCHOOL column (likely meant nvs2018), and geom_bar(aes) passes the aes
# function itself as an argument -- presumably meant geom_bar().
edPlot <- ggplot(data=subset(education, !is.na(SCHOOL)), aes(SCHOOL)) +
geom_bar(aes) +
ggtitle("Education")
edPlot
# What are the education levels?
# EDCAT is a factor (i.e., categorical) variable, a bar chart
# is a great visualization to use.
#
# NOTE(review): `titanic` is never defined in this script -- this section
# looks pasted from a tutorial and will error as-is.
ggplot(titanic, aes(x = Survived)) +
geom_bar()
# If you really want percentages.
prop.table(table(titanic$Survived))
# Add some customization for labels and theme.
ggplot(titanic, aes(x = Survived)) +
theme_bw() +
geom_bar() +
labs(y = "Passenger Count",
title = "Titanic Survival Rates")
###########
# What are the education levels?
# EDCAT is a factor (i.e., categorical) variable, a bar chart
# is a great visualization to use.
education <- subset(nvs2018, select = EDUCATION)
education <- na.omit(education)
# Horizontal percent bar chart of education categories.
ggplot(education, aes(x = EDUCATION)) +
geom_bar(aes(y = (..count..)/sum(..count..))) +
scale_y_continuous(labels = scales::percent) +
labs(x = "Education",
y = "Percent",
title = "Education") +
coord_flip()
## simpler bar chart
ggplot(education) +
stat_count(mapping = aes(x=EDUCATION, y=..prop.., group=1)) +
labs(x = "Education",
y = "Percent",
title = "Education") +
coord_flip() +
theme(plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,panel.border = element_blank())
|
f7cce0a5fb2e8be06d7de281eaeca2426d75609b
|
5056a6153e4ed04845843e9fd368601d081b797f
|
/R/barcode_count.R
|
4f8148243b8d7431772d3900fe7362ef4e485f0d
|
[] |
no_license
|
jessievb/RAID
|
ae68a3ed63edd26783ec417e5b48354e5a8f375f
|
cf417d21c1b7ea8b88848019f6ca71d867df748c
|
refs/heads/master
| 2020-03-20T21:27:59.269054
| 2018-06-18T12:52:24
| 2018-06-18T12:52:24
| 137,742,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,325
|
r
|
barcode_count.R
|
#' Count rows per antibody barcode (Barcode_1).
#'
#' Takes the collapsed iSeq UMI count table (one row per unique combination
#' of UMI, Barcode_1 and Barcode_2; its row_occurence_count column is
#' ignored) and counts the number of rows per Barcode_1 -- i.e. presumably
#' the number of unique UMIs observed for each antibody barcode.  The table
#' is written to a per-sample TSV file and also returned.
#'
#' @param split_data_file_path Path of the split data file; only its base
#'   name (with two extensions stripped) is used to label the output file.
#' @param UMI_count_tbl Collapsed UMI table; defaults to the global object
#'   \code{UMI_count}.
#' @param output_filename Prefix of the output TSV file.
#' @return A tibble with columns Barcode_1 and antibody_count.
#' @export
raid_barcode_count <- function(split_data_file_path,
UMI_count_tbl = UMI_count, output_filename = "output/data/barcode_count_") {
barcode_count <- UMI_count_tbl %>%
dplyr::group_by(Barcode_1) %>%
dplyr::summarize(antibody_count = n())
# NOTE(review): the inner, un-namespaced file_path_sans_ext() call relies
# on the tools package being attached; tools::file_path_sans_ext() would
# be safer.
sampleID <- basename(
tools::file_path_sans_ext(file_path_sans_ext(split_data_file_path)))
utils::write.table(barcode_count, file = paste0(output_filename, sampleID, ".tsv"),
sep = "\t", row.names = FALSE, col.names = TRUE)
barcode_count
}
#' Add antibody-specific information to the barcode count table.
#'
#' Left-joins the per-barcode counts against the antibody barcode index
#' (config/antibody_barcode_index.txt) on the columns the two tables share
#' (dplyr::left_join with no \code{by} argument), so counted barcodes gain
#' their antibody annotation.  The matched table is written to a per-sample
#' TSV file and also returned.  Barcodes with no match keep NA in the
#' annotation columns.
#'
#' @param split_data_file_path Path of the split data file; only its base
#'   name (with two extensions stripped) is used to label the output file.
#' @param barcode_count_tbl Output of \code{raid_barcode_count}; defaults
#'   to the global object \code{barcode_count}.
#' @param output_filename Prefix of the output TSV file.
#' @return The barcode count table with the antibody index columns joined on.
#' @export
raid_barcode_match <- function(split_data_file_path,
barcode_count_tbl = barcode_count,
output_filename = "output/data/barcode_count_matched_") {
antibody_barcode_tbl <- tbl_df(
read.table("config/antibody_barcode_index.txt", header = TRUE,
stringsAsFactors = FALSE))
# Ensure the join key has the same type on both sides.
barcode_count_tbl$Barcode_1 <- as.character(barcode_count_tbl$Barcode_1)
barcode_count_matched <- barcode_count_tbl %>%
dplyr::left_join(antibody_barcode_tbl, copy = TRUE)
sampleID <- basename(
tools::file_path_sans_ext(file_path_sans_ext(split_data_file_path)))
utils::write.table(
barcode_count_matched, file = paste0(output_filename, sampleID, ".tsv"), sep = "\t",
row.names = FALSE, col.names = TRUE)
barcode_count_matched
}
#' Unmatched rows of the matched barcode count table.
#'
#' Filters the matched table down to rows where \code{Ab_barcode_nr} is
#' \code{NA}, i.e. barcodes that did not match any entry in the antibody
#' barcode index.
#'
#' @param barcode_count_matched_tbl Output of \code{raid_barcode_match};
#'   defaults to the global object \code{barcode_count_matched}.
#' @return The rows with no antibody match.
#' @export
raid_barcode_match_na <- function(
  barcode_count_matched_tbl = barcode_count_matched) {
  # Return the filtered table directly; the original assigned it to a local
  # variable, which made the (implicit) return value invisible.
  barcode_count_matched_tbl %>%
    dplyr::filter(is.na(Ab_barcode_nr))
}
#' Matched rows of the matched barcode count table.
#'
#' Filters the matched table down to rows where \code{Ab_barcode_nr} is not
#' \code{NA}, i.e. barcodes that matched an entry in the antibody barcode
#' index.
#'
#' @param barcode_count_matched_tbl Output of \code{raid_barcode_match};
#'   defaults to the global object \code{barcode_count_matched}.
#' @return The rows with a successful antibody match.
#' @export
raid_barcode_match_filtered <- function(
  barcode_count_matched_tbl = barcode_count_matched) {
  # Return the filtered table directly; the original assigned it to a local
  # variable, which made the (implicit) return value invisible.
  barcode_count_matched_tbl %>%
    dplyr::filter(!is.na(Ab_barcode_nr))
}
|
edf088ad2555bbb909baf1b24f20060a9320852a
|
3bb80cde674096b52fde6d957b2a7b9c32a399ba
|
/section02/conditionals.R
|
7bee7f2ecec2fc538c7b243ef2c684e703cd0382
|
[] |
no_license
|
AmundsenJunior/r-programming-udemy
|
107e45d603a1e91febd3b300ea089e36e1b6ca42
|
b903a165941dbfd2ed9d56d1977639d6b42d5410
|
refs/heads/master
| 2020-03-14T13:50:12.054901
| 2018-04-30T20:03:06
| 2018-04-30T20:03:06
| 131,640,981
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
conditionals.R
|
# Tutorial script: classify one random standard-normal draw into one of
# three ranges and store a descriptive label in `answer`.
rm(list = ls())  # clear the workspace (tutorial convenience; avoid in real code)

# One draw from the standard normal distribution; mean and std dev could be
# supplied as the 2nd and 3rd arguments of rnorm().
x <- rnorm(1)
x

# BUG FIX: the original was missing the closing "}" before this "else",
# which made the whole script a syntax error.
if (x > 1) {
  answer <- "greater than 1"
} else if (x >= -1) {
  answer <- "between -1 and 1"
} else {
  answer <- "less than -1"
}
|
cff49bbf5796da529784bf98c7bd7596b3374008
|
2eb9544eb511850ea2a7bb40c4159015b53fa87e
|
/server.R
|
167662f215f8d924e5c71fa656d4665eed3cc4f1
|
[] |
no_license
|
pmPartch/HorseColicShiny
|
ea4cc34316192a4bf56a0eca67f13453ca9f0c07
|
660497a2b302725d59b85013b902d56c5b559a28
|
refs/heads/master
| 2021-01-10T06:19:39.197238
| 2015-11-22T05:38:11
| 2015-11-22T05:38:11
| 46,648,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,838
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(randomForest)
#model load
# Pre-trained random forest, loaded once per app startup (not on page refresh).
modelFit <- readRDS("rfMin.rds") #only wish to load this once per app startup (not with the page is refreshed)
#conversion functions
# Each helper maps a UI input (a string from ui.R) onto the numeric coding
# the horse-colic model was trained on.  They use nested ifelse(), so any
# unlisted value falls through to the last code; NA inputs propagate as NA.
# Age: adult = 1, anything else (young) = 9.
ageConvert <- function(age) ifelse(age=='adult',1,9)
# Plain numeric conversions for free-text numeric inputs.
doubleConvert <- function(temp) as.double(temp)
numericConvert <- function(temp) as.numeric(temp)
# Temperature of extremities: Normal=1, Warm=2, Cool=3, otherwise 4.
extm_tempConvert <- function(temp) ifelse(temp=='Normal', 1, ifelse(temp=='Warm',2,ifelse(temp=='Cool',3,4)))
# Peripheral pulse: Normal=1, Increased=2, Reduced=3, otherwise 4.
periph_pulseConvert <- function(temp) ifelse(temp=='Normal',1,ifelse(temp=='Increased',2,ifelse(temp=='Reduced',3,4)))
# Mucous membrane colour: six ordered categories, anything unlisted = 6.
muc_membConvert <- function(temp) ifelse(temp=='Normal Pink',1,ifelse(temp=='Bright Pink',2,ifelse(temp=='Pale Pink',3,ifelse(temp=='Pale Cyanotic',4,ifelse(temp=='Bright Red',5,6)))))
# Capillary refill time: under 3 seconds = 1, otherwise 2.
cap_refilConvert <- function(temp) ifelse(temp=='Less than 3 seconds',1,2)
# Pain level: five ordered categories, anything unlisted = 5.
painConvert <- function(temp) ifelse(temp=='no pain',1,ifelse(temp=='depressed',2,ifelse(temp=='intermittent mild pain',3,ifelse(temp=='intermittent severe pain',4,5))))
# Peristalsis: hypermotile=1, normal=2, hypomotile=3, otherwise 4.
peristalsisConvert <- function(temp) ifelse(temp=='hypermotile',1,ifelse(temp=='normal',2,ifelse(temp=='hypomotile',3,4)))
# Abdominal distension: none=1, slight=2, moderate=3, otherwise 4.
abd_distConvert <- function(temp) ifelse(temp=='none',1,ifelse(temp=='slight',2,ifelse(temp=='moderate',3,4)))
#server function
# Renders the random forest's confusion matrix and a live prediction built
# from the eleven clinical inputs defined in ui.R.
shinyServer(function(input, output) {
# OOB confusion matrix of the pre-trained model loaded above.
output$progOutput <- renderTable({modelFit$finalModel$confusion})
#ageData <- reactive(ifelse(input$ageInput == 'adult',1,9))
#newdf <- data.frame("age"={ageConvert(input$ageInput)})
#output$progOutput <- renderPrint('newdf')
#output$predictOutput <- renderPrint({ageData(input$ageInput)})
# Re-evaluated whenever any input changes: assemble a one-row data frame
# with the numeric codings the model expects, then predict the outcome.
output$predictOutput <- renderText({
newdf <- data.frame("age"=ageConvert(input$ageInput),
"rect_temp"=doubleConvert(input$rect_tempInput),
"pulse"=numericConvert(input$pulseInput),
"resp_rate"= numericConvert(input$resp_rateInput),
"extm_temp"=extm_tempConvert(input$extm_tempInput),
"periph_pulse"=periph_pulseConvert(input$periph_pulseInput),
"muc_memb"=muc_membConvert(input$muc_membInput),
"cap_refil"=cap_refilConvert(input$cap_refilInput),
"pain"=painConvert(input$painInput),
"peristalsis"=peristalsisConvert(input$peristalsisInput),
"abd_dist"=abd_distConvert(input$abd_distInput))
pred <- predict(modelFit,newdf)
# Predicted class 1 is treated as a good outcome -- TODO confirm against
# the training data's label coding.
ifelse(pred==1,"Good Recovery Expected","Urgent Care Required")
})
})
|
908858740ad5bdde9ad5e988c3922b34f23db353
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/fpu/fpu-10Xh-correct02-nonuniform-depth-15/fpu-10Xh-correct02-nonuniform-depth-15.R
|
f50bf1d1a51b201e2b5a5605ac94ae2272509532
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
fpu-10Xh-correct02-nonuniform-depth-15.R
|
43fa82174ead4f88f12fa27cebf7ce0a fpu-10Xh-correct02-nonuniform-depth-15.qdimacs 412577 1101179
|
98776bada73d5a5375df677d30baed00bc3d8206
|
4cc92a349885a505896de9056887465f5db40c76
|
/code/NC13Huddle.R
|
3419260339b01cd48464e695a55385a6616a440a
|
[] |
no_license
|
guanjiahui/Social-Network_rhesus-macaques
|
cdaba33cbc333c00e67963e7dffd2c622d66e333
|
aee1f7583c168ca17a9c83bc0659d1e645cb96e6
|
refs/heads/master
| 2020-04-18T14:39:45.612811
| 2019-01-25T18:46:08
| 2019-01-25T18:46:08
| 167,594,532
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,145
|
r
|
NC13Huddle.R
|
#############
#process the data
# Load the NC13 round-1 huddling matrix (first row/column are labels) and
# symmetrise it: entry [i,j] becomes the total of both directions.
# NOTE(review): read.csv + as.matrix may yield character values depending on
# the file's contents, in which case the "+" below would error -- verify.
NC13_RU1_Huddling_Matrix <- read.csv("~/Dropbox/Research/SNH_health profile data for Fushing-selected/NC13_RU1_Huddling_Matrix.csv", header=FALSE)
NC13HuddleR1=as.matrix(NC13_RU1_Huddling_Matrix[-1,-1])
colnames(NC13HuddleR1)=NC13_RU1_Huddling_Matrix[-1,1]
rownames(NC13HuddleR1)=NC13_RU1_Huddling_Matrix[-1,1]
HuddleR1=matrix(0,nrow(NC13HuddleR1),ncol(NC13HuddleR1))
for (i in 1:ncol(NC13HuddleR1)){
for (j in 1:ncol(NC13HuddleR1)){
HuddleR1[i,j]=NC13HuddleR1[i,j]+NC13HuddleR1[j,i]
}
}
NC13HuddleR1=HuddleR1
####################
# Network visualisations of the symmetrised huddling matrix.
# NOTE(review): y12 is used here but only defined further below (cutree on
# DCG.huddle1) -- this script is evidently meant to be run interactively /
# iteratively, not top to bottom.
library(network)
GhuddleR1=network(NC13HuddleR1,directed=FALSE,matrix.type="adjacency")
plot(GhuddleR1,vertex.col=2+y12,vertex.cex=1.5,interactive=TRUE)
library(igraph)
GHR1=graph.adjacency(NC13HuddleR1,mode="undirected")
plot(GHR1,vertex.color=y12, vertex.frame.color="#ffffff")
l <- layout.random(GHR1)
plot(GHR1,layout=l,edge.arrow.size=.1,vertex.color=y12)
l<-layout.circle(GHR1)
l<-layout.sphere(GHR1)
################
################
#now doing DCG
##################
#NC13HuddleR1_dist=dist(NC13HuddleR1,diag=TRUE,upper=TRUE)
#NC13HuddleR1_dist=as.matrix(NC13HuddleR1_dist)
############
# Data Cloud Geometry clustering: build ensemble matrices at four
# temperatures (Eigen.plot2, defined at the bottom of this file), then the
# tree.  DCGtree.plot is not defined in this file -- presumably a project
# helper; TODO confirm.
temp=c(0.2,0.3,10,100)
Ens.huddle1=Eigen.plot2(temp, selected.id=c(1,2,3,4),NC13HuddleR1)
DCG.huddle1=DCGtree.plot(num.clusters.selected=c(2,2,17,19),
"NC13HuddleR1 tree",Ens.huddle1,temp)
########
# Heatmaps of the raw matrix, with and without the DCG-tree ordering.
heatmap.2(NC13HuddleR1,Rowv=as.dendrogram(DCG.huddle1),Colv=as.dendrogram(DCG.huddle1),
trace="none",col =colorRampPalette(c("white","green","green4","violet","purple"))(100))
heatmap.2(NC13HuddleR1,col =colorRampPalette(c("white","green","green4","violet","purple"))(100),
trace="none")
############
plot(DCG.huddle1,hang=-1)
library(sparcl)
# colors the leaves of a dendrogram
# y1 / y12: 17-cluster and 2-cluster cuts of the round-1 tree.
# NOTE(review): nameR1 is used here but defined only further below.
y1 = cutree(DCG.huddle1, 17)
y12=cutree(DCG.huddle1,2)
ColorDendrogram(DCG.huddle1, y = y1, main = "NC13HuddleR1 tree",xlab="",
labels=nameR1,branchlength = 1)
#another way to color it
#den.huddle1=as.dendrogram(DCG.huddle1)
#dend2=cut(den.huddle1,h=3)
#plot(den.huddle1,hang=-1,nodePar = list(col=1:2))
#####################
#########################
# Totals per round.  NC13HuddleR2/R3 are not built in this file --
# presumably loaded from companion scripts; TODO confirm.  Individuals 37
# and 90 are dropped from round 1 so the three rounds are comparable.
s1=sum(NC13HuddleR1)
s2=sum(NC13HuddleR2)
s3=sum(NC13HuddleR3)
NC13HuddleR1.del=NC13HuddleR1[-c(37,90),]
NC13HuddleR1.del=NC13HuddleR1.del[,-c(37,90)]
s1.del=sum(NC13HuddleR1.del)
# Per-individual huddling totals for each round.
eachR1=colSums(NC13HuddleR1)
eachR2=colSums(NC13HuddleR2)
eachR3=colSums(NC13HuddleR3)
eachR1.del=colSums(NC13HuddleR1.del)
eachSum=cbind(eachR1.del,eachR2,eachR3)
nameR1=colnames(NC13HuddleR1)
nameR2=colnames(NC13HuddleR2)
nameR3=colnames(NC13HuddleR3)
nameR1.del=nameR1[-c(37,90)]
###############
# Trajectory plots across the three rounds.
# NOTE(review): bb2 is used by axis() before it is assigned two lines later;
# works only on a second interactive run.
plot(c(s1,s2,s3),type="b")
plot(c(s1.del,s2,s3),type="b",ylab="Huddle",ylim=c(min(s3),max(s1)),
xaxt="n",main="NC13 Huddling Total")
axis(1, at=1:3, labels=bb2)
matplot(t(eachSum), t="l", lty=1, las=1, ylab="Huddle",
xlab="Time", xaxt="n",main="NC13 Huddling")
bb2=c("baseline","pertubation","postpertubation")
axis(1, at=1:3, labels=bb2)
############
#put them into 17 groups
matplot(t(eachSum), t="l", lty=1, las=1, ylab="Huddle",col=y1,
xlab="Time", xaxt="n",main="NC13")
bb2=c("baseline","pertubation","postpertubation")
axis(1, at=1:3, labels=bb2)
############
#put them into 2 groups
matplot(t(eachSum), t="l", lty=1, las=1, ylab="Huddle",col= cutree(DCG.huddle1,2),
xlab="Time", xaxt="n",main="NC13")
bb2=c("baseline","pertubation","postpertubation")
axis(1, at=1:3, labels=bb2)
############
# NOTE(review): y2, SeachR1.del/SeachR2/SeachR3 and Groom are not defined in
# this file -- this pairs() call looks copied from the grooming analysis.
Huddle=data.frame(cbind(eachSum,t(t(y2))))
pairs(~eachR1.del+SeachR1.del+eachR2 + SeachR2+eachR3+ SeachR3,data=Groom,
main="Simple Scatterplot Matrix")
# Density curves of per-individual totals; dashed lines mark the two
# removed individuals (37, 90).
plot(density(eachR1.del),ylim=c(0,0.04),main="Huddling density",lwd=3)
lines(density(eachR2),col=2,lwd=3)
lines(density(eachR3),col=4,lwd=3)
abline(v=eachR1[37],col="yellow",lwd=3,lty=2)
abline(v=eachR1[90],col="green",lwd=3,lty=2)
legend("topright",c("baseline","perturbation","postperturbation"),
lty=c(1,1,1),col=c(1,2,4),cex=1.3)
###################################
#next to compute the entropy
# Entropy contribution -p*log(p) of one category holding m of N members.
# By convention the contribution of an empty category (p == 0) is 0.
Entropy <- function(m, N) {
  p <- m / N
  if (p == 0) {
    return(0)
  }
  -log(p) * p
}
##############
# Conditional-entropy comparison of two cluster assignments.
#
# For every cluster k of Entry1, computes the Shannon entropy of the Entry2
# labels carried by that cluster's members, then forms a size-weighted sum
# of those entropies, normalised by the unconditional entropy of Entry2
# (Entropy_de).  Assumes cluster labels in Entry1 are coded 1..K and that
# both assignments describe the same individuals in matching positions;
# name1/name2 supply the individual names so the weight denominator is the
# number of individuals common to both networks.
Entropy_sequence <- function(Entry1, Entry2, name1, name2) {
  n.clusters <- length(unique(Entry1))
  entropy <- numeric(n.clusters)
  weighted.sum <- 0
  N_total <- length(intersect(name1, name2))
  for (k in seq_len(n.clusters)) {
    members <- which(Entry1 == k)
    cluster.size <- length(members)
    labels <- Entry2[members]
    for (lab in unique(labels)) {
      m <- length(which(labels == lab))
      entropy[k] <- entropy[k] + Entropy(m, cluster.size)
    }
    weighted.sum <- weighted.sum + (cluster.size / N_total) * entropy[k]
  }
  # Normalise by the overall entropy of the second assignment.
  baseline <- Entropy_de(Entry2)$Bottom
  return(list(Entropy = entropy, Sum = weighted.sum / baseline))
}
###############################
# Shannon entropy of a cluster assignment whose labels are coded 1..K.
# Returned as list(Bottom = ...) because callers use it as a normalising
# denominator.
Entropy_de <- function(Entry1) {
  n.clusters <- length(unique(Entry1))
  total <- 0
  for (k in seq_len(n.clusters)) {
    p <- length(which(Entry1 == k)) / length(Entry1)
    total <- total - log(p) * p
  }
  return(list(Bottom = total))
}
# Entropy comparison of cluster assignments across the three rounds.
# NOTE(review): y1..y3, y12..y32 are the cluster cuts from the three rounds'
# trees; y2/y3/y22/y32 are not created in this file (presumably by the
# companion round-2/3 scripts) -- TODO confirm.
name1=colnames(NC13HuddleR1)
name2=colnames(NC13HuddleR2)
name3=colnames(NC13HuddleR3)
#################################
# Fine (17-ish group) and coarse (2-group) assignments, with individuals
# 37 and 90 dropped from round 1 to align the rounds.
Entry1=y1[-c(37,90)]
Entry2=y2
Entry3=y3
Entry12=y12[-c(37,90)]
Entry22=y22
Entry32=y32
# NOTE(review): Entropy_sequence() requires four arguments (name1, name2
# are used for the weight denominator) -- these two-argument calls will
# error as written.
SM12=Entropy_sequence(Entry1,Entry2)$Entropy
SM13=Entropy_sequence(Entry1,Entry3)$Entropy
SM21=Entropy_sequence(Entry2,Entry1)$Entropy
SM23=Entropy_sequence(Entry2,Entry3)$Entropy
SM31=Entropy_sequence(Entry3,Entry1)$Entropy
SM32=Entropy_sequence(Entry3,Entry2)$Entropy
########
# Per-cluster entropy profiles for every ordered pair of rounds.
par(mfrow=c(3,2))
plot(SM12,type="b",
xlab="group", ylab="Entropy",main="Entropy of Baseline vs Perturbation")
plot(SM21,type="b",
xlab="group", ylab="Entropy",main="Entropy of Perturbation vs Baseline")
plot(SM23,type="b",
xlab="group", ylab="Entropy",main="Entropy of perturbation vs post-pert ")
plot(SM32,type="b",
xlab="group", ylab="Entropy",main="Entropy of Post-pert vs perturbation")
plot(SM13,type="b",
xlab="group", ylab="Entropy",main="Entropy of Baseline vs post-pert")
plot(SM31,type="b",
xlab="group", ylab="Entropy",main="Entropy of Post-pert vs Baseline")
##############
# Pairwise normalised entropy sums between rounds (fine assignments 1-3,
# coarse assignments 4-6), printed as symmetrised totals.
Entrpy=list()
Entrpy[[1]]=y1[-c(37,90)]
Entrpy[[2]]=y2
Entrpy[[3]]=y3
Entrpy[[4]]=y12[-c(37,90)]
Entrpy[[5]]=y22
Entrpy[[6]]=y32
Name=list(name1,name2,name3)
En=matrix(0,3,3)
for (i in 1:3){
for (j in 1:3){
if (i!=j)
En[i,j]=Entropy_sequence(Entrpy[[i]],Entrpy[[j]],Name[[i]],Name[[j]])$Sum
}
}
for (i in 1:3)
for (j in 1:3)
cat(En[i,j]+En[j,i],"i",i,"j",j,"\n")
###############
#######
# Same analysis on the coarse 2-group assignments.
# NOTE(review): two-argument Entropy_sequence() calls again -- see above.
bSM12=Entropy_sequence(Entry12,Entry22)$Entropy
bSM13=Entropy_sequence(Entry12,Entry32)$Entropy
bSM21=Entropy_sequence(Entry22,Entry12)$Entropy
bSM23=Entropy_sequence(Entry22,Entry32)$Entropy
bSM31=Entropy_sequence(Entry32,Entry12)$Entropy
bSM32=Entropy_sequence(Entry32,Entry22)$Entropy
par(mfrow=c(3,2))
plot(bSM12,type="b",ylim=c(0,1.0),
xlab="group", ylab="Entropy",main="Entropy of Baseline vs Perturbation")
plot(bSM21,type="b",ylim=c(0,1.0),
xlab="group", ylab="Entropy",main="Entropy of Perturbation vs Baseline")
plot(bSM23,type="b",ylim=c(0,1.0),
xlab="group", ylab="Entropy",main="Entropy of perturbation vs post-pert ")
plot(bSM32,type="b",ylim=c(0,1.0),
xlab="group", ylab="Entropy",main="Entropy of Post-pert vs perturbation")
plot(bSM13,type="b",ylim=c(0,1.0),
xlab="group", ylab="Entropy",main="Entropy of Baseline vs post-pert")
plot(bSM31,type="b",ylim=c(0,1.0),
xlab="group", ylab="Entropy",main="Entropy of Post-pert vs Baseline")
En2=matrix(0,3,3)
for (i in 1:3){
for (j in 1:3){
if (i!=j)
En2[i,j]=Entropy_sequence(Entrpy[[i+3]],Entrpy[[j+3]],Name[[i]],Name[[j]])$Sum
}
}
for (i in 1:3)
for (j in 1:3)
cat(En2[i,j]+En2[j,i],"i",i,"j",j,"\n")
###################
#####################
#normalize the adjacency matrix
# NOTE(review): EN is not defined anywhere in this file, so EN1=EN will
# error; the effective normalisation is the Eheat1 block below (values < 5
# scaled to [0,1), values >= 5 capped at 1).
EN1=EN
EN1[which(EN>5)]=5
Eheat1=NC13HuddleR1
small1=which(NC13HuddleR1<5,arr.ind=TRUE)
Eheat1[small1]=NC13HuddleR1[small1]/5
Eheat1[which(NC13HuddleR1>=5,arr.ind=TRUE)]=1
#######
# Re-run the DCG clustering on the capped/normalised matrix.
temp=c(0.2,0.3,2,100)
Ens.heat1=Eigen.plot2(temp, selected.id=c(1,2,3,4),Eheat1)
DCG.heat1=DCGtree.plot(num.clusters.selected=c(2,2,15,17),
"NC13HuddleR1 tree",Ens.heat1,temp)
########
heatmap.2(Eheat1,col =colorRampPalette(c("white","green","green4","violet","purple"))(100),
trace="none")
heatmap.2(Eheat1,Rowv=as.dendrogram(DCG.heat1),Colv=as.dendrogram(DCG.heat1),
trace="none",
col =colorRampPalette(c("white","green","green4","violet","purple"))(100))
############################
#d=heatmap(Eheat1)
#HC1=Eheat1[d$rowInd,d$colInd]
#D1=Eheat1[DCG.heat1$order,DCG.heat1$order]
#GetBipEnergy(HC1)
#GetBipEnergy(D1)
plot(DCG.heat1,hang = -1)
# Re-cut of the ORIGINAL round-1 tree (note: overwrites the earlier y1/y12
# with a 15-group cut).
y1 = cutree(DCG.huddle1, 15)
y12=cutree(DCG.huddle1,2)
ColorDendrogram(DCG.huddle1, y = y1, main = "NC13HuddleR1 tree",xlab="",
branchlength = 5)
######
#double check Temperature selection
temp=c(0.1,0.2,0.3,0.5,0.8,1,2,2000)
Ens.h1=Eigen.plot2(temp, selected.id=c(1,2,3,4,5,6,7,8),Eheat1)
#visualize the entropy
insertE=function(ary,ind,value){
afterInsert=ary
for (i in 1:length(ind)){
temp=afterInsert[ind[i]:length(afterInsert)]
afterInsert[ind[i]]=value[i]
afterInsert[(ind[i]+1):(length(afterInsert)+1)]=temp
}
return(afterInsert)
}
y2.append=insertE(y2,c(37,90),c(0,0))
y3.append=insertE(y3,c(37,90),c(0,0))
ColorDendrogram(DCG.huddle1, y = y3.append,
main = "NC13HuddleR1 tree (post)",xlab="",
branchlength = 3)
#####################
NameHuddleR1=colnames(NC13HuddleR1)
NameHuddleR2=colnames(NC13HuddleR2)
NameHuddleR3=colnames(NC13HuddleR3)
save(DCG.huddle1,DCG.huddle2,DCG.huddle3,
NameHuddleR1,NameHuddleR2,NameHuddleR3,
file = "HuddlingTREE.RData")
##############################
# Build DCG ensemble matrices at selected temperatures and plot the leading
# eigenvalue profiles of each ensemble's normalised Laplacian (used to pick
# the number of clusters per temperature).
#
# tempinv     : vector of candidate (inverse) temperatures
# selected.id : indices into tempinv to actually evaluate
# D           : the (dis)similarity / adjacency matrix being clustered
# returns a list of ensemble matrices, one per selected temperature
#
# NOTE(review): EstClust() and GetSim2() are not defined in this file --
# presumably DCG project helpers; TODO confirm.
Eigen.plot2=function(tempinv,selected.id,D){
tempinv.selected <- tempinv[selected.id]
ensM<- list() # ensemble matrix at each selected temperature
for ( i in 1:length(selected.id))
ensM[[i]]=EstClust(GetSim2(D,tempinv.selected[i]), MaxIt=1000, m=5)
#check eigenvalues
par(mfrow=c(2,2))
for (j in 1:length(selected.id)){
Ens=ensM[[j]]
N <- nrow(Ens)
# D^(-1/2) of the ensemble's row sums.  NOTE(review): a zero row sum
# would produce Inf here.
Dinvsqrt <- diag(sapply(1:N, function(i) 1/sqrt(sum(Ens[i,]))))
# Symmetric normalised Laplacian I - D^(-1/2) E D^(-1/2); its spectrum
# (rescaled by the leading eigenvalue) reveals the cluster count via the
# eigengap.
Lsym <- diag(N) - Dinvsqrt %*% Ens %*% Dinvsqrt
Eigen <- eigen(Lsym)$values
Eigen <- sort(1 - Eigen/Eigen[1], decreasing=TRUE)
#cat(Eigen[1:25],"\n")
# cat("difference",diff(Eigen[1:20]),"\n")
plot(Eigen[1:15],type="b",main=paste(j,"T=",tempinv.selected[j]))
#barplot(Eigen[1:25],main=j)
# plot(diff(Eigen[1:20]),type="b")
}
#for (j in 1:length(selected.id))
# heatmap(ensM[[j]],main=j)
return(ensM)
}
|
83f6a09761af6c9aec1c6707e0e9b8ec9ff80459
|
52e4526a947689c1c63453e1a6b4a6c1e20513c4
|
/man/fsOrder.Rd
|
b15dd65c3221b18df79654cab0d2e53d8285a9d7
|
[] |
no_license
|
cran/robustfa
|
eeb6dbed14e04edb0c44f6cd697bb732dc57c7b1
|
c1951b364fadb7f15a62ce471979c34e12fe3b67
|
refs/heads/master
| 2023-05-06T21:28:30.116250
| 2023-04-16T13:40:02
| 2023-04-16T13:40:02
| 17,699,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,158
|
rd
|
fsOrder.Rd
|
\name{fsOrder}
\alias{fsOrder}
\title{
Compute the Ordered Factor Scores
}
\description{
Compute the ordered factor scores according to the first/second/third... column of the original factor scores.
}
\usage{
fsOrder(factorScores)
}
\arguments{
\item{factorScores}{
The original factor scores.
}
}
%% \details{}
\value{
A list with \code{m} (the number of factors) components:
\item{[[1]] }{The ordered factor scores with a decreasing first column.}
\item{[[2]] }{The ordered factor scores with a decreasing second column.}
\item{... }{}
\item{[[m]] }{The ordered factor scores with a decreasing m-th column.}
}
\references{
Zhang, Y. Y. (2013), An Object Oriented Solution for Robust Factor Analysis.
}
\author{ Ying-Ying Zhang (Robert) \email{robertzhangyying@qq.com} }
\seealso{
\code{\link{order}}
}
\examples{
data(stock611)
R611=cor(stock611[,3:12]); R611
## FS.pca contains scores etc.
fsPca=factorScorePca(x = stock611[,3:12], factors = 2, cor = TRUE,
rotation = "varimax", scoresMethod = "regression"); fsPca
orderedFS=fsOrder(fsPca$scores); orderedFS
}
\keyword{robust}
|
90855dfcbba43cd815ffba2e040096ad852b7877
|
8521eda607ce938b257845af1b25f106e86c9f2a
|
/Shiny Sample Sizes/ShinySampleSizesDocumentation/References/R Worksheets/Examples Worksheet.R
|
46059ce818157043334894fb6c8c9c3dacd575bd
|
[] |
no_license
|
mattpartridge/ShinySampleSizes
|
1e6ee5dcecb5db706b4f275571fa299fcbcbcee9
|
65ae8016f3ef2017e40d70e38d0de141b67b4dd5
|
refs/heads/master
| 2021-01-10T07:29:12.635874
| 2017-10-29T14:09:15
| 2017-10-29T14:09:15
| 49,987,528
| 0
| 1
| null | 2016-10-16T19:08:43
| 2016-01-19T22:26:30
|
R
|
UTF-8
|
R
| false
| false
| 2,278
|
r
|
Examples Worksheet.R
|
# Data Prep -----------------------------------------------------------------
# Sample-size / power worksheet based on the survival package's flchain data.
library(survival)
## FL: free light chain data with a kappa/lambda ratio and derived groupings
fl <- flchain
fl$kl <- fl$kappa / fl$lambda
# kl contains NAs (the original already used na.rm for median), so na.rm is
# needed on quantile() too or it errors.
fl$kl.med <- as.factor(ifelse(fl$kl < median(fl$kl, na.rm = TRUE), "Low", "High"))
fl$kl.2575 <- as.factor(ifelse(fl$kl <= quantile(fl$kl, na.rm = TRUE)[2], "<25th",
                               ifelse(fl$kl >= quantile(fl$kl, na.rm = TRUE)[4], ">75th", NA)))
fl$mgus <- as.factor(fl$mgus)
fl$death <- as.factor(fl$death)
fl$sex <- as.factor(fl$sex)
## MGUS = 1 subset
mgus <- fl[fl$mgus == 1, ]
## Male count
length(fl$sex[fl$sex == "M"])
# TTE: SAMPLE SIZE AND POWER Get kaplan meyer estimate of hazard rate of death at 1 year. Hazard rates for MGUS = 1 x sex or some quartile of KL Rate (above/below median)
# Analysis
## One Mean
summary(mgus$kl) # 2.35
sd(mgus$kl) # 4.37
## One Proportion
# length(mgus$death[mgus$death == 1])/length(mgus$death) # 0.14
# length(fl$mgus[fl$mgus == 1])/length(fl$mgus) # 0.015
length(fl$sex[fl$sex == "F"]) / length(fl$sex) # 0.55
## Two Means
by(mgus$kl, mgus$sex, summary) # F = 2.01, M = 2.83
## Two Proportions
length(mgus$death[mgus$death == 1]) / length(mgus$death) # 0.14
## Time to Event
km.mgus <- survfit(Surv(futime, death) ~ mgus, data = fl)
summary(km.mgus) # NoMGUS = 0.034, MGUS = 0.01
max(fl$futime) / 365.25 # Longest amount of follow up = 14.3 Years
length(fl$mgus[fl$mgus == 0]) / length(fl$mgus[fl$mgus == 1]) # ~= 67:1 MGUS:NoMGUS (0.01)
by(fl$death, fl$mgus, summary)
5606 / (5606 + 2153) # NoMGUS = .72 prop censored
99 / (99 + 16) # MGUS = 0.86 prop censored
length(fl$mgus[fl$mgus == 1]) / length(fl$mgus[fl$mgus == 0])
# NOTE(review): the original read `ratio = l/67` -- `l` (letter ell) was
# undefined and almost certainly a typo for `1` (the 67:1 ratio above).
# nSurvival() comes from the gsDesign package, which is not loaded here;
# confirm before running.
nSurvival(ratio = 1 / 67)
# Time to Event Example
median(fl$kl[fl$sex == "M"]) # 0.86
ceiling(max(fl$futime[fl$sex == "M"]) / 365.25) # Study Duration = 15
summary(fl[fl$sex == "M", "kl.med"]); 1841 / 1683 # Allocation Ratio = 1.09
km.kl.M <- survfit(Surv(futime, death) ~ kl.med, data = fl[fl$sex == "M", ])
summary(km.kl.M) # > Median = 0.04, < Median = 0.03
nrow(fl[fl$sex == "M", ]) # Sample Size = 3524,
median(fl$kl[fl$sex == "F"]) # 0.83
ceiling(max(fl$futime[fl$sex == "F"]) / 365.25) # Study Duration = 15
summary(fl[fl$sex == "F", "kl.med"]); 2096 / 2254 # Allocation Ratio = 0.93
km.kl.F <- survfit(Surv(futime, death) ~ kl.med, data = fl[fl$sex == "F", ])
summary(km.kl.F) # > Median = 0.03, < Median = 0.02
nrow(fl[fl$sex == "F", ]) # Sample Size = 4350,
|
7835c2f4f332d7c9195f4b3d251f9ed907fd67cb
|
8e3e9d61fbe2640f2b7bcd128647dd36dc6b60e6
|
/man/celda_G.Rd
|
4f7c9dc3f841e71115e510622e65e15eb4608aef
|
[
"GPL-2.0-only",
"MIT"
] |
permissive
|
AndrewGr12/celda
|
bcf115559635d8c7447f38642e0d9be78abd5a69
|
8ac2eeaa64f3204c45bca63732276573717381b2
|
refs/heads/master
| 2020-03-25T03:20:27.937760
| 2018-07-24T22:38:00
| 2018-07-24T22:38:00
| 143,336,504
| 0
| 0
|
MIT
| 2018-08-02T19:26:39
| 2018-08-02T19:26:38
| null |
UTF-8
|
R
| false
| true
| 2,111
|
rd
|
celda_G.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/celda_G.R
\name{celda_G}
\alias{celda_G}
\title{celda Gene Clustering Model}
\usage{
celda_G(counts, L, beta = 1, delta = 1, gamma = 1, stop.iter = 10,
max.iter = 200, split.on.iter = 10, split.on.last = TRUE,
count.checksum = NULL, seed = 12345, y.init = NULL, logfile = NULL)
}
\arguments{
\item{counts}{A numeric count matrix}
\item{L}{The number of clusters to generate}
\item{beta}{The Dirichlet distribution parameter for Phi; adds a pseudocount to each transcriptional state within each cell.}
\item{delta}{The Dirichlet distribution parameter for Eta; adds a gene pseudocount to the numbers of genes each state. Default to 1.}
\item{gamma}{The Dirichlet distribution parameter for Psi; adds a pseudocount to each gene within each transcriptional state.}
\item{stop.iter}{Number of iterations without improvement in the log likelihood to stop the Gibbs sampler. Default 10.}
\item{max.iter}{Maximum iterations of Gibbs sampling to perform regardless of convergence. Default 200.}
\item{split.on.iter}{On every 'split.on.iter' iteration, a heuristic will be applied to determine if a gene/cell cluster should be reassigned and another gene/cell cluster should be split into two clusters. Default 10.}
\item{split.on.last}{After the chain has converged according to 'stop.iter', a heuristic will be applied to determine if a gene/cell cluster should be reassigned and another gene/cell cluster should be split into two clusters. If a split occurs, then 'stop.iter' will be reset. Default TRUE.}
\item{count.checksum}{An MD5 checksum for the provided counts matrix}
\item{seed}{Parameter to set.seed() for random number generation.}
\item{y.init}{Initial values of y. If NULL, y will be randomly sampled. Default NULL.}
\item{logfile}{The name of the logfile to redirect messages to.}
}
\description{
Provides cluster assignments for all genes in a provided single-cell
sequencing count matrix, using the celda Bayesian hierarchical model.
}
\keyword{LDA}
\keyword{clustering}
\keyword{gene}
\keyword{gibbs}
|
e9c065cbeef8aa9a94d2ba7f63bb41722c5a3672
|
f4778157f1298955553edd0b216ea87b2618ba46
|
/man/ks.heatmap.Rd
|
ecf02a5b93062f2062c3bbabc9bbc3536c6cb768
|
[] |
no_license
|
mgkaszkowiak/miRNAselector
|
1ca865c1cc045e74e64c94c479c500d6b629ef1a
|
a9baf719168b0ee652ce2a977cfa3bda3a61a374
|
refs/heads/master
| 2022-04-21T23:13:21.007912
| 2020-04-20T22:57:22
| 2020-04-20T22:57:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 661
|
rd
|
ks.heatmap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ks.heatmap.R
\name{ks.heatmap}
\alias{ks.heatmap}
\title{ks.heatmap}
\usage{
ks.heatmap(
x = trainx[, 1:10],
rlab = data.frame(Batch = dane$Batch, Class = dane$Class),
zscore = F
)
}
\arguments{
\item{x}{Matrix of log-transformed TPM-normalized counts with miRNAs in columns and cases in rows.}
\item{rlab}{Data frame of factors to be marked on heatmap (like batch or class). Maximum of 2 levels for every variable is supported.}
\item{zscore}{Whether to z-score values before clustering and plotting.}
}
\value{
Heatmap.
}
\description{
Draw a heatmap of selected miRNAs.
}
|
17f318a6cf72f47db4d100f0846022c0ca7fb38f
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610035521-test.R
|
6cbc10be016d970224eae210e765cfa93692bcde
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
r
|
1610035521-test.R
|
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(1.51067888575209e-314, 0, 2.90905852271326e-319, 1.1125369292536e-307, 7.2911220195564e-304, 8.48798319399909e-314, 3.20506244267395e-310, 0, 0, 2.12276966337746e-313, 8.81442565517813e-280, 0, 0, 1.72085029849862e-260, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 7L)))
# Auto-generated fuzz-test driver: invoke the internal C entry point with the
# generated argument list above and print the structure of whatever comes
# back. The point is crash/UB (valgrind) detection, not a value check.
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result)
|
a4c6462602c6747e1b27a080b5d3511634e40552
|
fa0a5cbe982bee427d9f8c46018a849154d3d290
|
/U.S. Green Vehicle Outlook/code/summary_table.R
|
269fa6cd6f382f72a3434040467a48e54208faa7
|
[] |
no_license
|
chengz51/ClassProjects
|
b16975639a96b83b72edcc13070935f442c9c110
|
c31f3ab61ec863139127f5f22ec237993bc92c76
|
refs/heads/main
| 2023-03-28T02:01:55.086671
| 2021-03-26T17:19:20
| 2021-03-26T17:19:20
| 351,322,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,489
|
r
|
summary_table.R
|
# Builds a 1994-2014 summary table joining US EV population counts,
# greenhouse-gas emissions, and gasoline prices by Year.
library(dplyr)
source("scripts/population_function.R")
#( group_by function already used in population_function)
# load in all the datasets
# NOTE(review): ev_num_data is read here, but the 1994-2014 subset below is
# built from ev_pop_table (defined in population_function.R) -- confirm
# which source is intended.
ev_num_data <- read.csv("data/SupplyData.csv", stringsAsFactors = FALSE)
ghg_emisson <- read.csv("data/us-ghg-emissions_fig-1.csv",
stringsAsFactors = FALSE
)
gas_prices <- read.csv("data/10641_gasoline_prices_by_year.csv",
stringsAsFactors = FALSE
)
# Add snake_case copies of the gas-price columns.
gas_prices <- gas_prices %>%
mutate(
Year = Year,
gasoline_price = Gasoline.Price,
inflation_adjuster = Inflation.Adjuster,
gasoline_price_2018 = Gasoline.Price..2018.
)
# filter out the info between year 1994-2014
ev_num_data9414 <- ev_pop_table %>%
filter(Year < 2015) %>%
select(Year, ttl_num)
# Total GHG = sum of the four gas categories reported in the EPA table.
ghg_emisson9414 <- ghg_emisson %>%
mutate(
ttl_ghg =
Carbon.dioxide +
Methane +
Nitrous.oxide +
HFCs..PFCs..SF6..and.NF3
) %>%
filter(Year > 1993) %>%
select(Year, ttl_ghg)
gas_prices9414 <- gas_prices %>%
filter(Year > 1993 & Year < 2015) %>%
select(Year, Gasoline.Price..2018.)
# join the tables together to get summary information
tb1 <- left_join(ev_num_data9414,
ghg_emisson9414,
by = "Year"
)
summary9414 <- left_join(tb1, gas_prices9414,
by = "Year"
)
col_names <- c(
"Year", "number of EV population in US",
"Greenhouse Emission (million metric tons)",
"Gasoline current price ($/gallon)"
)
# Keep rows 7-21 of the joined table (presumably years 2000-2014 -- confirm)
# and apply human-readable column labels.
a <- summary9414[7:21, ]
colnames(a) <- col_names
# Identity wrapper kept for interface compatibility with the report code:
# returns its argument unchanged.
table_function <- function(a) {
  a
}
|
1e22cefc1a7b9ae316a862724652de3efca705c7
|
24e3d5250f2b8a56b90810451b97a3073f186b71
|
/scripts/week_4_class_code.R
|
31c752c43662eaf52077b953031797c3869b799d
|
[] |
no_license
|
gge-ucd/r-davis-in-class-jasgre
|
6c93bfda5a14dfbecdd5b239d5dd99cc57c93d7e
|
79bc92b30471db173f5fbc0bbaeeb09066d438e3
|
refs/heads/master
| 2020-04-17T15:47:29.387532
| 2019-05-27T03:28:45
| 2019-05-27T03:28:45
| 166,713,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,446
|
r
|
week_4_class_code.R
|
# Week 4 class code: reading, inspecting, and subsetting data frames,
# plus factors and lubridate dates.
download.file(url = "https://ndownloader.figshare.com/files/2292169", destfile = "data/portal_data_joined.csv")
surveys <- read.csv(file = "data/portal_data_joined.csv")
# --- Ways to look at large data frames -------------------------------------
head(surveys)    # first 6 rows, all columns
str(surveys)     # column types and a preview
dim(surveys)     # vector with the dimensions of the data frame
nrow(surveys)    # fix: the original called nrows(), which does not exist
ncol(surveys)
tail(surveys)
names(surveys)   # names of all the variables (columns)
summary(surveys) # per-column summary stats; good for spotting oddities
# --- Subsetting data frames (2-D) ------------------------------------------
surveys[1, 1]  # gives you row,column
head(surveys)  # use to verify
surveys[, 1]   # leaving a dimension blank returns everything in it, as a VECTOR
surveys[1]     # single number, no comma: a one-column DATAFRAME
head(surveys[1, ]) # first row (stays a data.frame)
head(surveys[1])   # DATAFRAME
class(surveys[1])
class(surveys[, 1])
class(surveys[1, ]) # rows mix types, so a row is always a data.frame
surveys[1:3, 6] # 1:3 creates a vector; vectors subset larger "slices"
# negative sign to exclude indices
surveys[1:5, -1] # rows 1-5, all columns except 1
surveys[c(1:5), ] # c() does the same thing, but helps with syntax
# fix: the original ran surveys[-10:15, ], which errors because -10:15
# mixes negative and positive subscripts; kept here as a commented demo.
# surveys[-10:15, ]
str(surveys[-c(10:34786), ]) # excludes rows 10 - end
# more ways to subset
surveys["plot_id"]   # single column as data.frame
surveys[, "plot_id"] # single column as a vector
surveys[["plot_id"]] # single column as a vector
surveys$plot_id      # single column as a vector
# --- Challenge -------------------------------------------------------------
surveys_200 <- surveys[200, ]
nrow(surveys)
surveys_last <- surveys[nrow(surveys), ]
tail(surveys)
nrow(surveys) / 2
surveys_middle <- surveys[17393, ]
surveys[(nrow(surveys) / 2), ]
surveys[-c(7:nrow(surveys)), ]
head(surveys)
# --- Factors ---------------------------------------------------------------
surveys$sex
# creating our own factor
sex <- factor(c("male", "female", "female", "male"))
sex
class(sex)
typeof(sex)
# levels() gives back a character vector of the levels
levels(sex)
levels(surveys$genus)
# --- Working with dates ----------------------------------------------------
library(lubridate)
my_date <- ymd("2015-01-01")
str(my_date)
my_date <- ymd(paste("2015", "05", "17", sep = "-"))
paste(surveys$year, surveys$month, surveys$day, sep = "-")
surveys$date <- ymd(paste(surveys$year, surveys$month, surveys$day, sep = "-"))
surveys$date[is.na(surveys$date)] # rows whose date failed to parse
|
8d520f446362fd146894ff0fc3a4f17ca4cab062
|
3da61e0097d852d2202dbf5e6c9b3fbfe8fb9b92
|
/assignment1/complete.R
|
773d93e23bd4876dde2a6c9f9bef500654bcf7a5
|
[] |
no_license
|
veerakumarnice/rprog-033
|
2381d90ed18bd5305a92bb0e2e74cddeba19503f
|
815e51e22c605b1a298a688168d1f22e5a7d2ef9
|
refs/heads/master
| 2021-01-10T02:10:18.169246
| 2015-10-31T09:53:49
| 2015-10-31T09:53:49
| 44,557,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
complete.R
|
#' Count complete observations per monitor file.
#'
#' For each monitor id, reads "<directory>/<id padded to 3 digits>.csv" and
#' counts the rows where both sulfate and nitrate are non-missing.
#'
#' @param directory path to the folder containing the monitor CSV files
#' @param id integer vector of monitor ids (default 1:332)
#' @return data.frame with columns `id` and `nobs`
complete <- function(directory, id = 1:332) {
  files <- file.path(directory, sprintf("%03d.csv", id))
  # vapply (instead of growing nobs with c() in a loop) preallocates and
  # guarantees one integer per monitor; sum(complete.cases(...)) also
  # removes the original's dependence on a `Date` column being present.
  nobs <- vapply(files, function(f) {
    content <- read.csv(f)
    sum(complete.cases(content$sulfate, content$nitrate))
  }, integer(1), USE.NAMES = FALSE)
  data.frame(id, nobs)
}
|
7b7084583830cb76459f75b820771da2ee5e3500
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/resemble/man/get_predictions.Rd
|
7897c795c84920f1fc820fc8b31ec4af2a9ca2a8
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 801
|
rd
|
get_predictions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_predictions.R
\name{get_predictions}
\alias{get_predictions}
\title{Extract predictions from an object of class \code{mbl}}
\usage{
get_predictions(object)
}
\arguments{
\item{object}{an object of class \code{mbl} as returned by \code{mbl}}
}
\value{
a data.table of predicted values according to either \code{k} or \code{k_dist}
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#stable'><img src='figures/lifecycle-stable.svg' alt='Stable lifecycle'></a>}}{\strong{Stable}}
Extract predictions from an object of class \code{mbl}
}
\seealso{
\code{\link{mbl}}
}
\author{
\href{https://orcid.org/0000-0002-5369-5120}{Leonardo Ramirez-Lopez} and Antoine Stevens
}
|
52e327d86efa433b1273999f073d2400ca48492f
|
2a82c473bccb19ba59bbdd0696f35043b9cfcfa2
|
/release.R
|
10c1908d1f945d44c96e0bb5e799be3f6f0237a0
|
[
"MIT"
] |
permissive
|
wkostelecki/ezplot
|
daab0c404af67977f4bc6daa02798c6737dd3dc0
|
3422e7cd110f960d63da1fd9fc3203423fbe2b3d
|
refs/heads/master
| 2023-06-23T06:56:16.773219
| 2023-06-17T05:23:51
| 2023-06-17T05:23:51
| 39,913,514
| 6
| 0
|
NOASSERTION
| 2020-11-15T14:05:06
| 2015-07-29T20:01:32
|
R
|
UTF-8
|
R
| false
| false
| 982
|
r
|
release.R
|
# Release checklist for the ezplot package: install helper packages, run
# coverage and checks, bump the version, commit/tag, and submit to CRAN.
# Meant to be run interactively, top to bottom.
library(magrittr)
library(glue)
# Install any helper packages that are missing.
# NOTE(review): "devtools" is listed twice, and setdiff() against the
# installed.packages() matrix only works incidentally (package names sit in
# the matrix's first column) -- confirm this behaves as intended.
packages = setdiff(c("tidyr", "testthat", "devtools", "DT", "git2r",
"devtools", "spelling", "rhub", "patchwork"),
installed.packages())
install.packages(packages)
old.packages()
update.packages(ask = FALSE)
# Test-coverage report, viewed in the browser.
covr = covr::package_coverage()
covr::report(covr)
# check here: https://cran.rstudio.com//web/checks/check_results_ezplot.html
# update version number:
v = "0.7.8"
# Rewrite the Version: line of DESCRIPTION in place.
readLines("DESCRIPTION") %>%
stringr::str_replace("^Version: [0-9\\.]*$", paste0("Version: ", v)) %>%
writeLines("DESCRIPTION")
devtools::spell_check()
devtools::check() # R CMD check
rcmdcheck::rcmdcheck(args = "--no-manual", error_on = "error")
devtools::check_win_devel()
devtools::check_rhub()
## update cran-comments
# Commit, push, release to CRAN, then tag the released commit and push it.
git2r::commit(all = TRUE, message = paste0("CRAN commit v", v))
system("git push")
devtools::release()
tag = paste0("v", v)
git2r::tag(name = tag, message = "CRAN")
system(glue("git push origin {tag}"))
|
1402d83a228f644c44ca44b1b21711d359ce6ca6
|
6964d8eb7cf8f9ed5abd612f6c2f0756877bca04
|
/R/embed_plot.R
|
e5ed65211ac5adb04d2aba7666a650eb155e63d5
|
[
"Unlicense"
] |
permissive
|
s-fleck/hammr
|
7a6805acc2f897c380b3f40d4e9112900646006d
|
b8fd5fa9d67698bc4c46ef48d079b0948a036387
|
refs/heads/master
| 2023-07-20T11:56:32.005037
| 2023-07-10T07:32:59
| 2023-07-10T07:32:59
| 119,056,265
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
embed_plot.R
|
#' Embed plot in html document
#'
#' Generates a png file from a plot and encodes it as a base64 string
#' encode it as a base64 string, and wraps that string in an html `<img>` tag.
#'
#' @param x a function that plots something or a ggplot object
#' @param img Logical. If `TRUE` result will be wrapped in an img tag
#' @param ... passed on to [shiny::plotPNG()]
#'
#' @return if img is `TRUE` a shiny.tag object, else a character scalar.
#' @export
#' @md
#' @importFrom graphics plot
#'
#' @examples
#'
#' \dontrun{
#'
#' embed_plot(function() plot(cars), width = 200, height = 200)
#'
#'
#' p <- ggplot(
#' cars,
#' aes(x = speed, y = dist)
#' ) +
#' geom_bar(stat = "identity")
#'
#' embed_plot(p, width = 200, height = 200)
#' }
#'
embed_plot <- function(x, img, ...){
  # Soft dependencies needed by every method; fail fast if either is absent.
  assert_namespace("knitr")
  assert_namespace("htmltools")
  # S3 dispatch on class(x) (function and ggplot methods exist).
  UseMethod("embed_plot")
}
#' @export
embed_plot.function <- function(x, img = TRUE, ...){
  # Render the plotting function to a temporary PNG via shiny::plotPNG(),
  # then encode the file as a base64 data URI.
  assert_namespace("shiny")
  png_file <- paste0(tempfile(), ".png")
  shiny::plotPNG(filename = png_file, func = x, ...)
  uri <- knitr::image_uri(png_file)
  # Either wrap the URI in an <img> tag or hand back the raw string.
  if (isTRUE(img)) {
    htmltools::tags$img(src = uri)
  } else {
    uri
  }
}
#' @export
embed_plot.ggplot <- function(x, img = TRUE, ...){
  # ggplot objects are embedded by wrapping the plot in a closure and
  # delegating to the function method. (The original also created an unused
  # temporary file path here; removed.)
  assert_namespace("ggplot2")
  embed_plot(function() plot(x), img = img, ...)
}
|
4e1ae3990301df7364821fadf75b915e0a149320
|
0f3df7f607d83626fd4bee17acf3a771c2734f6c
|
/cachematrix.R
|
6026f3e389a1029123a64fabcaaeeaebfc1aa4c7
|
[] |
no_license
|
saganot/ProgrammingAssignment2
|
3cba9f3c04093da31469af7fe649a7cbe22016a1
|
ac03dbeb95f3968a20448a51cdb90b8c560a50a8
|
refs/heads/master
| 2021-04-28T23:07:12.173717
| 2016-12-31T13:52:01
| 2016-12-31T13:52:01
| 77,740,851
| 0
| 0
| null | 2016-12-31T13:14:24
| 2016-12-31T13:14:23
| null |
UTF-8
|
R
| false
| false
| 1,389
|
r
|
cachematrix.R
|
## These two functions allow for the inverse of a matrix to be calculated once
## and cached, then read back from the cache whenever needed, so as to avoid
## costly recalculations.
## makeCacheMatrix creates a special "matrix" that is actually a list of functions.
## The purpose is to store the matrix in the global variable x, calculate its
## inverse and store it in global variable i, and read i back when necessary.
## "set" stores the matrix in global variable x and initializes the inverse.
## "get" reads the matrix from the global variable.
## "setInverse" sets the inverse that is returned by cacheSolve.
## "getInverse" gets the inverse from the global variable i.
## Create a cache-aware "matrix": a list of closures sharing a stored matrix
## `x` and its (lazily computed) inverse. `set` replaces the matrix and
## invalidates the cache; `setInverse`/`getInverse` are used by cacheSolve.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix, so the cached inverse is stale
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## cacheSolve gets the inverse from the "matrix" created by makeCacheMatrix.
## If the inverse has already been calculated, it reads it from the cache.
## If not, it calculates it for the first time and caches it using setInverse.
## Return the inverse of the special "matrix" made by makeCacheMatrix.
## On a cache hit the stored inverse is returned (with a message); on a
## miss it is computed with solve(), stored via setInverse, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
fa55fdc7f58bebd5f9249d44c6058df47fd670f4
|
cfb444f0995fce5f55e784d1e832852a55d8f744
|
/man/rnorm_pre.Rd
|
23ef082ce81e093bccc367d81119cec6fe394434
|
[
"MIT"
] |
permissive
|
debruine/faux
|
3a9dfc44da66e245a7b807220dd7e7d4ecfa1317
|
f2be305bdc6e68658207b4ad1cdcd2d4baa1abb4
|
refs/heads/master
| 2023-07-19T18:28:54.258681
| 2023-07-07T16:59:24
| 2023-07-07T16:59:24
| 163,506,566
| 87
| 15
|
NOASSERTION
| 2023-01-30T10:09:37
| 2018-12-29T11:43:04
|
R
|
UTF-8
|
R
| false
| true
| 967
|
rd
|
rnorm_pre.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rnorm_pre.R
\name{rnorm_pre}
\alias{rnorm_pre}
\title{Make a normal vector correlated to existing vectors}
\usage{
rnorm_pre(x, mu = 0, sd = 1, r = 0, empirical = FALSE, threshold = 1e-12)
}
\arguments{
\item{x}{the existing vector or data table of all vectors}
\item{mu}{desired mean of returned vector}
\item{sd}{desired SD of returned vector}
\item{r}{desired correlation(s) between existing and returned vectors}
\item{empirical}{logical. If true, mu, sd and r specify the empirical not population mean, sd and covariance}
\item{threshold}{for checking correlation matrix}
}
\value{
vector
}
\description{
\code{rnorm_pre} Produces a random normally distributed vector with the specified correlation to one or more existing vectors
}
\examples{
v1 <- rnorm(10)
v2 <- rnorm_pre(v1, 0, 1, 0.5)
cor(v1, v2)
x <- rnorm_multi(50, 2, .5)
x$y <- rnorm_pre(x, r = c(0.5, 0.25))
cor(x)
}
|
7487f5314f4de28fb468cad8c6c6728d669249bf
|
330a27c197664d05c9592b21d0bf2682b6ae094a
|
/MLOpsMonitoring/man/create_agg_prix_qty.Rd
|
ed581d3992bf4e1b5aa5fb402e0c6b0080808583
|
[] |
no_license
|
datastorm-open/demo_webinar_mlops
|
091140efe460c98b85789b8c70c11f81bd37e371
|
1dbd231478b84939460294464131b525b3d015f4
|
refs/heads/master
| 2023-08-03T10:34:59.647526
| 2023-07-20T09:21:35
| 2023-07-20T09:21:35
| 321,422,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 644
|
rd
|
create_agg_prix_qty.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset.R
\name{create_agg_prix_qty}
\alias{create_agg_prix_qty}
\title{First part of features computing. Compute the average basket and other features.}
\usage{
create_agg_prix_qty(sub_data_agg, all_customers)
}
\arguments{
\item{sub_data_agg}{: \code{data.table}. A subset of the complete dataset(Use create_subset_data)}
\item{all_customers}{: \code{data.table}. List of all the considered customers.}
}
\value{
a data.table object
}
\description{
First part of features computing. Compute the average basket and other features.
}
\examples{
\dontrun{
TODO
}
}
|
ee55ff5167cc857bf8ce014cca5db3c7faa953e1
|
d48a6be6d855db72443aa767d680e13596e2a180
|
/RMark/R/import.chdata.R
|
34855472882fe813e26df80a52a1ea268438ec1a
|
[] |
no_license
|
jlaake/RMark
|
f77e79d6051f1abfd57832fd60f7b63540a42ab9
|
7505aefe594a24e8c5f2a9b0b8ac11ffbdb8a62d
|
refs/heads/master
| 2023-06-26T21:29:27.942346
| 2023-06-25T16:35:43
| 2023-06-25T16:35:43
| 2,009,580
| 17
| 15
| null | 2019-01-10T17:17:11
| 2011-07-06T23:44:02
|
R
|
UTF-8
|
R
| false
| false
| 6,449
|
r
|
import.chdata.R
|
#' Import capture-recapture data sets from space or tab-delimited files
#'
#' A relatively flexible function to import capture history data sets that
#' include a capture (encounter) history read in as a character string and an
#' arbitrary number of user specified covariates for the analysis.
#'
#' This function was written both to be a useful tool to import data and as an
#' example for more specific import functions that a user may want to write for
#' data files that do not satisfy the requirements of this function. In
#' particular this function will not handle files with fixed-width format files
#' that do not contain appropriate tab or space delimiters between the fields.
#' It also requires that the first field is the capture (encounter) history
#' which is named "ch" and is a character string. The remaining fields are
#' arbitrary in number and type and are user defined based on the arguments to
#' the functions. Variables that will be used for grouping should be defined
#' with the \code{field.type="f"}. Numeric individual covariates (e.g., weight)
#' should be input as \code{field.type="n"}. Fields in the file that should not
#' be imported should be assigned \code{field.type="s"}. The examples below
#' illustrate different uses of the calling arguments to import several
#' different data sets that meet the modest requirements of this function.
#'
#' If you specify a frequency for the encounter history, the field name must be
#' \code{freq}. If you use any other name or spelling it will not be
#' recognized and the default frequency of 1 will be used for each encounter
#' history. This function should not be used with files structured for input
#' into the MARK interface. To use those types of files, see
#' \code{\link{convert.inp}}. It is not neccessary to use either function to
#' create a dataframe for RMark. All you need to is create a dataframe that
#' meets the specification of the RMark format. For example, if you are
#' simulating data, you only need to create a dataframe with the fields ch,
#' freq (if differs from 1) and any covariates you want and then you can use
#' \code{\link{process.data}} on the dataframe.
#'
#' If you have comments in your data file, they should not have a column header
#' (field name in first row). If \code{use.comments=TRUE} the comments are
#' used as row names of the data frame and they must be unique. If
#' \code{use.comments=FALSE} and the file contains comments they are stripped
#' out.
#'
#' @param filename file name and path for file to be imported; fields in file
#' should be space or tab-delimited
#' @param header TRUE/FALSE; if TRUE first line is name of variables
#' @param field.names vector of field names if header=FALSE; first field should
#' always be ch - capture history remaining number of fields and their names
#' are arbitrary
#' @param field.types vector identifying whether fields (beyond ch) are numeric
#' ("n") or factor ("f") or should be skipped ("s")
#' @param use.comments if TRUE values within /* and */ on data lines are used
#' as row.names for the RMark dataframe. Only use this option if they are
#' unique values.
#' @return A dataframe for use in MARK analysis with obligate \code{ch}
#' character field representing the capture (encounter) history and optional
#' covariate/grouping variables.
#' @author Jeff Laake
#' @export
#' @seealso \code{\link{export.chdata}}
#' @keywords utility
#' @examples
#' \donttest{
#' # This example is excluded from testing to reduce package check time
#' pathtodata=paste(path.package("RMark"),"extdata",sep="/")
#' example.data<-import.chdata(paste(pathtodata,"example.data.txt",sep="/"),
#' field.types=c("n","f","f","f"))
#' edwards.eberhardt<-import.chdata(paste(pathtodata,"edwardsandeberhardt.txt",
#' sep="/"),field.names="ch",header=FALSE)
#' dipper<-import.chdata(paste(pathtodata,"dipper.txt",sep="/"),
#' field.names=c("ch","sex"),header=FALSE)
#' }
import.chdata <- function(filename, header = TRUE, field.names = NULL,
                          field.types = NULL, use.comments = TRUE) {
  #
  # Reads a space/tab-delimited capture-history file into an RMark-style
  # dataframe: obligatory character field `ch` first, then arbitrary
  # covariates converted per field.types ("f" factor, "n" numeric, "s" skip).
  #
  # Strip /* ... */ comments; optionally keep the comment text as row names
  # for the result (they must be unique in that case).
  strip.list <- strip.comments(filename, use.comments = use.comments, header = header)
  rn <- strip.list$rn
  out.filename <- strip.list$out.filename
  # Supplying field.names implies the file itself has no header row.
  if (!is.null(field.names)) header <- FALSE
  # Read everything as character first; typed conversion happens below.
  data <- read.table(out.filename, colClasses = c("character"), header = header)
  unlink(out.filename)
  # `&&` (scalar, short-circuiting) rather than `&`: `if` needs a
  # length-one condition, and names(data) is only consulted when header=TRUE.
  if (header && names(data)[1] != "ch")
    stop("First field should be named ch; Either first row doesn't contain field names or first field not named properly")
  nvar <- dim(data)[2]
  # Assign field names when they were not read from the file's first line.
  if (!header) {
    if (nvar != length(field.names))
      stop("Length of field.names does not match number of columns in data")
    if (field.names[1] != "ch")
      stop("First field should be the capture-history and named ch in field.names")
    names(data) <- field.names
  }
  # Convert covariate columns; if field.types was not given, every covariate
  # is treated as a factor (the historical default).
  if (nvar > 1) {
    if (is.null(field.types)) field.types <- rep("f", nvar - 1)
    for (i in 2:nvar) {
      if (field.types[i - 1] == "f") {
        data[, i] <- as.factor(data[, i])
      } else if (field.types[i - 1] == "n") {
        data[, i] <- as.numeric(data[, i])
      }
    }
    # Drop fields marked "s" (ch itself is never dropped); drop = FALSE keeps
    # a data.frame even when only one column survives -- the original could
    # silently collapse to a vector and then fail at row.names<-.
    data <- data[, !c(FALSE, field.types == "s"), drop = FALSE]
  }
  row.names(data) <- rn
  data
}
|
8a629acef8c1287c3618460f6e26ab9fad5bb573
|
811de088d25e921b066b1074942860f63df4ec3e
|
/ui.R
|
c5139ae0a30d4add6e5922421cef3806d24b1557
|
[] |
no_license
|
cwhite1026/FBICrimeStats
|
2ac1364ce15899715b12008441e6b72d4b15c6b4
|
4c84f9ae214fa83720fa29aca031f9807d429f58
|
refs/heads/master
| 2021-01-10T23:05:04.701508
| 2016-10-09T19:08:31
| 2016-10-09T19:08:31
| 70,424,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,773
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# We want a main page with plots, plus tabs that show the data tables
# and some information about the data sets themselves.
shinyUI(
navbarPage("FBI 2015 Crime in the US Statistics",
tabPanel("Welcome",
fluidPage(
fluidRow(
column(8, offset=2,
h1("Welcome to the FBI Crime Statistics Plotter!", align="center"),
br(),
h2("Overview"),
p(paste("This tool was created for the Coursera Developing",
"Data Products course to visualize a subset of the",
"FBI Crime in the US (CIUS) murder statistics. ",
"The question the visualizations are intended to answer",
"are of the form 'What murder weapons are used in ",
"different circumstances?'")),
br(),
h2("The Data"),
p("The", a('FBI CIUS site', href='https://ucr.fbi.gov/crime-in-the-u.s/',
target='_blank'), "is the source of all of the data we use.",
paste("We specifically use the Expanded Homicide Data Table 11, Murders",
"by Circumstance and Weapon, from 2010 through 2015. ",
"'Circumstances' are the context for the murder, such as",
"robbery or gangland killings. The tables break down",
"the murders reported to the FBI for each circumstance",
"by the weapon used to commit them. Since reporting is ",
"not homogeneous, many of the murders fall into the 'Other'",
"or 'Unknown' categories.")),
br(),
h2("The 'Plot' Tab"),
p(paste("This tab allows you to plot up the murders by year, weapon, and",
"circumstance in stacked bar chart form. You have control ",
"over which values of each variable are shown and what part ",
"of the plot they correspond to.",
" The three aspects of the plots you have control over are")),
tags$ol(
tags$li("the way the data is broken into panels,"),
tags$li("the variable along the x-axis, and"),
tags$li("the fill of the bars in the stacked bar chart.")
),
p("The three variables that appear in the dataset are"),
tags$ol(
tags$li("the circumstance, meaning the context in which the murder was committed,"),
tags$li("the murder weapon, and"),
tags$li("the the year.")
),
p("Each of these can be assigned to any of the three aspects of the plot."),
h4("The 'Facet Variable'"),
p(paste("The Facet Variable is the variable that determines how the ",
"data is split into panels. For instance, if the facet",
"variable is 'Circumstances', then you could choose to show",
"murders commited during robberies in one panel and those",
"committed due to a brawl over narcotics in a second. If",
"the facet variable is 'Weapon', then you could show ",
"murders committed with handguns",
"in one panel and blunt objects in another.")),
p(paste("Once you've chosen a value for the facet variable, you can",
"choose which values of that variable are shown with the checkbox",
"menu just below the facet variable menu. By default, the",
"three values of variable with the highest total murder count",
"are chosen (excluding values with the words 'total' or 'other'",
"in them).")),
h4("The 'X-Axis' Variable"),
p(paste("The variable displayed along the x-axis. Pretty",
"self-explanatory. I've found that most often Year is the most",
"useful choice for this. You can choose the values for to show",
"on the x-axis in the checkbox menu below the x-axis variable",
"selection menu.")),
h4("The Fill Variable"),
p(paste("Once you have chosen the variables for the facets and x-axis,",
"the third variable is assigned to the fill. You can choose",
"what values of the fill variable in the checkbox menu below",
"'Fill by [fill variable]'. By default the top three values",
"are chosen, again excluding values containing 'total' or 'other.'")),
br(),
h2("The 'Data tables' Tab"),
p(paste("Clicking on this tab will give you the option to view the",
"Expanded Homicide Data Table 11 for any year from 2010-2015.",
"Each table is as it appears in the raw data, with the exception",
"of text formatting of the row and column names. Because the",
"table is wide, I have included the option to toggle whether or",
"not each column is displayed. If you click the 'Column Visibility'",
"button, a list of columns will appear, by default with all columns",
"toggled on. Clicking the column name toggles that column between",
"shown and hidden."))
)
)
)
),
tabPanel("Plot",
sidebarLayout(
sidebarPanel(
#Set up the controls for all of the aspects of the plot
#Start with the faceting properties- both the variable and the
#values of that variable to use
uiOutput("facetMenu"),
h4("Facet values"),
wellPanel(
style = "overflow-y:scroll; max-height: 200px",
uiOutput("facetValueMenu")
),
#Now figure out the x axis, variable and values
uiOutput("xAxisMenu"),
h4("Values to show on X-axis"),
wellPanel(
style = "overflow-y:scroll; max-height: 200px",
uiOutput("xAxisValueMenu")
),
#With the other two determined, the third variable is the fill.
#Get the values to show
h3(textOutput("fillNameText")),
wellPanel(
style = "overflow-y:scroll; max-height: 200px",
uiOutput("fillValueMenu")
)
),
# This is the main panel, which holds the plot.
mainPanel(
#textOutput("holder")
plotOutput("statPlot")
)
)
),
#The second panel will be the raw data that we're looking at
navbarMenu("Data tables",
tabPanel("2010 data",
h2("FBI CIUS data 2010"),
h3("Expanded Homicide Data Table 11, Murder by Circumstance and Weapon"),
# selectInput("dataTableYear", options=),
DT::dataTableOutput('dataTable10')
),
tabPanel("2011 data",
h2("FBI CIUS data 2011"),
h3("Expanded Homicide Data Table 11, Murder by Circumstance and Weapon"),
# selectInput("dataTableYear", options=),
DT::dataTableOutput('dataTable11')
),
tabPanel("2012 data",
h2("FBI CIUS data 2012"),
h3("Extended Homicide Table 11, Murder by Circumstance and Weapon"),
# selectInput("dataTableYear", options=),
DT::dataTableOutput('dataTable12')
),
tabPanel("2013 data",
h2("FBI CIUS data 2013"),
h3("Expanded Homicide Data Table 11, Murder by Circumstance and Weapon"),
# selectInput("dataTableYear", options=),
DT::dataTableOutput('dataTable13')
),
tabPanel("2014 data",
h2("FBI CIUS data 2014"),
h3("Expanded Homicide Data Table 11, Murder by Circumstance and Weapon"),
# selectInput("dataTableYear", options=),
DT::dataTableOutput('dataTable14')
),
tabPanel("2015 data",
h2("FBI CIUS data 2015"),
h3("Expanded Homicide Data Table 11, Murder by Circumstance and Weapon"),
# selectInput("dataTableYear", options=),
DT::dataTableOutput('dataTable15')
)
)
))
|
1a81c37c6abd90ba7e94c268402f543ad1feff1b
|
7864effa271e722a9f83105b3ee10e57b675e062
|
/plot4.R
|
0dbd388b260945aba90e615600394bd181d730b4
|
[] |
no_license
|
bwperlstein/Exploratory-Data-Course-Project-2
|
e88cfdc8fdde609de0b5a31ab6dbae28443f4f43
|
94bb809a062432a4ccd0cd6c9b80c5a39e668bd9
|
refs/heads/master
| 2020-08-08T07:23:04.103381
| 2019-10-08T23:46:34
| 2019-10-08T23:46:34
| 213,777,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,447
|
r
|
plot4.R
|
# Coerce a data frame to a tibble and force its SCC column to character
# (SCC codes must be compared as strings, not factors/numbers).
chgSCCtoCharacter <- function(dftemp) {
  out <- as_tibble(dftemp)
  out$SCC <- as.character(out$SCC)
  out
}
# Keep only the rows of `df` whose `year` column equals `year1`.
# NA years are dropped, matching subset()'s NA-as-FALSE semantics.
yrSelect <- function(df, year1) {
  df[which(df$year == year1), , drop = FALSE]
}
# Median of the Emissions column, ignoring missing values.
getMedian <- function(dftemp) {
  median(dftemp$Emissions, na.rm = TRUE)
}
## Plot the median coal-combustion PM2.5 emissions for 1999 vs. 2008 and
## save the figure to plot4.png inside `dirnm`.
##
## Fixes vs. the previous version:
##  - the caller's working directory is restored on exit (on.exit) instead
##    of being left changed as a side effect
##  - the unused capture of png()'s return value was dropped
plot4 <- function() {
  ## Initialize variables (hard-coded local data directory and file names)
  dirnm <- "C:/Users/berna/Documents/R/Exploratory_Data_Analysis/Course Project 2"
  filenm1 <- "summarySCC_PM25.rds"
  filenm2 <- "Source_Classification_Code.rds"
  outputnm <- "plot4.png"
  med <- vector(mode = "numeric")
  ## Gather libraries and switch to the data directory, restoring the
  ## previous working directory when the function exits
  library(dplyr)
  oldwd <- setwd(dirnm)
  on.exit(setwd(oldwd), add = TRUE)
  ## Read pm25 summary & source classification files
  summary.df <- readRDS(filenm1)
  source.df <- readRDS(filenm2)
  ## Get the SCC codes of coal sources (EI.Sector contains "Coal")
  source.df <- chgSCCtoCharacter(source.df)
  SCC.coal <- subset(source.df, grepl("Coal", source.df$EI.Sector), 1)
  ## Keep just the SCC, Emissions and year columns (positions 2, 4, 6)
  use.df <- select(summary.df, 2, 4, 6)
  ## Select only rows where the SCC represents a coal source (in EI Sector)
  coal.df <- subset(use.df, use.df$SCC %in% SCC.coal$SCC)
  ## Select coal data by individual year - 1999 & 2008
  s1999.df <- yrSelect(coal.df, 1999L)
  s2008.df <- yrSelect(coal.df, 2008L)
  ## Medians for both years; their range fixes the y-axis limits
  med[1] <- getMedian(s1999.df)
  med[2] <- getMedian(s2008.df)
  rng <- range(med)
  rngmax <- rng[2]
  med1 <- med[1]
  med2 <- med[2]
  ## Plot the median for each year, joined by a line, annotated at the
  ## lower of the two values
  png(filename = outputnm)
  plot(1999L, med1, xlab = "Year", ylab = "Emissions",
       main = "Median Coal Emissions - 1999 vs. 2008",
       xlim = c(1999L, 2008L), ylim = c(0, rngmax), pch = 19)
  points(2008L, med2, pch = 19)
  lines(c(1999L, 2008L), med, lwd = 2, col = "blue")
  abline(h = min(rng), col = "black")
  text(x = mean(c(1999, 2008)), y = (min(rng) - 0.01), cex = 1,
       labels = paste("Black horizontal line at level of lower point", round(min(rng), 4)))
  dev.off()
}
|
f924ac354430d8c31a65e32ca987c4e93801fef8
|
4b0cff5e09efd41994db11d589ef3069266ccce4
|
/man/oapply.Rd
|
7461ae62dbf44d8bf75d5e9a3b50cbcaf66d782e
|
[] |
no_license
|
cran/Jmisc
|
7d43070011ebd9b56327ca8704dacbeeb5e84c2c
|
0b141061bedc22bc9c7e7b6fa97dde67066f06a9
|
refs/heads/master
| 2022-07-13T05:12:17.641540
| 2022-06-22T04:53:25
| 2022-06-22T04:53:25
| 17,680,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 744
|
rd
|
oapply.Rd
|
\name{oapply}
\alias{oapply}
\title{Outer apply}
\usage{
oapply(X, Y, FUN, switch_order = FALSE, ...)
}
\arguments{
\item{X}{first argument to \code{FUN}}
\item{Y}{second argument to \code{FUN}}
\item{FUN}{a function to apply. See mapply}
\item{switch_order}{Switch the order of \code{X} and
\code{Y} in expand.grid}
\item{...}{other arguments to mapply}
}
\value{
same as mapply.
}
\description{
Outer apply. It uses \code{expand.grid} to compute all possible
combinations of \code{X} and \code{Y}, then calls \code{mapply}
with the generated combinations and \code{FUN}.
}
\examples{
oapply(11:15,1:5,choose)
oapply(11:15,1:5,choose,switch_order=TRUE)
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
\seealso{
\link{mapply}
}
|
6b6899feb0e3b519c334e91261c319c8c9a7584a
|
c76d70620a863a0d1e2613d00276cd1a50831b8b
|
/train_TrCASAVA_script.R
|
ca967011896b8726469f2d63e291663a69a70b7a
|
[] |
no_license
|
zhanglabtools/CASAVA
|
7bf299241918bf930d6ada2b0c3654b75cd18022
|
f84e0327aec0bb6405a971530b7091e3c486d982
|
refs/heads/master
| 2023-02-04T05:56:11.757994
| 2020-12-26T09:39:41
| 2020-12-26T09:39:41
| 280,851,154
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,957
|
r
|
train_TrCASAVA_script.R
|
################################################################################
# Last modified: 2020/12/26
# Trains TrCASAVA (transfer-learning CASAVA) models for one disease group.
# The input files have been uploaded to
# https://zenodo.org/record/4365899#.X-b3CdgzaUk
# (see the folder /feature/ in 03_Disease_and_TrCASAVA_related.zip).
# Remark: it is recommended to use the saved results in Zenodo.
# Requires TrainBagXGB / PredictBagXGB / helpFunc / MeasureAll from
# CASAVA_library.R, sourced below.
################################################################################
source('CASAVA_library.R')
# Timestamp makes the output/model file names unique per run.
startTime <- format(Sys.time(), "%Y%m%d%H%M")
# 1) configuration
# change the following four lines according to your own setup
inputPath <- 'K:/project-four/CleanUp/data/Disease_and_TrCASAVA_related/feature/'
index <- 60 # Eye diseases
outName <- sprintf('BagXGB_temp_output_%s_%s.RData', startTime, index)
modelName <- sprintf('BagXGB_temp_model_%s_%s.RData', startTime, index)
# 2) preparation, get dtrain + dtransfer + test + label + group
# Each .RData file is expected to load a data frame named train / test /
# transfer respectively -- TODO confirm against the Zenodo archive.
trainName <- sprintf('%s/%s_train.RData', inputPath, index)
testName <- sprintf('%s/%s_test.RData', inputPath, index)
transferName <- sprintf('%s/%s_transfer.RData', inputPath, index)
load(trainName)
load(testName)
load(transferName)
# Pull the label out of the training table and drop the bookkeeping
# columns so only feature columns remain before as.matrix().
trainLabel <- train[['label']]
train[['label']] <- NULL
train[['subset']] <- NULL
train[['group']] <- NULL
weight <- rep(1, length(trainLabel))
trainFeature <- as.matrix(train)
dtrain <- xgboost::xgb.DMatrix(trainFeature, label = trainLabel, weight = weight)
# Same preparation for the transfer (auxiliary-domain) set.
transLabel <- transfer[['label']]
transfer[['label']] <- NULL
weight <- rep(1, length(transLabel))
transFeature <- as.matrix(transfer)
dtransfer <- xgboost::xgb.DMatrix(transFeature, label = transLabel, weight = weight)
# Test set: keep the grouping vector so predictions can be split per group.
testLabel <- test[['label']]
group <- test[['group']]
test[['label']] <- NULL
test[['group']] <- NULL
test <- as.matrix(test)
# 3) train + test + evaluation
# First train a bagged-XGBoost model on the disease-specific data only.
specificModel <- TrainBagXGB(dtrain, outName = modelName)
totalPosWeight <- sum(trainLabel) + sum(transLabel)
# Score the transfer set with the specific model (mean over the bag);
# those scores become instance weights for the combined training round,
# with transfer negatives fixed at weight 1.
weight <- lapply(specificModel, predict, dtransfer)
weight <- colMeans(do.call(rbind, weight))
weight[transLabel == 0] <- 1
# Specific-domain samples get a fixed boost (label * 4 + 1, i.e. 5 for
# positives, 1 for negatives).
weight <- c(trainLabel * 4 + 1, weight)
label <- c(trainLabel, transLabel)
# Rescale positive weights so their total equals the raw positive count.
scale <- totalPosWeight / sum(weight[label == 1])
weight[label == 1] <- scale * weight[label == 1]
totalTrain <- xgboost::xgb.DMatrix(rbind(trainFeature, transFeature),
                                   label = label,
                                   weight = weight)
# Retrain on the weighted union of specific + transfer data.
transModel <- TrainBagXGB(totalTrain, outName = modelName)
predictions <- PredictBagXGB(model = transModel, feature = test)
# helpFunc presumably splits a vector by `group` -- confirm in CASAVA_library.R.
pre <- helpFunc(predictions, group)
lab <- helpFunc(testLabel, group)
evaluation <- MeasureAll(pre, lab)
evaluation <- evaluation[, c('AUC', 'AUPR')]
evaluation <- colMeans(evaluation)
# 4) Save out: both models, per-group predictions/labels, mean AUC/AUPR.
predictions <- pre
labels <- lab
fileList <- c('specificModel', 'transModel', 'predictions', 'labels', 'evaluation')
save(list = fileList, file = outName)
|
e89d32ebfe6cef50caa282bf9f41dfcea5bbd751
|
81de910c2709361dff0f83cdf087a47ef4e31818
|
/figures/Figure3/11_counts_bonf.R
|
e6c5eef7cee315e065959f9366a1ec241bdcf931
|
[] |
no_license
|
fl-yu/singlecell_bloodtraits
|
1fdfb4edeac23a3763199f377a7115e9541fa5df
|
1da2a246b6f1ad13e8bfcbd424080f8ea5917c86
|
refs/heads/master
| 2022-02-05T10:02:17.033827
| 2019-07-09T22:38:39
| 2019-07-09T22:38:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
11_counts_bonf.R
|
# Count, for each enrichment method, how many trait/cell-type enrichments
# pass a Bonferroni-level significance cutoff, split by whether the
# enrichment is lineage-specific ("Yes") or not ("No").
library(dplyr)
df <- readRDS("allEnrichments-df.rds")
# Equivalent statistics: p is the per-test p-value cutoff; Z and chisq are
# the cutoffs used for methods that report a Z-score (fgwas) or a
# chi-square statistic (GPA) instead of a p-value.
p = 0.0001736111
Z = 3.75
chisq = 14.1
# One entry per method, in the same order as the rownames set below.
allstats <- data.frame(
Yes = c(sum(df$lineageSpecific& df$ldscore_pvalue < p), sum(df$lineageSpecific& df$chromVAR_pvalue < p),
sum(df$lineageSpecific& df$gchromVAR_pvalue < p), sum(df$lineageSpecific& df$panHemeLDSR_pvalue < p),
sum(df$lineageSpecific& df$goShifter_pvalue < p), sum(df$lineageSpecific& df$GPA_chisq > chisq),
sum(df$lineageSpecific& df$gregor_pvalue < p), sum(df$lineageSpecific& df$fgwas_z > Z)
),
No = c(sum(!df$lineageSpecific& df$ldscore_pvalue < p), sum(!df$lineageSpecific& df$chromVAR_pvalue < p),
sum(!df$lineageSpecific& df$gchromVAR_pvalue < p), sum(!df$lineageSpecific& df$panHemeLDSR_pvalue < p),
sum(!df$lineageSpecific& df$goShifter_pvalue < p), sum(!df$lineageSpecific& df$GPA_chisq > chisq),
sum(!df$lineageSpecific& df$gregor_pvalue < p), sum(!df$lineageSpecific& df$fgwas_z > Z)
)
)
# Row names must stay in the same order as the sums above.
rownames(allstats) <- c("LD score", "chromVAR", "gchromVAR", "adjLDS", "goShifter", "GPA", "GREGOR", "FGWAS")
allstats
|
b344ddaff2f27029d09c063c153633778a608456
|
48516682819308cb1ebc92edce2c1c144744cc2f
|
/scripts_for_HM_ancestral_tests/0_plate_arrangementsDAS.R
|
cefb2fc8dfe519602da0287e6e385db4851df1ad
|
[] |
no_license
|
Landrylab/Gene_duplication_2019
|
139c8b2ca0af251cfe4398ab8d11191ef0c7346b
|
af5cacc44768bcccb952ba9ce27ba7ddb707b008
|
refs/heads/master
| 2020-05-02T07:44:58.301178
| 2019-03-26T15:48:36
| 2019-03-26T15:48:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,747
|
r
|
0_plate_arrangementsDAS.R
|
# Build 1536-density plate layouts for a DHFR-PCA screen by interleaving
# four 384-well source plates (quadrants A-D), then join the DHFR[1,2]
# strain plate with three rotations of the DHFR[3] strain plates.
#install.packages("tidyr")
#install.packages("reshape")
library(tidyr)
library(reshape)
library(dplyr)
library(xlsx)
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are side effects on
# the caller's session; fine for a one-off analysis script.
rm(list=ls())
setwd("C:/Users/Diana Ascencio/Dropbox/Project_HeteroHomodimers/pca/array_files/")
dhfr12 <- read.xlsx("DIAS_dest_array_12_20180920.xls",sheetIndex = 1,startRow = 2,stringsAsFactors=FALSE)
# rearrangement matrix: 32 x 48 grid of 1536-format positions, filled
# column-wise; A-D take the four interleaved quadrants (odd/even rows x
# odd/even columns), the standard 4 x 384 -> 1536 pinning pattern
plt<- matrix(1:1536, nrow = 32,ncol = 48)
colm <- seq(1,48,2)
rowm <- seq(1,32,2)
A <- plt[rowm,colm]
B <- plt[rowm,colm+1]
C <- plt[rowm+1,colm]
D <- plt[rowm+1,colm+1]
# Lookup table: 1536-position number -> (column, row), column-major order
# to match how `plt` was filled.
temp <- NULL
for (c in 1:48) {
for (r in 1:32) {
temp <- rbind(temp, c(c,r))
}
}
pl1536 <- temp
# For each quadrant, pair the 384 position numbers with their 1536-format
# column/row coordinates.
a <- as.vector(A)
indA <- cbind(a,pl1536[a,])
colnames(indA) <- c("no1536","c1536","r1536")
a <- as.vector(B)
indB <- cbind(a,pl1536[a,])
colnames(indB) <- c("no1536","c1536","r1536")
a <- as.vector(C)
indC <- cbind(a,pl1536[a,])
colnames(indC) <- c("no1536","c1536","r1536")
a <- as.vector(D)
indD <- cbind(a,pl1536[a,])
colnames(indD) <- c("no1536","c1536","r1536")
# Generate collection index -----------------------------------------------
# generate the plate for my DHFR[1,2] strains: the same 384-well source
# plate replicated into all four quadrants, tagged A-D
pl <- rep("A",384)
pltA <- cbind(indA,dhfr12,pl)
pl <- rep("B",384)
pltB <- cbind(indB,dhfr12,pl)
pl <- rep("C",384)
pltC <- cbind(indC,dhfr12,pl)
pl <- rep("D",384)
pltD <- cbind(indD,dhfr12,pl)
tp <- rbind(pltA,pltB,pltC,pltD)
plt_dhfr12 <- arrange(tp,no1536)
write.csv(plt_dhfr12,file = "plate_dhfr12.csv", quote=F, row.names=F)
index1536 <- tp %>% select(no1536,c1536,r1536,pl)
# generate the plate for my DHFR[3] strains: six 96-well source sheets,
# combined four at a time in a rotating scheme so every DHFR[3] plate is
# paired with every quadrant across the three destination plates
p1 <- read.xlsx("DIAS_dest_array1_3_20180920.xls",sheetIndex = 1,startRow = 5,stringsAsFactors=FALSE)
p2 <- read.xlsx("DIAS_dest_array2_3_20180920.xls",sheetIndex = 1,startRow = 5,stringsAsFactors=FALSE)
p3 <- read.xlsx("DIAS_dest_array3_3_20180920.xls",sheetIndex = 1,startRow = 5,stringsAsFactors=FALSE)
p4 <- read.xlsx("DIAS_dest_array4_3_20180920.xls",sheetIndex = 1,startRow = 5,stringsAsFactors=FALSE)
p5 <- read.xlsx("DIAS_dest_array5_3_20180920.xls",sheetIndex = 1,startRow = 5,stringsAsFactors=FALSE)
p6 <- read.xlsx("DIAS_dest_array6_3_20180920.xls",sheetIndex = 1,startRow = 5,stringsAsFactors=FALSE)
#plates 1-3 dhfr[3]; "Boder" is a typo in the source sheets, normalized to
# "border" below, and literal "NA" gene names become "blank"
pl1 <- rbind(p1,p2,p3,p4)
pl2 <- rbind(p5,p6,p1,p2)
pl3 <- rbind(p3,p4,p5,p6)
plt1_dhfr3 <- cbind(index1536,pl1) %>% arrange(no1536) %>%
select(no1536,c1536,r1536,c,r,Gene,pl) %>%
mutate(Gene = ifelse(Gene == "Boder", "border",Gene)) %>%
mutate(Gene = ifelse(Gene == "NA", "blank",Gene))
plt2_dhfr3 <- cbind(index1536,pl2) %>% arrange(no1536)%>%
select(no1536,c1536,r1536,c,r,Gene,pl) %>%
mutate(Gene = ifelse(Gene == "Boder", "border",Gene)) %>%
mutate(Gene = ifelse(Gene == "NA", "blank",Gene))
plt3_dhfr3 <- cbind(index1536,pl3) %>% arrange(no1536)%>%
select(no1536,c1536,r1536,c,r,Gene,pl) %>%
mutate(Gene = ifelse(Gene == "Boder", "border",Gene)) %>%
mutate(Gene = ifelse(Gene == "NA", "blank",Gene))
# plates after mating: pair each position's DHFR[1,2] strain with the
# DHFR[3] strain pinned at the same 1536 position
# NOTE(review): plate3's `plate` column is labelled "plate_2" -- likely a
# copy-paste slip; probably intended to be "plate_3".
plate1 <- left_join(plt_dhfr12,plt1_dhfr3, by = c("no1536","c1536","r1536","c","r", "pl")) %>%
select(c(1:5,7,6,8)) %>% mutate(plate = "plate_1")
colnames(plate1)[7:8] <- c("dhfr12","dhfr3")
plate2 <- left_join(plt_dhfr12,plt2_dhfr3, by = c("no1536","c1536","r1536","c","r", "pl")) %>%
select(c(1:5,7,6,8)) %>% mutate(plate = "plate_2")
colnames(plate2)[7:8] <- c("dhfr12","dhfr3")
plate3 <- left_join(plt_dhfr12,plt3_dhfr3, by = c("no1536","c1536","r1536","c","r", "pl")) %>%
select(c(1:5,7,6,8)) %>% mutate(plate = "plate_2")
colnames(plate3)[7:8] <- c("dhfr12","dhfr3")
all_plates <- rbind(plate1,plate2,plate3)
# NOTE(review): "plate3.csv" is written from `plate2`, not `plate3` --
# almost certainly another copy-paste slip.
write.csv(plate1,file = "plate1.csv", quote=F, row.names=F)
write.csv(plate2,file = "plate2.csv", quote=F, row.names=F)
write.csv(plate2,file = "plate3.csv", quote=F, row.names=F)
write.csv(plt1_dhfr3,file = "plate1_dhfr3.csv", quote=F, row.names=F)
write.csv(plt2_dhfr3,file = "plate2_dhfr3.csv", quote=F, row.names=F)
write.csv(plt3_dhfr3,file = "plate3_dhfr3.csv", quote=F, row.names=F)
write.csv(all_plates, file = "all_plates.csv", quote=F, row.names=F)
# Check if the array is correct -------------------------------------------
# Spot-check: YER062C (HOR2) should appear as both bait and prey somewhere.
tim12 <- grep("YER062C", x = all_plates$dhfr12)
tim3 <- grep("YER062C", x = all_plates$dhfr3)
tim <- intersect(tim12,tim3)
all_plates[tim,]
# NOTE(review): leftover debug lines -- `all_plates` has no `tag` column
# (grep on NULL silently returns integer(0)), and `rm`/`cm` are not
# defined here (`rm` is the base function), so intersect(rm, cm) errors.
tp <- grep("23_5_D", all_plates$tag)
crim <- intersect(rm,cm)
|
916f6d23cce14fef0e2d558ba5585a391334f180
|
b820ecdf40c5982d4f30b26afce5bd448fde50c7
|
/R/get_recent_spread.R
|
d2628a0af1fd4762dc9da9afa7ef518472c06a97
|
[] |
no_license
|
mstei4176/krakenR
|
88d0e7db3fe9641ea386732fcc8342df8faff40a
|
7b7405a9b549b6669703c0916735ec9bf6136d1d
|
refs/heads/master
| 2021-04-28T13:30:03.223158
| 2017-12-10T19:57:16
| 2017-12-10T19:57:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
get_recent_spread.R
|
# Fetch recent bid/ask spread data for a currency pair from Kraken's
# public REST API (https://api.kraken.com/0/public/Spread) and return the
# parsed JSON response.
get_recent_spread <- function(pair = "XBTEUR") {
  endpoint <- "https://api.kraken.com/0/public/Spread"
  query_url <- paste0(endpoint, "?pair=", pair)
  jsonlite::fromJSON(query_url)
}
|
8d09e23e0b2dc9c1b9de0918b862c691aa5e6bb8
|
4a7718b5618d75bdcfb3fb71324569d0d11ac749
|
/R/lFC_in_time.R
|
5aaa61d26cbfade8835f28826b5d3225d2a65b55
|
[] |
no_license
|
EwaMarek/FindReference
|
859676f1744ea2333714634fd420d6b91b367956
|
eedc8c80809b6f3e4439999bac4cb09ec2b228f2
|
refs/heads/master
| 2018-08-01T08:39:05.326467
| 2018-06-02T17:05:06
| 2018-06-02T17:05:06
| 104,148,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,146
|
r
|
lFC_in_time.R
|
# na wejscie: symbole genow do wyrysowania, EntrezID wszystkich genow wystepujacych na mikromacierzach, info o wszytkich probkach IR, FC_data,
# ranking, skala: linear, log, czy dotyczy miRNA
# na wyjscie: wykres
#' @title Plot fold change in time
#'
#' @param genes_to_valid Character vector with gene symbols or MIMAT ids (for miRNA) which fold change in time should be plotted.
#'
#' @param dane_ranking_z_scores A list - output of \code{create_ranking} function.
#'
#' @param scale A character indicating the scale of plot. Could be 'linear' for fold change or 'log' for logarithmic fold change.
#' Default value is 'linear'.
#'
#' @param all_uniq_samples A list of per-experiment sample tables (data frames,
#' or lists of data frames, with columns \code{internalId}, \code{CellLine},
#' \code{Dose}, \code{Time} and \code{Experiment}) used to map sample ids to
#' experimental conditions.
#'
#' @param miRNA Logical indicating if plot is created for gene or miRNA ranking.
#'
#' @return Function returns a ggplot object which could be plotted with \code{plot} function as in the examples.
#'
#' @description
#' \code{lFC_in_time} function could be used to plot fold change in time for chosen genes or miRNAs by gene symbol or MIMAT ids respectively.
#'
#' @seealso
#' \code{\link{create_ranking}}
#'
#' @examples
#' \dontrun{
#' ##### Create stability ranking for genes
#'
#' # download data from ArrayExpress database
#' to_download = c("E-GEOD-67309", "E-MTAB-966")
#' my_data = downloadAE(to_download, getwd())
#'
#' # load data
#' platforms = c("Affymetrix", "Agilent")
#' loaded_data = load_multi_data(my_data, platforms)
#'
#' # normalize and annotate
#' norm_data = multi_norm_and_annot(loaded_data$raw_expression_data, platforms)
#'
#' # prepare tables for rep_elim function as shown in details
#' path_to_tables = system.file("inst/extdata", "tables_ex3.rds", package = "FindReference")
#' my_tables = readRDS(path_to_tables)
#'
#' # eliminate replications and prepare object for create_ranking function
#' no_rep_data = rep_elim(norm_data, my_tables)
#'
#' # create ranking
#' gene_ranking = create_ranking(no_rep$noRepData, no_rep$uniqSamples, miRNA = FALSE)
#'
#' # plot fold change in time for some genes
#' genes_to_plot = c('GAPDH', 'ACTB', 'LYPLA2', 'B2M', 'TP53')
#' genes_FC_in_time = lFC_in_time(genes_to_plot, gene_ranking)
#'
#' ##### Create stability ranking for miRNAs
#'
#' # download data from ArrayExpress database
#' datamiRNA = downloadAE("E-MTAB-5197", "/home/emarek/")
#'
#' # prepare table as shown in details load_miRNA help page
#' path_to_table = system.file("inst/extdata", "miRNA_ex1.rds", package = "FindReference")
#' my_table = readRDS(path_to_table)
#'
#' # load data
#' loaded_data = load_miRNA(my_table, datamiRNA[[1]]$path)
#'
#' # normalize and annotate data
#' norm_data = norm_and_annot_miRNA(loaded_data)
#'
#' # eliminate replications and prepare object for create_ranking function
#' no_rep_data = rep_elim(norm_data, my_table)
#'
#' # create ranking
#' miRNA_ranking = create_ranking(no_rep$noRepData, no_rep$uniqSamples, miRNA = TRUE)
#'
#' # plot fold change in time for the most stable miRNAs
#' miRNA_to_plot = miRNA_ranking$miRNA_ranking[1:9, 'ID']
#' genes_FC_in_time = lFC_in_time(miRNA_to_plot, miRNA_ranking, scale = 'linear', miRNA = TRUE)
#' }
#'
#' @rdname lFC_in_time
#'
#' @importFrom reshape2 melt
#' @importFrom stats quantile
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 stat_summary
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 ggtitle
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ylab
#' @importFrom ggplot2 facet_wrap
#'
#'
#' @export
lFC_in_time = function(genes_to_valid, dane_ranking_z_scores, scale = 'linear', all_uniq_samples, miRNA=FALSE){
#############################################################################
######################## Take elements from a list ##########################
#############################################################################
# Pull the id vector and ranking table out of the create_ranking() output;
# the slot names differ between the gene and miRNA rankings.
if(miRNA == FALSE){
Gene_EntrezId = dane_ranking_z_scores$EntrezId
W_Gene_Ranking_stat = dane_ranking_z_scores$GeneRanking
}else{
W_Gene_Ranking_stat = dane_ranking_z_scores$miRNA_ranking
Gene_EntrezId = dane_ranking_z_scores$MIMATids
}
all_IRsamples = dane_ranking_z_scores$samples
FC_data = dane_ranking_z_scores$FC_data
#############################################################################
################## Create matrix with log fold change data ##################
#############################################################################
# One row per gene/miRNA id, one column per sample; column labels have the
# form "<internalId> <experiment>".
lFC_matrix = array(NA, dim = c(length(Gene_EntrezId), length(all_IRsamples)))
rownames(lFC_matrix) = Gene_EntrezId
colnames(lFC_matrix) = all_IRsamples
# FC_data elements are either one matrix per experiment or a list of
# matrices; in both cases columns are matched by "<sample> <experiment>".
for (i in 1:length(FC_data)) {
if(class(FC_data[[i]]) == 'matrix'){
#CN = strsplit(colnames(lFC_matrix), " ")
cols = which(colnames(lFC_matrix) %in% paste(colnames(FC_data[[i]]), names(FC_data[i]), sep = " "))
lFC_matrix[rownames(FC_data[[i]]), cols] = FC_data[[i]][, unlist(lapply(strsplit(colnames(lFC_matrix)[cols], " "), "[[", 1))]
}else if(class(FC_data[[i]]) == 'list'){
for (j in 1:length(FC_data[[i]])) {
#CN = unlist(strsplit(colnames(lFC_matrix), " "))
cols = which(colnames(lFC_matrix) %in% paste(colnames(FC_data[[i]][[j]]), names(FC_data[i]), sep = " "))
#cols = which(colnames(lFC_matrix) %in% colnames(FC_data[[i]][[j]]))
lFC_matrix[rownames(FC_data[[i]][[j]]), cols] = FC_data[[i]][[j]][, unlist(lapply(strsplit(colnames(lFC_matrix)[cols], " "), "[[", 1))]
#lFC_matrix[rownames(FC_data[[i]][[j]]), cols] = FC_data[[i]][[j]][, colnames(lFC_matrix)[cols]]
}
}
}
#############################################################################
############## For genes change entrez ids into gene symbols ################
#############################################################################
# Map Entrez ids to gene symbols so rows can be selected by symbol below
# (miRNA rankings already use MIMAT ids as row names).
if(miRNA==FALSE){
EG2SYM = AnnotationDbi::as.list(org.Hs.egSYMBOL)
Symbols = unlist(EG2SYM[Gene_EntrezId])
Symbols = Symbols[Gene_EntrezId]
rownames(lFC_matrix) = Symbols
}
#############################################################################
######## Extract rows chosen by user and add info about experiment ##########
#############################################################################
### change all unique samples structure - add elements names
# Name each sample table by its Experiment value so tables can be looked up
# from the "<internalId> <experiment>" column labels below.
tablesNames = vector(mode = "character", length(all_uniq_samples))
for(i in 1:length(all_uniq_samples)){
if(class(all_uniq_samples[[i]]) == 'data.frame'){
tablesNames[i] = as.character(all_uniq_samples[[i]]$Experiment[1])
}else if(class(all_uniq_samples[[i]]) == 'list'){
tablesNames[i] = as.character(all_uniq_samples[[i]][[1]]$Experiment[1])
}else{
tablesNames[i] = "NA"
}
}
names(all_uniq_samples) = tablesNames
### extract information
# Long format: one row per (gene, sample) pair; Var1 = gene, Var2 = sample.
lFC_vs_time = melt(lFC_matrix[as.character(genes_to_valid), ])
cells = vector(mode="character", length = dim(lFC_vs_time)[1])
dose = vector(mode="character", length = dim(lFC_vs_time)[1])
Time = vector(mode="character", length = dim(lFC_vs_time)[1])
Exp = vector(mode="character", length = dim(lFC_vs_time)[1])
# For every row, split the sample label into internalId and experiment and
# look up cell line / dose / time in the matching sample table.
for(i in 1:dim(lFC_vs_time)[1]){
samp = as.character(lFC_vs_time[i,2])
uniTab = all_uniq_samples[[strsplit(samp, " ")[[1]][2]]]
if(class(uniTab) == 'data.frame'){
currRow = uniTab[which(uniTab$internalId == strsplit(samp, " ")[[1]][1]),]
cells[i] = as.character(currRow$CellLine)
dose[i] = as.character(currRow$Dose)
Time[i] = as.character(currRow$Time)
Exp[i] = as.character(currRow$Experiment)
}else if(class(uniTab) == 'list'){
# Two-table experiments: ids containing "A" are looked up in the first
# table, all others in the second -- TODO confirm this id convention.
if(grepl("A", strsplit(samp, " ")[[1]][1])){
currRow = uniTab[[1]][which(uniTab[[1]]$internalId == strsplit(samp, " ")[[1]][1]),]
}else{
currRow = uniTab[[2]][which(uniTab[[2]]$internalId == strsplit(samp, " ")[[1]][1]),]
}
cells[i] = as.character(currRow$CellLine)
dose[i] = as.character(currRow$Dose)
Time[i] = as.character(currRow$Time)
Exp[i] = as.character(currRow$Experiment)
}
}
# add info to main table
lFC_vs_time$cells = cells
lFC_vs_time$dose = dose
lFC_vs_time$Time = Time
lFC_vs_time$exp = Exp
#lFC_vs_time$gene = Genes[as.character(lFC_vs_time$Var1),]$symbol
# Fetch each requested gene's stability rank for the facet labels.
ranks = genes_to_valid
if(miRNA==FALSE){
col_with_names = which(colnames(W_Gene_Ranking_stat) == 'symbol')
}else{
col_with_names = 1
}
for(i in 1:length(genes_to_valid)){
ranks[i] = W_Gene_Ranking_stat[which(W_Gene_Ranking_stat[,col_with_names] == genes_to_valid[i]), 'rank']
}
lFC_vs_time$label = paste(genes_to_valid, 'rank:', ranks ,sep = ' ')
# Back-transform log2 fold change to linear fold change.
lFC_vs_time$linear = 2^(lFC_vs_time$value)
#############################################################################
################ Calculate additional parametres for ggplot #################
#############################################################################
# number of facet rows in the plot
N_rows = round(sqrt(length(genes_to_valid)))
# helper functions returning the lower and upper quartiles for the ribbons
median.quartile1 <- function(x){
out = quantile(x, probs = 0.25)
return(out)
}
median.quartile3 <- function(x){
out = quantile(x, probs = 0.75)
return(out)
}
# order the facets by increasing rank, parsed from the "... rank: N" labels
lev_tab = data.frame(pre_label=unique(lFC_vs_time$label))
rozdzielone = strsplit(as.character(lev_tab$pre_label), ': ', fixed=TRUE)
lev_tab$num = lapply(rozdzielone, function(x){as.numeric(x[[2]])})
lev_tab = lev_tab[order(unlist(lev_tab$num)),]
lFC_vs_time$label2 = factor(lFC_vs_time$label, levels = as.character(lev_tab$pre_label))
#############################################################################
#################### Create plot in scale chosen by user ####################
#############################################################################
# Median line plus interquartile ribbon over time, faceted per gene/miRNA;
# both branches draw identical geometry, only the y column differs
# ('linear' fold change vs. log2 'value').
if(scale=='linear'){
lFC_vs_time = lFC_vs_time[which(is.na(lFC_vs_time$linear) == FALSE),]
wykres = ggplot(lFC_vs_time, aes(x=Time, y=linear, group = 1))+
stat_summary(geom = 'ribbon', fun.ymin = median.quartile1, fun.ymax = 'median', colour = 'blue', alpha=0.5)+
stat_summary(geom = 'ribbon', fun.ymin = 'median', fun.ymax = median.quartile3, colour = 'blue', alpha=0.5)+
stat_summary(fun.y = "median", colour = "red", size = 0.5, geom = "line")+
theme_bw() +
ggtitle("Median, 1. i 3. quartile of fold-change value in time") +
xlab('Time [h]') +
ylab('Fold change')+
facet_wrap( ~ label2, nrow=N_rows)
}else if(scale=='log'){
lFC_vs_time = lFC_vs_time[which(is.na(lFC_vs_time$value) == FALSE),]
wykres = ggplot(lFC_vs_time, aes(x=Time, y=value, group = 1))+
stat_summary(geom = 'ribbon', fun.ymin = median.quartile1, fun.ymax = 'median', colour = 'blue', alpha=0.5)+
stat_summary(geom = 'ribbon', fun.ymin = 'median', fun.ymax = median.quartile3, colour = 'blue', alpha=0.5)+
stat_summary(fun.y = "median", colour = "red", size = 0.5, geom = "line")+
theme_bw() +
ggtitle("Median, 1. i 3. quartile of fold-change value in time") +
xlab('Time [h]') +
ylab('Fold change')+
facet_wrap( ~ label2, nrow=N_rows)
}
return(wykres)
}
|
693754154abd9913126653e5ca9ca1d7393a1b84
|
f6c00e4ca190a03309d606553c84a2606fda4582
|
/supplementary_info/prisma_diagram.R
|
e57edaefaa40d93f2abef46d8fe7daec01e4e8a9
|
[] |
no_license
|
mslein/therm_var_meta_analysis
|
516e9771c64f714ceb42ada9e9cd7f5fea077724
|
88ca128db82c0dea2e6897918636b9cb12545e47
|
refs/heads/main
| 2023-04-19T00:15:44.205995
| 2023-01-16T22:48:50
| 2023-01-16T22:48:50
| 467,272,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,422
|
r
|
prisma_diagram.R
|
# Build the exclusion tally for a PRISMA flow diagram: collapse free-text
# screening notes into tidy exclusion-reason categories and count them.
pacman::p_load(tidyverse)
prisma <- read_csv("litsearch_subgroups14sept21 copy.csv") %>%
# case_when() evaluates in order; anything not matched by the branches
# below falls through to the final TRUE branch ("greater diff").
mutate(notes_tidy = case_when(notes %in% c("background", "review",
"review/synthesis", "book") ~ "reviews/background",
notes %in% c("modeling", "modelling") ~ "modeling",
notes %in% c("no constant",
"no constant/flux",
"no constant/fux",
"no flux pattern",
"no flux treatment",
"no flux trt") ~ "no flux/constant",
notes %in% c("no error",
"no error reported") ~ "no error",
notes %in% c("non-biologically relevant",
"not extractable") ~ "not relevant",
notes %in% c("non-lab") ~ "uncontrolled var",
notes %in% c("non-english", "paywall") ~ "non-english",
TRUE ~ "greater diff"))
# Per-category counts; the final sum is the total number of screened records.
tally <- count(prisma, notes_tidy)
sum(tally$n)
|
f273d947534d41bba682eba5c673ebd1fe0a9886
|
e0f1cbfce20607ae9d771a65aeed59b2d9fc7a5b
|
/mod-metr.R
|
f32220b31542e01ef9f52e0107ebd8b61dd022ff
|
[] |
no_license
|
DrRoad/demo-shiny-modules
|
2a39d6dde57390114a25dba52d48b3f1ef112f0a
|
12d9632a462b5c42a06f19698f20fe025efcbcb1
|
refs/heads/master
| 2022-11-20T04:39:07.458452
| 2020-07-26T17:25:50
| 2020-07-26T17:25:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
r
|
mod-metr.R
|
# metric module ----
# UI for the metric module: one row holding the text summary and the plot
# sub-module UIs, both namespaced under this module's `id`.
metric_ui <- function(id) {
fluidRow(
text_ui(NS(id, "metric")),
plot_ui(NS(id, "metric"))
)
}
# Server for the metric module: wires the text and plot sub-modules to the
# same inputs.
#   df         - reactive returning the data frame to summarise
#   vbl        - name of the column to display
#   threshhold - numeric cutoff forwarded to both sub-modules
metric_server <- function(id, df, vbl, threshhold) {
moduleServer(id, function(input, output, session) {
text_server("metric", df, vbl, threshhold)
plot_server("metric", df, vbl, threshhold)
})
}
# Stand-alone demo app for the metric module, using a 30-day dummy data
# set and a fixed threshold of 15.
metric_demo <- function() {
df <- data.frame(day = 1:30, arr_delay = 1:30)
ui <- fluidPage(metric_ui("x"))
server <- function(input, output, session) {
metric_server("x", reactive({df}), "arr_delay", 15)
}
shinyApp(ui, server)
}
|
8e9c49654977e642fb2b007786746ccbaa2d4a45
|
c9c6aed13ac8d59c59dd1df61a8a9a497c7b16a9
|
/eco_model.r
|
4b36b44c5d83ad34c921a14111cfa7c4a9a68de2
|
[
"MIT"
] |
permissive
|
mpdannenberg/geog-4470
|
a795e4b9493bc67b798cdc4d4aad870062a853df
|
93bcc6a42ac52ec1d0c39cae274926994dfabefa
|
refs/heads/master
| 2020-12-10T16:29:43.893652
| 2020-06-23T14:54:41
| 2020-06-23T14:54:41
| 233,647,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,608
|
r
|
eco_model.r
|
## Functions for ecological model
# Convert a calendar date to day-of-year, returned as strftime()'s "%j"
# zero-padded character string (e.g. "032").
#
# yr, mo, dy: year, month and day numbers (or strings).
#
# Fix: the previous version used paste(c(yr,'-',mo,'-',dy), collapse=''),
# which collapses VECTOR inputs into a single garbage string. paste() with
# sep = "-" is vectorized, so equal-length vectors now yield one
# day-of-year per element; scalar behavior is unchanged.
date2doy <- function(yr, mo, dy) {
  dt <- paste(yr, mo, dy, sep = "-")
  strftime(dt, format = "%j")
}
# Populate the global environment (via <<-) with the site, physical, leaf
# and soil constants used throughout the model. Called once for its side
# effects; the individual values are documented inline below.
define_global_variables <- function(){
# Site constants
PAR2SWIR <<- 0.43 # PAR to shortwave radiation ratio
STD_MERAD <<- -75.0 # Time zone longitude
LATI <<- 35.9736 # Site latitude (CHANGE TO MMS)
LONGI <<- -79.1004 # Site longitude (CHANGE TO MMS)
hs <<- 20.0 # Screen height
h2 <<- 16.2 # Default canopy height
PA <<- 101325.0 # Air pressure in Pa
CP <<- 1010.0 # Specific heat of air (J kg-1 K-1)
CA <<- 410.0 # Atmospheric CO2 concentration (ppm)
# Physical constants
SO <<- 1367.0 # Solar constant (W m-2)
# Leaf constants
alpha_par <<- 0.80 # Leaf absorptance in the PAR band (used in canopy radiation code)
alpha_nir <<- 0.20 # Leaf absorptance in the NIR band
rho_s <<- 0.10 # Presumably soil reflectance -- confirm against radiation code
x <<- 1.0 # Leaf angle distribution parameter
omega <<- 1.0 # Clumping index
m <<- 8.0 # Ball-Berry model slope
g1_blayer <<- 0.005 # Leaf boundary layer conductance in m/s
g0 <<- 0.010 # Parameter in Leuning's gs function in mol m-2 s-1
# Soil constants
Ksat <<- 0.6 # Saturated conductance (m day-1)
wilting_p <<- 0.08 # Wilting point soil water content (unitless)
porosity <<- 0.54 # Porosity at depth zero (unitless)
p1 <<- 0.478 # Air entry pressure in meters of water
p2 <<- 0.186 # Pore size index (unitless)
p_0 <<- 4000.0 # Porosity decay in m-1
m_z <<- 0.24 # Saturated conductivity decay parameter (in m-1)
soil_depth <<- 0.325 # Soil depth in rooting zone
}
# Aerodynamic resistance of the canopy air layer.
#
# u_h : wind speed at the reference/screen height
# h   : reference measurement height (m)
# h_o : canopy height (m)
#
# Uses compute_toc_wind() (defined elsewhere in this file) for the
# top-of-canopy wind, attenuates it to mid-canopy, and applies the
# log-profile resistance formula. Where the mid-canopy wind is zero the
# resistance is set to a large sentinel (9999999) instead of Inf.
#
# Changes: the per-element sapply(max) clamp is replaced by the vectorized
# pmax() (same values, and returns numeric(0) rather than list() for empty
# input); the unused local h_u was removed.
canopy_aerodynamic_resistance <- function(u_h, h, h_o) {
  cn <- 0.002 # wind attenuation coefficient
  # Zero-plane displacement and roughness length (standard 0.7h / 0.1h rules)
  d_o <- 0.7 * h_o
  zo_o <- 0.1 * h_o
  # Top-of-canopy wind, attenuated to mid-canopy height.
  # NOTE(review): cn * (0.5 * h_o) / h_o - 1 reduces to cn/2 - 1, a
  # constant -- this mirrors the original formula; confirm the intended
  # exponent of the attenuation profile.
  u_o <- compute_toc_wind(u_h, h, h_o)
  u_m <- u_o * exp(cn * (0.5 * h_o) / h_o - 1)
  u_m <- pmax(u_m, 0)
  # Keep the reference height above the displacement height.
  # NOTE(review): assumes scalar h -- confirm callers never pass a vector.
  if (h < d_o) h <- d_o + 0.0001
  ra <- (log((h_o - d_o) / zo_o) / 0.41)^2 / u_m
  ra[u_m == 0] <- 9999999
  return(ra)
}
# Ball-Woodrow-Berry canopy stomatal conductance.
#
# psn  : net assimilation rate (units as used by the BWB term -- confirm)
# Tair : air temperature (deg C)
# a1   : BWB slope parameter
# vpda : vapour pressure deficit of the air (Pa, consistent with the
#        Magnus saturation formula below)
#
# Relies on the globals g0 (minimum conductance, mol m-2 s-1) and CA
# (atmospheric CO2, ppm) set by define_global_variables().
# Returns a data.frame with gc (CO2 conductance, m s-1) and gs
# (water-vapour conductance, gs = 1.56 * gc).
canopy_bwb_stomatal_conductance <- function(psn, Tair, a1, vpda) {
  # Saturation vapour pressure via a Magnus-type formula, then the
  # relative-humidity term of the BWB model.
  sat_vp <- 610.7 * exp(17.38 * Tair / (239.0 + Tair))
  rel_hum <- 1.0 - vpda / sat_vp
  # BWB conductance in mol m-2 s-1, converted to m s-1 (22.4 L per mol).
  gc_mol <- g0 + a1 * psn * rel_hum / CA
  gc_ms <- gc_mol * (22.4 / 1000.0)
  data.frame(gc = gc_ms, gs = 1.56 * gc_ms)
}
# Net leaf photosynthesis from the coupled Farquhar / Ball-Woodrow-Berry
# model, solved analytically as two quadratics (Rubisco-limited Av and
# RuBP-regeneration-limited Aj); the smaller of the two is returned.
#
# psnp     : parameter list (O2, Ko, Kc, gamma, ppe, Vmax_*/Rd_* pairs)
# apar_wm2 : absorbed PAR in W m-2 (converted to umol m-2 s-1 via * 4.55)
# Tair     : air temperature (deg C)
# vpda     : vapour pressure deficit (Pa)
# fstheta  : NOTE(review): currently unused -- kept for interface
#            compatibility; confirm whether a soil-water stress factor
#            was meant to scale Vmax.
# leaftype : 1 = sunlit leaf parameters, otherwise shaded.
#
# Relies on globals m, g0, PA, CA set by define_global_variables().
#
# BUG FIX: the electron-transport quadratic root divided by (2.0/aa)
# instead of (2.0*aa); with theta = aa = 0.7 the correct root of
# theta*J^2 - (Jmax + I)*J + Jmax*I = 0 is (-b - sqrt(b^2-4ac)) / (2a),
# consistent with the 4*aa*cc term already used in the discriminant.
compute_bwb_farq_psn <- function(psnp, apar_wm2, Tair, vpda, fstheta, leaftype) {
  # Leaf-surface relative humidity from saturation vapour pressure (Pa).
  svp <- 610.7 * exp(17.38 * Tair / (239.0 + Tair))
  rh <- 1.0 - vpda / svp
  # Convert absorbed PAR from W m-2 to photon flux (umol m-2 s-1).
  apar <- apar_wm2 * 4.55
  Ca <- CA
  # calculate atmospheric O2 in Pa, assuming 21% O2 by volume
  O2 <- psnp$O2
  Ko <- psnp$Ko
  # Convert Pa to ppm
  Kc <- psnp$Kc * 1e6 / PA
  gamma <- psnp$gamma * 1e6 / PA
  ppe <- psnp$ppe
  # Sunlit vs shaded leaf capacity and dark respiration.
  if (leaftype == 1) {
    Vmax <- psnp$Vmax_sunlit
    Rd <- psnp$Rd_sunlit
  } else {
    Vmax <- psnp$Vmax_shaded
    Rd <- psnp$Rd_shaded
  }
  Jmax <- 2.1 * Vmax
  g1 <- 0.01 + m * rh / Ca
  K <- Kc * (1.0 + O2 / Ko)
  # --- Rubisco-limited assimilation rate (Av): quadratic from coupling
  # the Vcmax limitation with the BWB conductance model.
  aa <- Ca * g1 + g1 * K - 1.0
  bb <- Ca * g0 + g0 * K - Vmax * Ca * g1 + Vmax + Vmax * g1 * gamma + Rd * Ca * g1 - Rd + Rd * g1 * K
  cc <- -Vmax * Ca * g0 + Vmax * gamma * g0 + Rd * Ca * g0 + Rd * K * g0
  det <- bb * bb - 4.0 * aa * cc
  Av <- (-bb + sqrt(det)) / (2.0 * aa)
  Av[det < 0] <- -1.0  # sentinel: no real root
  # --- RuBP-regeneration-limited rate (Aj). Electron transport J from
  # the non-rectangular hyperbola with curvature theta = 0.7.
  aa <- 0.7
  bb <- -Jmax - (apar / ppe)
  cc <- Jmax * apar / ppe
  J <- (-bb - sqrt(bb * bb - 4 * aa * cc)) / (2.0 * aa)  # was / (2.0 / aa)
  aa <- 4.5 * Ca * g1 - 4.5 + 10.5 * gamma * g1
  bb <- 4.5 * Ca * g0 + 10.5 * gamma * g0 - J * g1 * Ca + J + gamma * g1 * J + 4.5 * Rd * Ca * g1 - 4.5 * Rd + 10.5 * gamma * g1 * Rd
  cc <- -J * Ca * g0 + 4.5 * Rd * Ca * g0 + 10.5 * gamma * g0 * Rd
  det <- bb * bb - 4.0 * aa * cc
  Aj <- (-bb + sqrt(det)) / (2.0 * aa)
  Aj[det < 0] <- -1.0
  # Net assimilation = the more limiting (smaller) of Av and Aj, with the
  # -1 sentinels resolved: both invalid -> 0; one invalid -> use the other.
  A <- pmin(Aj, Av)
  A[Av == -1.0 & Aj == -1.0] <- 0.0
  A[Av == -1.0 & Aj != -1.0] <- Aj[Av == -1.0 & Aj != -1.0]
  A[Aj == -1.0 & Av != -1.0] <- Av[Aj == -1.0 & Av != -1.0]
  return(A)
}
compute_canopy_intercepted_shortwave_rad <- function(par_D, par_d, nir_D, nir_d, lai, Kb, Kd){
  # Shortwave (PAR + NIR) absorbed per unit leaf area by sunlit and shaded
  # leaves in a two-class (sunlit/shaded) canopy scheme.
  #
  # par_D/par_d : direct/diffuse PAR at top of canopy (W m-2)
  # nir_D/nir_d : direct/diffuse NIR at top of canopy (W m-2)
  # lai         : leaf area index
  # Kb/Kd       : beam/diffuse extinction coefficients
  #
  # Uses globals alpha_par, alpha_nir (leaf absorptivities). Returns a
  # data.frame with absorbed PAR and NIR for sunlit and shaded leaves.
  # PAR: scattered-beam and diffuse components
  Q_sc_par <- par_D*(exp(-Kb*sqrt(alpha_par)*lai)-exp(-Kb*lai))/2.0
  Q_d_par <- par_d*(1.0-exp(-Kd*sqrt(alpha_par)*lai))/(Kd*sqrt(alpha_par)*lai)
  # Fluxes penetrating to the soil surface (computed but not returned here;
  # the same expressions are re-derived in compute_canopy_net_rad)
  Q_par_soil <- par_D*exp(-Kb*sqrt(alpha_par)*lai)+par_d*exp(-Kd*sqrt(alpha_par)*lai)
  Q_nir_soil <- nir_D*exp(-Kb*sqrt(alpha_nir)*lai)+nir_d*exp(-Kd*sqrt(alpha_nir)*lai)
  # Sunlit leaves additionally absorb the unscattered direct beam
  sunlit_apar <- alpha_par*(par_D*Kb+ Q_sc_par + Q_d_par)
  shaded_apar <- alpha_par*(Q_sc_par + Q_d_par)
  # NIR: scattered-beam and diffuse components
  Q_sc_nir <- nir_D*(exp(-Kb*sqrt(alpha_nir)*lai)-exp(-Kb*lai))/2.0
  # BUG FIX: the diffuse-NIR term previously reused par_d and alpha_par
  # (copy-paste from the PAR branch above); it must use nir_d and alpha_nir,
  # mirroring Q_d_par.
  Q_d_nir <- nir_d*(1.0-exp(-Kd*sqrt(alpha_nir)*lai))/(Kd*sqrt(alpha_nir)*lai)
  sunlit_anir <- alpha_nir*(nir_D*Kb+Q_sc_nir+Q_d_nir)
  shaded_anir <- alpha_nir*(Q_sc_nir+Q_d_nir)
  rad <- data.frame(sunlit_apar=sunlit_apar,
                    shaded_apar=shaded_apar,
                    sunlit_anir=sunlit_anir,
                    shaded_anir=shaded_anir)
  return(rad)
}
compute_canopy_net_rad <- function(rad_short, rad_long, toc_rad, sunlit_lai, shaded_lai, Kb, Kd){
# Net radiation balance (W m-2) for sunlit leaves, shaded leaves, the
# forest floor, the canopy, and the whole stand.
#
# rad_short : absorbed shortwave per leaf class
#             (output of compute_canopy_intercepted_shortwave_rad)
# rad_long  : emitted longwave components (output of compute_longwave_rad)
# toc_rad   : top-of-canopy direct/diffuse PAR and NIR
#             (output of compute_toc_down_rad)
# Kb / Kd   : beam / diffuse extinction coefficients
#
# Uses global rho_s (soil reflectance). Returns a data.frame of the five
# net-radiation terms.
lai <- sunlit_lai + shaded_lai
# Leaf-class net radiation: absorbed shortwave plus longwave gains from the
# floor and sky, minus two-sided longwave emission by the canopy. The term
# (1 - exp(-Kd*lai))/(Kd*lai) averages longwave interception over the canopy.
# NOTE(review): sunlit and shaded longwave terms are identical here, so the
# two classes differ only in absorbed shortwave.
Rnet_sunlit <- rad_short$sunlit_apar + rad_short$sunlit_anir +
rad_long$floor * (1.0 - exp(-Kd*lai)) / (Kd*lai) +
rad_long$air * (1.0 - exp(-Kd*lai)) / (Kd*lai) +
-2.0 * rad_long$canopy * (1.0 - exp(-Kd*lai)) / (Kd*lai)
Rnet_shaded <- rad_short$shaded_apar + rad_short$shaded_anir +
rad_long$floor * (1.0 - exp(-Kd*lai)) / (Kd*lai) +
rad_long$air * (1.0 - exp(-Kd*lai)) / (Kd*lai) +
-2.0 * rad_long$canopy * (1.0 - exp(-Kd*lai)) / (Kd*lai)
# Floor: shortwave transmitted through the canopy (times 1 - soil albedo)
# plus longwave from sky and canopy, minus the floor's own emission.
Rnet_floor <- (1.0 - rho_s) * (toc_rad$par_D * exp(-Kb*sqrt(alpha_par)*lai)+
toc_rad$par_d * exp(-Kd*sqrt(alpha_par)*lai)+
toc_rad$nir_D * exp(-Kb*sqrt(alpha_nir)*lai)+
toc_rad$nir_d * exp(-Kd*sqrt(alpha_nir)*lai))+
rad_long$air * exp(-Kd*lai)+
rad_long$canopy * (1.0-exp(-Kd*lai))-
rad_long$floor
# Scale the per-leaf-area terms by each class's LAI to get canopy totals.
Rnet_canopy <- sunlit_lai*Rnet_sunlit + shaded_lai*Rnet_shaded
Rnet_stand <- Rnet_canopy + Rnet_floor
out <- data.frame(Rnet_sunlit = Rnet_sunlit,
Rnet_shaded = Rnet_shaded,
Rnet_floor = Rnet_floor,
Rnet_canopy = Rnet_canopy,
Rnet_stand = Rnet_stand)
return(out)
}
compute_longwave_rad <- function(Tair, Tsoil, vpd, cloud){
  # Longwave (thermal) radiation emitted by the air, the canopy, and the
  # forest floor.
  #
  # Tair/Tsoil : air and soil temperature (deg C)
  # vpd        : vapour pressure deficit (Pa)
  # cloud      : cloud-cover fraction [0, 1]
  #
  # Returns data.frame(air, canopy, floor, air_emissivity); fluxes in W m-2.
  sigma <- 5.6704e-8            # Stefan-Boltzmann constant
  t_air_k <- Tair + 273.13
  t_soil_k <- Tsoil + 273.13
  # Actual vapour pressure (hPa): saturation vp minus the deficit
  sat_vp <- 610.7 * exp(17.38 * Tair / (239.0 + Tair))
  vap_hpa <- (sat_vp - vpd) / 100.0
  # Clear-sky emissivity (Brutsaert form), blended toward 0.84 with cloud
  emis_clear <- 1.24 * (vap_hpa / t_air_k)^(1.0 / 7.0)
  emis <- (1.0 - 0.84 * cloud) * emis_clear + 0.84 * cloud
  data.frame(air = emis * sigma * t_air_k^4.0,
             canopy = 1.0 * sigma * t_air_k^4.0,   # canopy emissivity = 1
             floor = 0.97 * sigma * t_soil_k^4.0,  # floor emissivity = 0.97
             air_emissivity = emis)
}
compute_potential_exfiltration <- function(Sr, depth_s, K_sat, Ksat_decay, psi_air_entry, pore_size_index, pore_decay, pore0){
  # Potential exfiltration (soil-evaporation supply), returned in W m-2.
  #
  # Sr              : relative saturation (clamped to [0, 1])
  # depth_s         : soil depth considered (m)
  # K_sat           : saturated hydraulic conductivity (m day-1)
  # Ksat_decay      : exponential decay scale for K_sat (m-1); <= 0 disables
  # psi_air_entry   : air-entry pressure (m of water)
  # pore_size_index : Brooks-Corey pore size index
  # pore_decay      : exponential decay scale for porosity (m-1)
  # pore0           : porosity at the surface
  # Depth-averaged porosity assuming an exponential profile
  porosity_average <- pore0 * pore_decay * (1.0-exp(-1.0*depth_s/pore_decay))
  # Depth-averaged saturated conductivity (decay optional).
  # BUG FIX: the body previously referenced the global `Ksat` instead of the
  # parameter `K_sat`, silently ignoring the argument.
  if (Ksat_decay > 0){
    Ksat_average <- Ksat_decay * K_sat * (1.0-exp(-1.0*depth_s/Ksat_decay))
  } else Ksat_average <- K_sat
  S <- max(0,min(Sr,1))
  # Desorption-volume formula for maximum exfiltration (m day-1)
  potential_exfiltration <- S ^ ((1.0 / (2.0*pore_size_index))+2.0) *
    sqrt((8.0 * porosity_average *
          Ksat_average * psi_air_entry) /
         (3.0 * (1.0 + 3.0 * pore_size_index) *
          (1.0 + 4.0 * pore_size_index)))
  # Cap at 1 mm day-1, then convert m day-1 of water to a latent-heat
  # equivalent flux (W m-2) using L = 597.3 cal g-1 * 4.18 J cal-1
  potential_exfiltration <- min(0.001, potential_exfiltration)
  pe_wm2 <- potential_exfiltration * 1e6 * 597.3 * 4.18 / (24.0*3600.0)
  return(pe_wm2)
}
compute_psn_parameters <- function(t, Kb, L_sunlit, L_shaded){
# Temperature-corrected Farquhar kinetic parameters for sunlit and shaded
# leaves.
#
# t        : air/leaf temperature (deg C); may be a vector
# Kb       : beam extinction coefficient
# L_sunlit / L_shaded : sunlit and shaded leaf area index
#
# Uses globals PA (air pressure, Pa) and CA (ambient CO2, ppm). Returns a
# data.frame consumed by compute_bwb_farq_psn (gamma/Kc in Pa, Ko in Pa,
# O2 in Pa, Vmax/Rd in umol m-2 s-1, ppe dimensionless).
# Local static variables
Kc25 <- 404.0 # (ubar) MM const carboxylase, 25 deg C
q10Kc <- 2.1 # (DIM) Q_10 for kc
Ko25 <- 248.0 # (mbar) MM const oxygenase, 25 deg C
q10Ko <- 1.2 # (DIM) Q_10 for ko
act25 <- 3.6 # (umol/mgRubisco/min) Rubisco activity
q10act <- 2.4 # (DIM) Q_10 for Rubisco activity
Kn <- 0.52 # extinction coefficient for Nitrogen distribution
cica <- 0.67 # Ci to Ca ratio
alpha <- 0.10 # quantum yield efficiency
L <- L_sunlit + L_shaded
# Calculate atmospheric O2 in Pa, assumes 21% O2 by volume
O2 <- 0.21 * PA
# Correct kinetic constants for temperature, and do unit conversions.
# Below 15 deg C a steeper Q10 (1.8x) applies; the vectorised overwrite
# restores the standard Q10 where t > 15.
Ko <- Ko25 * q10Ko^((t-25.0)/10.0)
Ko <- Ko * 100.0 # mbar --> Pa
Kc <- Kc25 * (1.8*q10Kc)^((t-25.0)/10.0) / q10Kc
Kc[t>15.0] <- Kc25 * q10Kc^((t[t>15.0]-25.0)/10.0)
act <- act25 * (1.8*q10act)^((t-25.0)/10.0) / q10act
act[t>15.0] <- act25 * q10act^((t[t>15.0]-25.0)/10.0)
Kc <- Kc * 0.10 # ubar --> Pa
act <- act * 1e6/60.0 # umol/mg/min --> umol/kg/s
# Calculate gamma (Pa), assumes Vomax/Vcmax = 0.21
gamma <- 0.5 * 0.21 * Kc * O2 / Ko
# Calculate Vmax from leaf nitrogen data and Rubisco activity, distributing
# canopy Vmax over sunlit/shaded classes with an exponential N profile (Kn)
Vmax25 <- 59.0 # umol/m2leaf/s at the top of the canopy (Lai et al, 2002, PCE, 25:1095-1119)
Vmax25_canopy <- L * Vmax25 * (1.0 - exp(-L*Kn)) / (Kn*L)
Vmax25_sunlit <- L * Vmax25 * (1.0 - exp(-Kn-Kb*L)) / (Kn+Kb*L)
# Shaded-class Vmax is the canopy total minus the sunlit share, expressed
# per unit leaf area of each class
Vmax25_shaded <- (Vmax25_canopy - Vmax25_sunlit) / L_shaded
Vmax25_sunlit <- Vmax25_sunlit / L_sunlit
# Peaked temperature response of Vmax
Vmax_sunlit <- Vmax25_sunlit * exp(0.051*(t-25.0))/(1.0+exp(0.205*(t-41.0)))
Vmax_shaded <- Vmax25_shaded * exp(0.051*(t-25.0))/(1.0+exp(0.205*(t-41.0)))
# Dark respiration scales with Vmax
Rd_sunlit <- 0.015 * Vmax_sunlit
Rd_shaded <- 0.015 * Vmax_shaded
# Inverse quantum-use efficiency for the electron-transport limitation
ppe <- 1.0 / (alpha*(4.0*cica*CA+2.0*gamma)/(cica*CA-gamma))
psn_para <- data.frame(gamma=gamma,
Vmax_sunlit=Vmax_sunlit,
Vmax_shaded=Vmax_shaded,
Ko=Ko,
Kc=Kc,
O2=O2,
Rd_sunlit=Rd_sunlit,
Rd_shaded=Rd_shaded,
ppe=ppe)
return(psn_para)
}
compute_soil_surface_resistance <- function(theta){
  # Soil-surface resistance to evaporation (s m-1) from volumetric soil
  # water content theta (m3 m-3). Conductance falls linearly as the soil
  # dries and is held constant at 0.001429 m s-1 for theta > 0.185.
  conductance <- ifelse(theta > 0.185,
                        0.001429,
                        1.0 / (-83000.0 * theta + 16100.0))
  1.0 / conductance
}
compute_sun_angles <- function(lat, lon, yr, mo, dt, hr, mi){
  # Solar geometry for a site and time: Julian day, zenith angle, solar
  # declination, and hour angle (angles in radians).
  # Uses global STD_MERAD (time-zone standard meridian, degrees) and the
  # external helper date2doy(yr, mo, dy).
  deg2rad <- pi / 180.0
  # Local solar time, shifted by the longitude offset from the standard
  # meridian (15 degrees per hour)
  t_solar <- hr + mi / 60.0 + (lon - STD_MERAD) / 15.0
  hour_angle <- (12.0 - t_solar) * 15.0 * deg2rad
  day_of_year <- as.numeric(mapply(date2doy, yr = yr, mo = mo, dy = dt))
  # Solar declination (Cooper's formula)
  decl <- 23.45 * sin(2.0 * pi * (284.0 + day_of_year) / 365.0) * deg2rad
  # sin(elevation) -> zenith angle
  sin_elev <- sin(lat * deg2rad) * sin(decl) +
    cos(lat * deg2rad) * cos(decl) * cos(hour_angle)
  zen <- acos(sin_elev)
  # Azimuth is computed as in the original but is not returned
  azi <- asin(cos(decl) * sin(hour_angle) / cos(pi / 2.0 - zen))
  data.frame(jday = day_of_year, zenith = zen,
             sun_decl = decl, hourangle = hour_angle)
}
compute_toc_down_rad <- function(par, sun_zenith, jday){
  # Partition incoming PAR into direct/diffuse visible (PAR) and NIR
  # components at the top of the canopy, following Weiss & Norman (1985).
  #
  # par        : incident PAR (umol m-2 s-1); may be a vector
  # sun_zenith : solar zenith angle (radians)
  # jday       : Julian day of year
  #
  # Uses globals PAR2SWIR (PAR fraction of shortwave) and SO (solar
  # constant, W m-2). Returns data.frame(par_D, par_d, nir_D, nir_d, cloud)
  # where *_D is direct and *_d diffuse (W m-2) and cloud is an estimated
  # cloud-cover fraction.
  # Atmospheric transmissivity (measured shortwave / extraterrestrial)
  tau_t <- (par/4.55)*(1.0/PAR2SWIR)/(SO*(1.0+0.033*cos(2.0*pi*(jday-10.0)/365.0)))
  # Empirical constants from Weiss & Norman 1985, Eq. 12
  A <- 0.9
  B <- 0.7
  C <- 0.88
  D <- 0.68
  # Cosine of the solar zenith angle; clamp below-horizon sun to 0
  cos_theta <- cos(sun_zenith)
  cos_theta[cos_theta<0] <- 0
  # Potential visible (V) and NIR (N) radiation at top of atmosphere
  # (SO split 47.3% visible / 52.7% NIR, with the orbital correction).
  # Uses the SO global for consistency with tau_t above.
  S_V <- 0.473*SO*(1.0+0.033*cos(2.0*pi*(jday-10)/365))
  S_N <- 0.527*SO*(1.0+0.033*cos(2.0*pi*(jday-10)/365))
  # Potential direct and diffuse visible on a horizontal surface
  R_DV <- S_V*exp(-0.185/cos_theta)*cos_theta
  R_dV <- 0.4*(S_V-R_DV)*cos_theta
  R_V <- R_DV + R_dV
  # Absorbed NIR by atmospheric water vapor (guard cos_theta == 0)
  cos_theta_noZero <- cos_theta
  cos_theta_noZero[cos_theta==0] <- 0.0000000000000001 # avoid division by zero
  R_aN <- S_N * 10^(-1.195+0.4459*log10(1.0/cos_theta_noZero)-0.0345*(log10(1.0/cos_theta_noZero)^2.0))
  # Potential direct NIR on a horizontal surface (cannot be negative)
  R_DN <- (S_N*exp(-0.06/cos_theta)-R_aN)*cos_theta
  R_DN[R_DN<0] <- 0
  # Potential diffuse NIR and total potential NIR
  R_dN <- 0.6*(S_N-R_DN-R_aN)*cos_theta
  R_N <- R_DN + R_dN
  # Ratio of actual to potential total radiation at the ground.
  # NOTE(review): RATIO is 0/0 = NaN in darkness (R_V + R_N == 0); the
  # f_DV/f_DN overrides below handle that case.
  RATIO <- (par/4.55)*(1.0/PAR2SWIR) / (R_V+R_N)
  # Actual fraction of direct visible radiation (ratio capped at A)
  RATIO[RATIO>A] <- A
  f_DV <- (R_DV/(R_V*0.928)) * (1.0 - ((A-RATIO)/B) ^ (2.0/3.0))
  f_DV[R_V==0] <- 0
  # Actual fraction of direct NIR radiation (ratio capped at C)
  RATIO[RATIO>C] <- C
  f_DN <- (R_DN/R_N) * (1.0 - ((C-RATIO)/D) ^ (2.0/3.0))
  f_DN[R_N==0] <- 0
  # BUG FIX: max() collapsed the cloud estimate of a vector input to a
  # single scalar; pmax() keeps the per-observation cloud fraction.
  rad <- data.frame(par_D = f_DV * par/4.55,
                    par_d = par/4.55 - f_DV * par/4.55,
                    nir_D = f_DN * (par/4.55) * ((1-PAR2SWIR)/PAR2SWIR),
                    nir_d = (par/4.55) * ((1-PAR2SWIR)/PAR2SWIR) - f_DN * (par/4.55) * ((1-PAR2SWIR)/PAR2SWIR),
                    cloud = pmax(1.0 - tau_t/0.7, 0))
  return(rad)
}
compute_toc_wind <- function(u_h, h, z){
  # Log-profile extrapolation of wind speed from measurement height h down
  # to height z (top of a stratum). Ported from RHESSys / Biome-BGC 4.11.
  #
  # u_h : wind speed at height h (m s-1)
  # Returns the wind speed at height z (m s-1).
  displacement <- 0.63 * z   # zero-plane displacement
  roughness <- 0.1 * z       # roughness length
  u_h * log((z - displacement) / roughness) /
    log((h - displacement) / roughness)
}
penman_monteith <- function(Tair, Rnet, gs, ga, vpda){
  # Penman-Monteith latent heat flux (evapotranspiration) in W m-2.
  #
  # Tair : air temperature (deg C)
  # Rnet : net radiation available for evaporation (W m-2)
  # gs   : surface (stomatal/soil) conductance (m s-1)
  # ga   : aerodynamic conductance (m s-1)
  # vpda : vapour pressure deficit of the air (Pa)
  #
  # Uses globals CP (specific heat of air, J kg-1 K-1) and PA (air
  # pressure, Pa).
  # Density of air (rho, kg m-3) as a f'n of air temp
  rho <- 1.292 - (0.00428*Tair)
  # Latent heat of vaporization (J kg-1) as a f'n of air temp
  lhvap = 2.5023e6 - 2430.54*Tair
  # Saturation vapor pressure at Tair (Pa)
  es <- 610.7 * exp(17.38 * Tair / ( 239.0 + Tair))
  # Slope of es-T curve at Tair (Pa/deg C) (from Campbell & Norman 1998)
  s <- 17.38*239.0*es / (239.0+Tair)^2
  # Psychrometric constant (Pa/deg C)
  gamma <- CP * PA / ( 0.622*lhvap )
  # BUG FIX: the function previously ended with an assignment, which
  # returns its value invisibly; return the flux explicitly.
  ET <- ((s*Rnet) + (rho*CP*vpda*ga)) / (gamma*(1.0 + ga/gs) +s)
  return(ET)
}
|
9305c05290d1dd00658d0c05d9c253fd96464ecd
|
857c1cead6e3e79a0f1e425eccfb58bfa8d37f6a
|
/plot2.R
|
e6003bc5b5140156297c538eadfb20ef88a206cf
|
[] |
no_license
|
tijanatadic/ExData_Plotting1
|
e7d6cdcab3fd922194a2fc1433fffac15cd3ee37
|
e2efaa6c9de7c71cfc5501f1a0973a741598aa62
|
refs/heads/master
| 2020-12-03T00:22:41.133298
| 2017-07-06T09:45:03
| 2017-07-06T09:45:03
| 96,023,032
| 0
| 0
| null | 2017-07-02T12:50:16
| 2017-07-02T12:50:16
| null |
UTF-8
|
R
| false
| false
| 728
|
r
|
plot2.R
|
#Plot 2
# Exploratory Data Analysis course: plot the Global Active Power time series
# for 2007-02-01 and 2007-02-02 from the UCI household power dataset.
#Read subset of the data, only for dates 1/2/2007 and 2/2/2007
fileName<-"./C4/household_power_consumption.txt"
# Skip everything before the first line matching "1/2/2007"; "?" and "NA"
# strings become NA.
# NOTE(review): grep() returns ALL matching line indices but read.table's
# `skip` expects a single value; "1/2/2007" also matches dates such as
# 11/2/2007, and nrow=2879 assumes exactly two days of minute data --
# confirm against the raw file.
data<-read.table(fileName, na.strings=c("?", "NA"), sep=";", skip=grep("1/2/2007", readLines(fileName, ok=TRUE)), nrow=2879)
data<-na.omit(data)
#assign column names for data frame (the header row was skipped above)
names<-c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
colnames(data)<-names
# Combine Date + Time into POSIXlt timestamps for the x-axis
day<-strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Render the line plot to a 480x480 PNG
png("./C4/plot2.png", width=480, height=480)
plot(day, data$Global_active_power, type = "l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
5f1284e44b29ac5e6a1ce8588e18db669178a921
|
c4d995e188be8b3059352fa006ca18e87d81bf0e
|
/man/supunsup_clean.Rd
|
a9c29a51dd73e1da4192b301876eecc9f8f84718
|
[
"MIT"
] |
permissive
|
kleinschmidt/phonetic-sup-unsup
|
1adc867e3deb38515e0a22800b69156f3dee398b
|
5c51177e61d7be67942a0657d6ce22b60956aab4
|
refs/heads/master
| 2021-01-17T07:04:08.934062
| 2017-02-01T16:00:49
| 2017-02-01T16:00:49
| 32,366,435
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 459
|
rd
|
supunsup_clean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{supunsup_clean}
\alias{supunsup_clean}
\title{Non-excluded assignments (Expt. 1)}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 82362 rows and 27 columns.}
\usage{
supunsup_clean
}
\description{
Non-excluded assignments (Expt. 1)
}
\seealso{
Format described in \code{\link{supunsup}}
}
\keyword{datasets}
|
d05ab9a490fa637c6752b86bbd0fb376ea2284a0
|
caeb8764dabd4d0ed17d37e7486ad7e3d714b04e
|
/R/sym_symbol.R
|
33e7706e6aad3bb36bc8832bdab49f0893d18029
|
[] |
no_license
|
cran/caracas
|
579b5cefd2b7a2db85e691d140f2215f4275c3bc
|
42b3bf8eb37ddf5b6adde28b33fdb3359ecc4f11
|
refs/heads/master
| 2023-08-16T18:15:53.677578
| 2023-08-11T13:13:47
| 2023-08-11T15:30:42
| 236,567,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,960
|
r
|
sym_symbol.R
|
# Shared error-message suffix used by stopifnot_symbol().
TXT_NOT_CARACAS_SYMBOL <- paste0("must be a caracas_symbol, ",
"e.g. constructed by symbol() ",
"followed by elementary operations")
# Regex for a valid Python identifier (a letter followed by letters, digits
# or underscores); used by verify_variable_name().
# NOTE(review): "PYHTON" is a typo for "PYTHON"; the name is kept because it
# may be referenced elsewhere in the package.
PATTERN_PYHTON_VARIABLE <- "[a-zA-Z]+[a-zA-Z0-9_]*"
stopifnot_symbol <- function(x) {
  # Abort with an informative error unless x is a caracas_symbol;
  # returns invisibly on success.
  if (inherits(x, "caracas_symbol")) {
    return(invisible(NULL))
  }
  stop(paste0("'x' ", TXT_NOT_CARACAS_SYMBOL))
}
verify_variable_name <- function(x) {
  # Validate that x is a single string usable as a Python variable name
  # (per PATTERN_PYHTON_VARIABLE); errors otherwise, returns nothing.
  if (length(x) != 1L) {
    stop("The name must have length 1")
  }
  full_match <- paste0("^", PATTERN_PYHTON_VARIABLE, "$")
  if (!grepl(full_match, x)) {
    stop(paste0("'", x, "' is not a valid variable name"))
  }
}
construct_symbol_from_pyobj <- function(pyobj) {
  # Wrap a Python (SymPy) object reference in a caracas_symbol container.
  structure(list(pyobj = pyobj), class = "caracas_symbol")
}
#' Create a symbol from a string
#'
#' Evaluates the string as Python/SymPy code, after rewriting literal
#' numeric fractions (e.g. `1/3`) with SymPy's `S()` so they stay exact
#' rationals instead of being evaluated as Python floats.
#'
#' @param x String to evaluate
#'
#' @examples
#' if (has_sympy()) {
#'   x <- symbol('x')
#'   (1+1)*x^2
#'   lim(sin(x)/x, "x", 0)
#' }
#'
#' @return A `caracas_symbol`
#'
#' @concept lowlevel
#' @importFrom reticulate py_eval
#' @export
eval_to_symbol <- function(x) {
ensure_sympy()
# --------------------------------------------
# 1/3 should be caught
# --------------------------------------------
# https://docs.sympy.org/latest/gotchas.html#python-numbers-vs-sympy-numbers
# y1/3 should not be caught
if (grepl("[0-9-.]+/[0-9-.]+", x, perl = TRUE)) {
# Now there is a fraction that looks like '1/3';
# we need to be sure that there are no characters in front of the
# number in the numerator
if (grepl("[a-zA-Z_]+[0-9-.]+/[0-9-.]+", x, perl = TRUE)) {
# There was a character (e.g. 'x1/2')
# or a subscript/underscore (e.g. 3*y_11/4)
# do nothing
} else {
# S(): Sympify -- wrap both sides so SymPy keeps the exact rational
x <- gsub("([0-9-.]+)/([0-9-.]+)", "S(\\1)/S(\\2)", x, perl = TRUE)
}
}
# --------------------------------------------
# (1)/(3) should be caught
# --------------------------------------------
# https://docs.sympy.org/latest/gotchas.html#python-numbers-vs-sympy-numbers
if (grepl("\\([0-9-.]+\\) */ *\\([0-9-.]+\\)", x, perl = TRUE)) {
# Now there is a fraction that looks like '(1)/(3)';
# S(): Sympify
# This gives S(1)/S(2), okay with no extra parentheses
x <- gsub("\\(([0-9-.]+)\\) */ *\\(([0-9-.]+)\\)", "S(\\1)/S(\\2)", x, perl = TRUE)
}
# Translate R-style string quoting to Python, then evaluate in Python
x <- r_strings_to_python(x)
s <- reticulate::py_eval(x, convert = FALSE)
y <- construct_symbol_from_pyobj(s)
return(y)
}
#' Create a symbol
#'
#' Find available assumptions at
#' <https://docs.sympy.org/latest/modules/core.html#module-sympy.core.assumptions>.
#'
#' @param x Name to turn into symbol
#' @param \dots Assumptions like `positive = TRUE`
#'
#' @examples
#' if (has_sympy()) {
#'   x <- symbol("x")
#'   2*x
#'
#'   x <- symbol("x", positive = TRUE)
#'   ask(x, "positive")
#' }
#'
#' @return A `caracas_symbol`
#'
#' @seealso [as_sym()]
#' @concept caracas_symbol
#' @importFrom reticulate py_run_string
#' @export
symbol <- function(x, ...) {
ensure_sympy()
verify_variable_name(x)
# Translate ... assumptions to Python keyword arguments:
# TRUE -> True, FALSE -> False, anything else -> None.
dots <- list(...)
extra_cmd <- ""
if (length(dots) > 0L) {
arg_nm <- names(dots)
arg_val <- rep("None", length(dots))
arg_val[unlist(lapply(dots, function(l) isTRUE(l)))] <- "True"
# isFALSE req. R >= 3.5, hence explicit:
arg_val[unlist(lapply(dots, function(l) is.logical(l) && length(l) == 1L && !is.na(l) && !l))] <- "False"
extra_cmd <- paste0(arg_nm, " = ", arg_val, collapse = ", ")
extra_cmd <- paste0(", ", extra_cmd)
}
# Build e.g. "x = symbols('x', positive = True)" and run it in Python
cmd <- paste0(x, " = symbols('", x, "'", extra_cmd, ")")
# py_run_string instead of py_eval because we need to assign inside Python
s <- reticulate::py_run_string(cmd, convert = FALSE)
res <- s[[x]]
y <- construct_symbol_from_pyobj(res)
return(y)
}
#' Perform calculations setup previously
#'
#' Evaluates an unevaluated SymPy expression (e.g. a limit or integral
#' created with `doit = FALSE`) by calling its Python `doit()` method.
#'
#' @param x A `caracas_symbol`
#'
#' @examples
#' if (has_sympy()) {
#'   x <- symbol('x')
#'   res <- lim(sin(x)/x, "x", 0, doit = FALSE)
#'   res
#'   doit(res)
#' }
#'
#' @concept caracas_symbol
#'
#' @export
doit <- function(x) {
  stopifnot_symbol(x)
  ensure_sympy()
  pyobj <- x$pyobj
  # Fail loudly when the underlying Python object has no doit() method
  if (is.null(pyobj) || is.null(pyobj$doit)) {
    stop("Could not doit()")
  }
  construct_symbol_from_pyobj(pyobj$doit())
}
try_doit <- function(x) {
# Best-effort variant of doit(): attempt pyobj$doit() and, on any error
# (missing method, SymPy failure), return x unchanged instead of stopping.
# if (!is.null(x$pyobj) && "doit" %in% names(x$pyobj)) {
#   y <- construct_symbol_from_pyobj(x$pyobj$doit())
#   return(y)
# }
try({
y <- construct_symbol_from_pyobj(x$pyobj$doit())
# return() inside try() exits try_doit() itself when doit() succeeded
return(y)
}, silent = TRUE)
# Reached only when the try() block errored
return(x)
}
#' Remove inner-most dimension
#'
#' @param x Array symbol to collapse dimension from
#'
#' @examples
#' if (has_sympy()) {
#'   x <- as_sym(paste0("x", 1:3))
#'   y <- as_sym("y")
#'   l <- list(x, y)
#'   l
#'   unbracket(l)
#' }
#'
#' @concept caracas_symbol
#'
#' @export
unbracket <- function(x) {
# A plain list of symbols: stack their character forms row-wise and
# re-symbolify the result.
if (!inherits(x, "caracas_symbol") && is.list(x)) {
z <- lapply(x, as_character)
z <- do.call(rbind, z)
z <- as_sym(z)
#z <- to_vector(z)
return(z)
}
# A single symbol: strip one level of "[...]" brackets from its string
# form and re-evaluate.
z <- as.character(x)
zz <- gsub("\\[([^]]+)\\]", "\\1", z)
zz  # NOTE(review): this bare expression is a no-op; likely debugging residue
y <- eval_to_symbol(zz)
return(y)
}
extract_elements <- function(x) {
  # Strip all square brackets from the string form of a symbol and drop the
  # "Matrix(...)" prefix (via remove_mat_prefix), leaving the raw elements.
  txt <- as.character(x)
  txt <- gsub("]", "", gsub("[", "", txt, fixed = TRUE), fixed = TRUE)
  remove_mat_prefix(txt)
}
## concatenate
# S3 method so that c(sym1, sym2, ...) collects symbols into a
# caracas_vector (a classed list built by vectorfy) rather than an
# ordinary list.
#' @export
c.caracas_symbol <- function(...) {
ensure_sympy()
# FIXME: To Python vector?
# In that case, see der() too.
#x <- list(...)
x <- vectorfy(list(...))
# FIXME: Use? In that case ensure that all "[..., ...]" from elsewhere (e.g. der())
# is also caught.
class(x) <- c("caracas_vector", class(x))
return(x)
}
#' Get numerator and denominator of a fraction
#'
#' @param x Fraction
#'
#' @name fraction_parts
#' @examples
#' if (has_sympy()) {
#'   x <- as_sym("a/b")
#'   frac <- fraction_parts(x)
#'   frac
#'   frac$numerator
#'   frac$denominator
#' }
#'
#' @concept caracas_symbol
#'
#' @export
fraction_parts <- function(x) {
ensure_sympy()
stopifnot_symbol(x)
# SymPy's as_numer_denom() returns a (numerator, denominator) pair
frac <- x$pyobj$as_numer_denom()
y <- list(
numerator = construct_symbol_from_pyobj(frac[0]), # Python 0-indexed
denominator = construct_symbol_from_pyobj(frac[1])
)
return(y)
}
#' @export
#' @rdname fraction_parts
numerator <- function(x) {
  # Convenience accessor: the numerator component of fraction_parts(x).
  fraction_parts(x)$numerator
}
#' @export
#' @rdname fraction_parts
denominator <- function(x) {
  # Convenience accessor: the denominator component of fraction_parts(x).
  fraction_parts(x)$denominator
}
#' Call a SymPy function directly on x
#'
#' Extend caracas by calling SymPy functions directly. The function is
#' first looked up as a method on `x`'s Python object; if that fails it is
#' called from SymPy's global namespace with `x` as first argument.
#'
#' @param x Object to call `fun` on
#' @param fun Function to call
#' @param \dots Passed on to `fun`
#'
#' @examples
#' if (has_sympy()) {
#'   def_sym(x, a)
#'   p <- (x-a)^4
#'   p
#'   q <- p %>% sympy_func("expand")
#'   q
#'   q %>% sympy_func("factor")
#'
#'   def_sym(x, y, z)
#'   expr <- x*y + x - 3 + 2*x^2 - z*x^2 + x^3
#'   expr
#'   expr %>% sympy_func("collect", x)
#'
#'   x <- symbol("x")
#'   y <- gamma(x+3)
#'   sympy_func(y, "expand_func")
#'   expand_func(y)
#' }
#'
#' @concept caracas_symbol
#'
#' @export
sympy_func <- function(x, fun, ...) {
args <- list(...)
# caracas_symbol arguments are passed as their string representation
args <- lapply(args, function(a) {
if (inherits(a, "caracas_symbol")) {
return(as.character(a))
}
return(a)
})
# See if x has fun method
out <- tryCatch({
p <- do.call(x$pyobj[[fun]], args)
res <- construct_symbol_from_pyobj(p)
res
}, error = function(cond) {
# ...it did not, try from global namespace:
s <- get_sympy()
args <- c(x$pyobj, args)
p <- do.call(s[[fun]], args)
res <- construct_symbol_from_pyobj(p)
return(res)
})
return(out)
}
#' Get free symbol in expression
#'
#' @param x Expression in which to get the free symbols in
#'
#' @examples
#' if (has_sympy()) {
#'   def_sym(a, b)
#'   x <- (a - b)^4
#'   free_symbols(x)
#' }
#'
#' @return A list of `caracas_symbol`s, one per free symbol.
#'
#' @concept caracas_symbol
#'
#' @export
free_symbols <- function(x) {
# SymPy returns a Python set; convert its string form to a Python list so
# the elements come back as individual objects.
y <- x$pyobj$free_symbols
z <- reticulate::py_eval(paste0("list(", as.character(y), ")"), convert = TRUE)
z <- lapply(z, construct_symbol_from_pyobj)
return(z)
}
#' All variables
#'
#' Return all variables in caracas symbol
#'
#' @param x caracas symbol
#'
#' @return A character vector of variable names, obtained by converting the
#'   symbol to an R expression and applying [all.vars()].
#'
#' @examples
#' if (has_sympy()){
#'   x <- vector_sym(5)
#'   all_vars(x)
#' }
#'
#' @concept caracas_symbol
#'
#' @export
all_vars <- function(x) {
all.vars(as_expr(x))
}
#' Coerce symbol to character
#'
#' Coerce symbol to character. Matrices become character matrices, vectors
#' become character vectors, and atomic symbols a single string.
#'
#' @param x caracas symbol
#'
#' @concept caracas_symbol
#'
#' @export
as_character <- function(x) {
ensure_sympy()
stopifnot_symbol(x)
# Dispatch on the symbol's structural class (matrix / vector / atomic)
switch(symbol_class(x),
"matrix" = {
as_character_matrix(x)
},
"vector" = {
# drop the matrix dimension to get a plain character vector
c(as_character_matrix(x))
},
"atomic" = {
as.character(x)
}
)
}
#' Create list of factors as in a product
#'
#' Each argument is coerced to a symbol with [as_sym()]; the result is a
#' classed list representing the (unevaluated) product of its elements,
#' e.g. for pretty-printing a matrix inverse as (1/det) * adjugate.
#'
#' @param ... factors
#'
#' @examples
#' if (has_sympy()) {
#'   d <- 2
#'   m <- matrix_sym(d, d)
#'   mi <- inv(m)
#'   det_m <- det(m)
#'   fl <- as_factor_list(1/det_m, det_m * mi)
#'   tex(fl)
#'   m <- matrix(1:4, nrow=2)
#'   mi <- solve(m)
#'   det_m <- det(m)
#'   fl <- as_factor_list(1 / as_sym(det_m), det_m * mi)
#'   tex(fl)
#' }
#'
#' @concept caracas_symbol
#'
#' @export
as_factor_list <- function(...) {
lst <- list(...)
out <- lapply(lst, as_sym)
class(out) <- c("caracas_factor_list", "list")
out
}
#' Divide or multiply matrix with factor.
#' @name mat_div_mult
#' @param m Matrix
#' @param s Factor
#'
#' @concept caracas_symbol
#'
#' @export
#' @rdname mat_div_mult
mat_factor_div <- function(m, s) {
  # Factor m as (1/s) * (s*m); the scalar is written as "1/S(s)" so SymPy
  # keeps it as an exact rational rather than a numeric value.
  # BUG FIX: the result was assigned to an unused local variable (`numer`),
  # which made the function's return value invisible; return the factor
  # list directly.
  as_factor_list(paste0("1/S(", s, ")"), s * m)
}
#' @export
#' @rdname mat_div_mult
mat_factor_mult <- function(m, s) {
  # Factor m as s * (m/s) and return the corresponding factor list.
  scaled <- m / s
  as_factor_list(s, scaled)
}
|
46b473cd1b43657955c73392066a08eedaff4975
|
c7557b0ee7435b1a74e62b42161a18302934cca9
|
/src/generate_comb_pair_exprs.r
|
51d5636318bdcd3a64f68f98a3f563bfb0c811b8
|
[] |
no_license
|
tmorikuicr/spresso
|
cb86165cef34a55d3fdefd20e8bacea4beffa01c
|
4f1609f272f7e9307814abe9a91225f7a7ec0ae0
|
refs/heads/master
| 2020-05-07T11:49:00.749320
| 2019-07-02T01:39:03
| 2019-07-02T01:39:03
| 180,476,675
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,843
|
r
|
generate_comb_pair_exprs.r
|
# Build combined expression tables for k-combinations of GO terms whose SOM
# success rate passes a cutoff, always including a user-specified anchor GO.
library(getopt, quietly=T)
#======================================
# Options
#======================================
spec <- matrix(c(
"help", "h", 0, "logical", "show this help",
"inptable", "t", 2, "character", "[required] input som result table",
"cutoff", "c", 1, "numeric", "[required] cutoff of the success rate",
"go" , "g", 1, "character", "[required] specify a GO which is always included in combinations (e.g., GO0060412)",
"comb" , "k", 2, "numeric", "[required] specify k of n_C_k (choose k from n)",
"inpdir", "i", 2, "character", "[required] input directory containing expression data",
"outdir", "o", 2, "character", "[required] output directory"
), ncol=5, byrow=T)
opt <- getopt(spec)
if(!is.null(opt$help)){
cat(getopt(spec, usage=T))
quit(status=1)
}
# Default cutoff: keep all GOs
if(is.null(opt$cutoff)){
opt$cutoff = 0
}
#============================================
# Main
#============================================
dir.create(opt$outdir, showWarnings=F)
inptable <- read.table(opt$inptable, header=T, sep="\t")
# GOs passing the success-rate cutoff are candidates for combination
candidate_gos <- as.vector(subset(inptable, success_rate >= opt$cutoff)$sample)
combs <- combn(candidate_gos, opt$comb)
for(i in 1:ncol(combs)){
gos <- combs[,i]
# Start from the anchor GO's expression table, then append each combo member
exprs <- read.table(paste0(opt$inpdir, '/exprs.log10.E1.', opt$go, '.txt'), header=T, row.names=1, sep='\t')
print(gos)
for(go in gos){
exprs.tmp <- read.table(paste0(opt$inpdir, '/exprs.log10.E1.', go, '.txt'), header=T, row.names=1, sep='\t')
exprs <- rbind(exprs, exprs.tmp)
}
# De-duplicate shared genes and sort rows by gene name
exprs <- unique(exprs)
exprs <- exprs[order(rownames(exprs)),]
# NOTE(review): `go` here is the LAST element of gos (loop leftover), so
# the filename reflects only one member of the combination; different
# combinations sharing the same last GO will overwrite each other's
# output -- confirm whether all members should appear in the name.
ofname <- paste0('exprs.log10.E1.', opt$go, '-', go, '.txt')
write.table(exprs, paste(opt$outdir, ofname, sep='/'), quote=F, sep='\t')
}
#----- create output directory -----
# NOTE(review): duplicate of the dir.create above; `odir` is never used.
dir.create(opt$outdir, showWarnings=F)
odir <- gsub("\\/$", "", opt$outdir)
|
c9662d7f63f36e47cbec5d2f7cb6fa8f84cd547b
|
c78ea793abf6c910d92dde56a48d0c880e5ba174
|
/samples/GSD1329/01_experimenting.R
|
e01aabfcb7e031d8b1adf316940ba7988eeec456
|
[] |
no_license
|
bdomokos74/Snippets
|
9d10c29f2a99396ecf988cf79cd1d87303221596
|
3c2ce5e8f13802a768738830ca28884ba1975e0c
|
refs/heads/master
| 2021-01-20T07:50:59.805027
| 2013-11-05T22:01:53
| 2013-11-05T22:01:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,196
|
r
|
01_experimenting.R
|
# Exploratory microarray analysis of breast-cancer Affymetrix data (GSD1329):
# load CEL files, normalise with RMA, fit limma contrasts between the
# apocrine / basal / luminal tumour subtypes, and build small AffyBatch
# examples. Relies on Bioconductor packages affy and limma.
library(affy)
# Sample sheet: one row per array; Samples = CEL basename, Tumor = subtype
dat <- read.table("data/brca.csv", sep=",", h=T)
dat$Title <- NULL
ma.data <- ReadAffy(filenames=paste("data/array/", dat$Samples, ".CEL", sep=""))
sample.names <- dat$Tumor
colnames(exprs(ma.data)) <- sample.names
e <- exprs(ma.data)
dim(e)
gnames <- geneNames(ma.data)
# Raw-data QC: chip images, per-array boxplots (colour = subtype: 6 apocrine,
# 16 basal, 27 luminal), and MA pairs plots for a few arrays
image(ma.data)
boxplot(ma.data, col=c(rep("green", 6), rep("blue", 16), rep("red", 27)))
mva.pairs(data.frame(a=exprs(ma.data[,c(1,2,7,8, 23, 24)])))
### preprocess :::::
# RMA: background correction, quantile normalisation, log2 summarisation
eset <- rma(ma.data)
exprs(eset[1:20,1:3])
boxplot(data.frame(exprs(eset)), col=c(rep("green", 6), rep("blue", 16), rep("red", 27)))
mva.pairs(data.frame(exprs(eset[,c(1,8,23)])))
### limma
# Differential expression: no-intercept design with one column per subtype,
# then all three pairwise contrasts with empirical-Bayes moderation
library(limma)
design <- model.matrix(~ 0+factor(c(rep("apocrine", 6), rep("basal", 16), rep("luminal", 27))))
colnames(design) <- c("apocrine", "basal", "luminal")
fit <- lmFit(eset, design)
cont.matrix <- makeContrasts(Comp2to1=basal-apocrine, Comp3to1=luminal-apocrine, Comp3to2=luminal-basal, levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2)
## gene list
# Top genes for basal vs apocrine, Benjamini-Hochberg adjusted
options(digits=3)
toptable(fit2, coef=1, adjust="BH")
## plots
volcanoplot(fit2, coef=2, highlight=10)
abline(v=c(-1,1), col="red")
# Inverse-logit helper used to draw a reference line on the volcano plot
ilogit = function(p) exp(p)/(1+exp(p))
abline(h=ilogit(.05), col="blue")
## venn diagram
# Up/down calls per contrast and their overlap
results <- decideTests(fit2)
venn <- vennCounts(results)
venn
vennDiagram(results, include=c("up", "down"), counts.col=c("red", "green"))
###
# create affybatch
# Manually assemble an AffyBatch from two normalised arrays plus phenotype
# metadata (sample name + cancer status)
preproc.data <- as.matrix(exprs(eset)[,c(1,8)])
colnames(preproc.data) <- NULL
sample.info <- data.frame( spl = gsub(".CEL", "", colnames(exprs(eset)[,c(1,8)])), stat = levels(dat$Tumor)[dat$Tumor][c(1,8)])
meta.info <- data.frame (labelDescription = c('Sample Name', 'Cancer Status'))
pheno <- new("AnnotatedDataFrame", data = sample.info, varMetadata = meta.info)
my.experiments <- new("AffyBatch", exprs=preproc.data, phenoData=pheno, cdfName="HG-U133A")
### book example
# Same construction with simulated data (8 arrays x 200 probes)
fake.data <- matrix(rnorm(8*200), ncol=8)
sample.info <- data.frame(spl=paste('A', 1:8, sep=''),stat=rep(c('cancer', 'healthy'), each=4))
pheno <- new("AnnotatedDataFrame", data = sample.info,varMetadata = meta.info)
my.experiments <- new("AffyBatch",exprs=fake.data, phenoData=pheno)
|
74fd0774dbcfbc085599edee1d87dfcef4d32792
|
00e91ed4130c104fccf7f220a2784d25b1b2b128
|
/man/barycenter.Rd
|
6ea7d627d67f85e5b8e3aea7ad528518722c514b
|
[] |
no_license
|
cran/centiserve
|
9ff59d7a5ef3d4a068d6fbc9c969d259e99ad11e
|
ebc2eb58ab4d8fbb995b7d056127b9cf71b3b31e
|
refs/heads/master
| 2021-01-01T05:55:06.643964
| 2017-07-15T08:34:41
| 2017-07-15T08:34:41
| 97,306,257
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,284
|
rd
|
barycenter.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{barycenter}
\alias{barycenter}
\title{Find the barycenter centrality score}
\usage{
barycenter(graph, vids = V(graph), mode = c("all", "out", "in"),
weights = NULL)
}
\arguments{
\item{graph}{The input graph as igraph object}
\item{vids}{Vertex sequence, the vertices for which the centrality values are returned. Default is all vertices.}
\item{mode}{Character constant, gives whether the shortest paths to or from the given vertices should be calculated for directed graphs. If out then the shortest paths from the vertex, if in then to it will be considered. If all, the default, then the corresponding undirected graph will be used, ie. not directed paths are searched. This argument is ignored for undirected graphs.}
\item{weights}{Possibly a numeric vector giving edge weights. If this is NULL, the default, and the graph has a weight edge attribute, then the attribute is used. If this is NA then no weights are used (even if the graph has a weight attribute).}
}
\value{
A numeric vector containing the centrality scores for the selected vertices.
}
\description{
Barycenter scores are calculated as 1 / (total distance from vertex v to all other vertices) in a strongly connected network.
}
\details{
There are 2 types of distance centrality scores, Closeness Centrality and Barycenter Centrality. \cr
Barycenter Centrality for vertex \eqn{v}{v} defined as:
\deqn{1 / (total distance from v to all other vertices)}{1 / (total distance from v to all other vertices)}
Closeness scores are calculated using the formula \eqn{1 / (average distance from vertex v to all other vertices)}{1 / (average distance from vertex v to all other vertices)} and Barycenter scores are calculated as \eqn{1 / (total distance from vertex v to all other vertices)}{1 / (total distance from vertex v to all other vertices)}. \cr
More detail at \href{http://www.centiserver.org/?q1=centrality&q2=Barycenter_Centrality}{Barycenter Centrality}
}
\examples{
g <- graph(c(1,2,2,3,3,4,4,2), directed=FALSE)
barycenter(g)
}
\author{
Mahdi Jalili \email{m_jalili@farabi.tums.ac.ir}
}
\references{
Viswanath, Meghana. Ontology-based automatic text summarization. Diss. University of Georgia, 2009.
}
|
0094be3dcedc6763c2d4617087f858ce193d9dc1
|
10125f1600835b777a6a51b3198a4907d4452643
|
/R/causationT2.R
|
bd9d31416c2d27ebb700df5cdf83391841669bd5
|
[
"MIT"
] |
permissive
|
jyfeather/LASSO-BN
|
d3c87faa498f33287852a3d3dfba2ad53356141a
|
a20a3b2da25396ee976e5f59eac93bf6b062ce33
|
refs/heads/master
| 2020-05-18T11:21:59.179394
| 2015-09-15T22:50:45
| 2015-09-15T22:50:45
| 25,669,694
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
causationT2.R
|
# It is simple: for each x_i, you just build a regression model using PA(x_i) as the predictors,
# then calculate the residual vector, then use hypothesis testing (t-test?) to test
# whether or not the mean of the residual is zero. If yes, then x_i is not a rout cause variable.
# Otherwise, it is a root cause variable
# You can add the results of the causation based T2 method in Table 1
# NOTE(review): rm(list=ls()) in a sourced script wipes the caller's
# workspace; consider removing it.
rm(list=ls())
library(pcalg)
require(Rgraphviz)
library(graph)
# Loads bn.dag (the learned DAG) and shift.pos (indices of shifted nodes)
load(file = "./dat/simu/bn100")
#load(file = "./dat/real/")
load(file = "./dat/simu/shift_100")
#plot(bn.dag)
edgeMat <- edgeMatrix(bn.dag)
# Shift magnitudes to evaluate
shift.sig <- c(0.1, 0.3, 0.5, 0.7, 1, 1.5)
mean_pop <- 0
# NOTE(review): this seeds `rate` with a spurious all-zero first row, and
# the rbind-in-a-loop pattern grows the matrix repeatedly; preallocating
# (or collecting rows in a list) would avoid both.
rate <- matrix(0, ncol = 3)
for (sig in shift.sig) {
# Loads `dat` (simulated data for this shift magnitude)
load(file = paste("./dat/simu/dat_100_", sig, sep = ""))
for (pos in shift.pos) {
# Parents of the shifted node serve as regression predictors
pos_predictors <- edgeMat["from",which(edgeMat["to",] == pos)]
dat4reg <- data.frame(resp = dat[,pos], dat[,pos_predictors])
fit <- lm(resp ~ ., data = dat4reg)
residual_vec <- residuals(fit)
# Fraction of observations whose residual exceeds the 0.707 threshold
rate <- rbind(rate, c(sig, pos, sum(abs(residual_vec)>0.707)/nrow(dat)))
#test <- t.test(residual_vec)
#pvalues <- c(pvalues, test$p.value)
}
}
|
33f60af95931545d3387802d7d26a36be9590b9a
|
bf39f0007def1af90f4522f3ddfd4fbcc384ef6c
|
/process_kdd.R
|
e09ec11413f884cde341eae4290da9e1a334d310
|
[] |
no_license
|
bibudhlahiri/learning
|
13d2deddd4f2021db9915c3d7df10f103d262777
|
da7ae4b542f9dae0f5d47b76aa5aa0591c9c88b3
|
refs/heads/master
| 2021-01-17T01:27:16.773005
| 2017-11-26T05:00:32
| 2017-11-26T05:00:32
| 23,405,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,978
|
r
|
process_kdd.R
|
library(data.table)
# Load the corrected KDD Cup '99 connection records, standardize them, and
# write a 25k-row sample plus a 20k/5k train/test split to disk.
# Side effects only: reads two local files, writes three CSV files.
# NOTE(review): paths are hard-coded Windows paths -- confirm before reuse.
load_kdd_data <- function()
{
filename <-
"C:\\Users\\blahiri\\kdd_cup_for_SAx\\kddcup.data.corrected"
data_corr <- fread(filename, header = FALSE, sep = ",", stringsAsFactors = FALSE, showProgress = TRUE,
colClasses = c("numeric", "character", "character", "character", "numeric",
"numeric", "character", "numeric", "numeric", "numeric",
"numeric", "character", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"character", "character", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "character"),
data.table = TRUE)
# Schema.txt lines look like "name: type"; every other token is a column name.
lines <- readLines("C:\\Users\\blahiri\\kdd_cup_for_SAx\\Schema.txt")
from_schema_file <- unlist(strsplit(lines, ": "))
column_names <- from_schema_file[c(TRUE, FALSE)]
column_names <- c(column_names, "connection_label")
setnames(data_corr, names(data_corr), column_names)
data_corr[ , num_outbound_cmds := NULL] #All values are 0
#List the columns whose type should be "factor", i.e., the categorical variables.
cols <- c("protocol_type", "service", "flag")
#Use lapply() to set all of them to factor at once
data_corr[,(cols) := lapply(.SD, as.factor), .SDcols = cols]
#Since the columns land, logged_in, is_host_login, is_guest_login are already binary, we make them numeric.
#They were originally read as character.
cols <- c("land", "logged_in", "is_host_login", "is_guest_login")
data_corr[,(cols) := lapply(.SD, as.numeric), .SDcols = cols]
# Collapse rare factor levels into "Other" (see helper below).
data_corr <- reduce_number_of_distinct_values(data_corr)
#Create dummy variables corresponding to the factor variables protocol_type, service and flag
data_corr[, protocol_type_icmp := as.numeric(data_corr$protocol_type == "icmp")]
data_corr[, protocol_type_tcp := as.numeric(data_corr$protocol_type == "tcp")]
data_corr[, service_ecr_i := as.numeric(data_corr$service == "ecr_i")]
data_corr[, service_private := as.numeric(data_corr$service == "private")]
data_corr[, flag_S0 := as.numeric(data_corr$flag == "S0")]
data_corr[, flag_SF := as.numeric(data_corr$flag == "SF")]
data_corr[ , c("protocol_type", "service", "flag") := NULL]
#Scale the columns which are really numeric (were not made numeric from categorical) and have a wide range of values.
#Making these numeric make sense from the semantic point of view.
sc <- c("duration", "src_bytes", "dst_bytes", "wrong_fragment", "urgent", "hot", "num_failed_logins",
"num_compromised", "num_root", "num_file_creations", "num_shells", "num_access_files",
"count", "serror_rate", "rerror_rate", "same_srv_rate", "diff_srv_rate",
"srv_count", "srv_serror_rate", "srv_rerror_rate", "srv_diff_host_rate",
"dst_host_count", "dst_host_srv_count")
# copy() avoids modifying the original table by reference while scaling.
data_corr <- copy(data_corr)[ , (sc) := lapply(.SD, scale), .SDcols = sc]
#standardized_filename <- "C:\\Users\\blahiri\\kdd_cup_for_SAx\\kddcup.data.standardized"
#write.table(data_corr, standardized_filename, sep = ",", row.names = FALSE, col.names = TRUE, quote = FALSE)
#Round off numeric columns to reduce filesize
cols_to_round <- names(data_corr)
cols_to_round <- cols_to_round[(cols_to_round != "connection_label")]
for (j in cols_to_round) set(data_corr, j = j, value = round(data_corr[[j]], 4))
standardized_sample_filename <- "C:\\Users\\blahiri\\kdd_cup_for_SAx\\kddcup.data.standardized.sampled"
sample_size <- 25000 #Makes it a ~5 MB file
sampled_kdd_data <- data_corr[sample(nrow(data_corr), sample_size), ]
write.table(sampled_kdd_data, standardized_sample_filename, sep = ",", row.names = FALSE, col.names = TRUE, quote = FALSE)
# 80/20 train/test split of the 25k sample.
# NOTE(review): sampling is not seeded, so output files differ on each run.
train_index <- sample(25000, 20000)
kdd_training_sample <- sampled_kdd_data[train_index, ]
training_sample_filename <- "C:\\Users\\blahiri\\kdd_cup_for_SAx\\kdd_training_sample"
write.table(kdd_training_sample, training_sample_filename, sep = ",", row.names = FALSE, col.names = TRUE, quote = FALSE)
kdd_test_sample <- sampled_kdd_data[-train_index, ]
test_sample_filename <- "C:\\Users\\blahiri\\kdd_cup_for_SAx\\kdd_test_sample"
write.table(kdd_test_sample, test_sample_filename, sep = ",", row.names = FALSE, col.names = TRUE, quote = FALSE)
}
#Reduce the number of distinct values of categorical variables (except connection_label) by merging the categories after top (k-1).
#Reduce the number of distinct values of categorical variables (except connection_label) by merging the categories after top (k-1).
# For each factor column: keep the (k - 1) most frequent levels and relabel
# everything else as "Other".  If "" is among the top levels it is skipped and
# the next most frequent level is kept instead.
# NOTE(review): set() writes character values back, so factor columns become
# character columns as a side effect -- callers appear to rely on this when
# building dummy variables downstream.
# Mutates `input_data` by reference (data.table) and also returns it.
reduce_number_of_distinct_values <- function(input_data)
{
k <- 3
columns <- names(input_data)
columns <- columns[(columns != "connection_label")]
for (column in columns)
{
if (is.factor(input_data[, get(column)]))
{
# frequency table of the column's levels, most frequent first
tx <- table(input_data[, get(column)])
names_in_order <- names(tx[order(-tx)])
if (length(names_in_order) > k)
{
top_names <- names_in_order[1:(k - 1)]
if ("" %in% top_names)
{
top_names <- names_in_order[1:k]
top_names <- top_names[top_names != ""]
}
set(input_data, j = column, value = ifelse(input_data[[column]] %in% top_names, as.character(input_data[[column]]), "Other"))
}
}
}
input_data
}
# Add a `parent_label` column mapping each fine-grained KDD connection label
# onto one of the five top-level classes (normal, dos, probe, r2l, u2r).
# Mutates `input_data` by reference (data.table) and returns it.
# FIX: the original used apply(input_data, 1, ...), which coerces the whole
# table to a character matrix per row (slow, type-fragile); operate on the
# connection_label column directly instead -- same result, same interface.
map_to_five_classes <- function(input_data)
{
input_data[, parent_label := vapply(as.character(connection_label),
lookup_parent_label, character(1),
USE.NAMES = FALSE)]
}
lookup_parent_label <- function(connection_label)
{
# Map one fine-grained KDD connection label (e.g. "smurf.") onto its
# five-class parent category.  Any label not listed below -- other than
# "normal." -- is treated as a probe attack.
u2r_labels <- c("buffer_overflow.", "loadmodule.", "perl.", "rootkit.")
r2l_labels <- c("ftp_write.", "guess_passwd.", "imap.", "multihop.",
"phf.", "spy.", "warezclient.", "warezmaster.")
dos_labels <- c("back.", "land.", "neptune.", "pod.", "smurf.", "teardrop.")
if (connection_label %in% u2r_labels) {
"u2r"
} else if (connection_label %in% r2l_labels) {
"r2l"
} else if (connection_label %in% dos_labels) {
"dos"
} else if (connection_label == "normal.") {
"normal"
} else {
"probe"
}
}
# Cluster the standardized 25k-row KDD sample with k-means (k = 200) and
# compute each point's Euclidean distance to its assigned centroid; the
# distance distribution (printed) is used to pick an outlier threshold.
# Returns the numeric data.table augmented with assigned_cluster and
# distance_to_centroid columns.  Reads a hard-coded local file.
cluster_kdd <- function()
{
filename <- "C:\\Users\\blahiri\\kdd_cup_for_SAx\\kddcup.data.standardized.sampled"
kdd_sample <- fread(filename, header = TRUE, sep = ",", stringsAsFactors = FALSE, showProgress = TRUE,
colClasses = c("numeric", "numeric", "numeric", "numeric", #Taking 1 off for num_outbound_cmds
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric", "character",
"numeric", "numeric", "numeric", "numeric", "numeric"),
data.table = TRUE)
kdd_sample <- map_to_five_classes(kdd_sample)
# binary label: anything that is not "normal" counts as an attack
kdd_sample[, final_label := ifelse((kdd_sample$parent_label == "normal"), "normal", "attack")]
to_cluster <- kdd_sample[, .SD, .SDcols = sapply(kdd_sample, is.numeric)] #cluster with the numeric columns only
clusters <- kmeans(to_cluster, centers = 200)
#Check fraction of normal and attack packets in two clusters.
to_cluster[, assigned_cluster := clusters$cluster]
#Get an idea of the distance of points from their nearest cluster centers. This will help set the threshold in SAx.
#to_cluster
# NOTE(review): the per-row apply() below rebuilds the feature vector by
# naming every column explicitly; a vectorized distance computation would be
# much faster, but the column order here is load-bearing -- left as-is.
to_cluster[, distance_to_centroid := apply(to_cluster, 1, function(row) find_distance_to_centroid(
c(row["duration"], row["src_bytes"], row["dst_bytes"], row["land"], row["wrong_fragment"],
row["urgent"], row["hot"], row["num_failed_logins"], row["logged_in"], row["num_compromised"],
row["root_shell"], row["su_attempted"], row["num_root"], row["num_file_creations"], row["num_shells"],
row["num_access_files"], row["is_host_login"], row["is_guest_login"], row["count"], row["srv_count"],
row["serror_rate"], row["srv_serror_rate"], row["rerror_rate"], row["srv_rerror_rate"], row["same_srv_rate"],
row["diff_srv_rate"], row["srv_diff_host_rate"], row["dst_host_count"], row["dst_host_srv_count"], row["dst_host_same_srv_rate"],
row["dst_host_diff_srv_rate"], row["dst_host_same_src_port_rate"], row["dst_host_srv_diff_host_rate"], row["dst_host_serror_rate"], row["dst_host_srv_serror_rate"],
row["dst_host_rerror_rate"], row["dst_host_srv_rerror_rate"], row["protocol_type_tcp"], row["service_ecr_i"], row["service_private"],
row["flag_S0"], row["flag_SF"]),
row["assigned_cluster"], clusters$centers))]
# Tukey five-number summary of the distances, for threshold selection.
print(fivenum(to_cluster$distance_to_centroid))
#With k = 2, 0.00621319 0.03545544 0.03545762 3.23626149 114.51476117
#With k = 3, 0.00207569 0.02620893 0.02621193 1.92907525 114.46398929
#With k = 4, 1.726433e-03 0.0256287 0.02563178 1.593763 114.2546
#With k =30, 0.002375915 0.022761839 0.022765315 0.274212546 19.959214687
#With k =100,0.002936039 0.021564915 0.021568586 0.105381863 13.784577548
#With k =150,0.00000000 0.02116488 0.02116862 0.08667574 13.78457755
#With k =200,0.000000000 0.001751310 0.004750433 0.069927011 13.784577548
percentile <- ecdf(to_cluster$distance_to_centroid)
print(percentile(10)) #0.99076
to_cluster
}
find_distance_to_centroid <- function(current_vec, assigned_cluster, centers)
{
# Euclidean distance between a point and the centroid of its assigned
# cluster, i.e. row `assigned_cluster` of the k-means `centers` matrix.
# as.numeric() guards against character values coming out of apply().
delta <- as.numeric(current_vec) - as.numeric(centers[assigned_cluster, ])
sqrt(sum(delta^2))
}
#source("C:\\Users\\blahiri\\kdd_cup_for_SAx\\process_kdd.R")
# Entry point: loads, standardizes and samples the raw KDD data, writing the
# sampled/train/test CSV files as a side effect.
load_kdd_data() #4,898,431 rows; 972781 (19.85%) normal
#data_corr <- map_to_five_classes(data_corr)
#print(table(data_corr$parent_label)) #dos 3883370 (79%), probe 41102 (0.8%), r2l 1126 (0.02%), u2r 52
#to_cluster <- cluster_kdd()
#euclidean_dist <- test_normalization()
|
db074a87c2ee2f1efc5e759c32b91cd5971c716e
|
1175c37b33404586c7a55fef15b91cfe9c3c48f4
|
/man/make_filenames.Rd
|
6a71752d8976cfdf357d02c57b5b6584ad1e63af
|
[] |
no_license
|
rmsharp/renameSurgerySheets
|
189c4fe6e457b56402cef49a3542144c0414eaa9
|
ea3f2a08b067022b6890225b4e4a119764edd99a
|
refs/heads/master
| 2021-09-17T20:21:04.473638
| 2018-07-05T00:48:15
| 2018-07-05T00:48:15
| 100,298,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 488
|
rd
|
make_filenames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_filenames.R
\name{make_filenames}
\alias{make_filenames}
\title{Make new sequenced filenames from full path names}
\usage{
make_filenames(file_list)
}
\arguments{
\item{file_list}{character vector of one or more having the path and basename
of a list of files to be renamed.}
}
\value{
Character vector of properly sequenced file names.
}
\description{
Make new sequenced filenames from full path names
}
|
19cb77fc789c40db3b1bda9371a7c62a5e51d6a9
|
3208008eabc8c851c6bf6fa063dffa2c58bc8815
|
/plot1.R
|
26a38a1b74c4c7f3ab799ce407ce6efe3c7fe09b
|
[] |
no_license
|
tothzoltan81/ExData_Plotting1
|
0fdcf99aa444f520da2c8dd6c69637de8b8d1505
|
c3b6c1d3957cbe1ec1c1eb004fdfa607c4c2da9c
|
refs/heads/master
| 2021-01-23T15:16:01.866430
| 2014-12-07T21:00:38
| 2014-12-07T21:00:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power from the household power data set.
# NOTE(review): setwd() in a script is fragile -- all paths below assume this
# working directory exists; confirm before reuse.
setwd("d:/R_files/CoURSERA")
# "?" marks missing values in this data set.
data_household<-read.csv('household_power_consumption.txt',header=T, sep=";", dec=".", na.strings=c("?"))
# NOTE(review): rows 66637:69517 are a hard-coded slice -- presumably the
# two-day window the assignment targets; verify against the data file.
data_household2<-data_household[66637:69517,]
datetime<-as.POSIXct(paste(data_household2$Date,data_household2$Time), format="%d/%m/%Y %H:%M:%S")
data_household3<-cbind(data_household2,datetime)
remove(datetime)
# Render the histogram to a 480x480 PNG in the working directory.
png(filename = "plot1.png", width = 480, height = 480)
with(data_household3,hist(Global_active_power,xlab="Global Active Power (kilowatts)",main="Global Active Power", col="red"))
dev.off()
|
33d8b6487f8688e350c59451887283932e694bc5
|
7f717941a38a9efd8f18da5ca8deb774c74f7b9a
|
/package/ashr/R/ashr-package.r
|
8700c3e962e7fb67990d92a5ec3cf2802febeedc
|
[] |
no_license
|
daichaoxing/ash
|
0f7ceada274b1c59e757c6f366fc0f1d4b6fbec2
|
67524524e626150773716fb6ac22df3c0815b154
|
refs/heads/master
| 2020-12-24T09:01:06.222347
| 2015-02-10T03:11:27
| 2015-02-10T03:11:27
| 28,334,024
| 1
| 0
| null | 2014-12-22T14:53:14
| 2014-12-22T10:16:14
|
R
|
UTF-8
|
R
| false
| false
| 50
|
r
|
ashr-package.r
|
#' ashr
#'
#' @name ashr
#' @docType package
NULL
|
112c39556ed22b98ca2993469986e171483b4ea7
|
9876f2d87cbd128c440450d0e500e1c77d1fbe81
|
/usable_demo.R
|
a6c7c07e7a9d013450c8f101c07a9f5eaab8f3ae
|
[] |
no_license
|
gesturestudios/rPi_weather_station
|
6e3d29bde6602633bec347f001c8fc8f67623c7d
|
5542ce01d6b57ec84bbed95020e9ff9af420779b
|
refs/heads/master
| 2021-06-08T15:59:31.119084
| 2020-05-09T19:03:22
| 2020-05-09T19:03:22
| 96,737,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,486
|
r
|
usable_demo.R
|
# import libraries----------
library(shiny)
library(shinydashboard)
library(plotly)
library(ggplot2)
library(RMySQL)
library(dplyr)
library(reshape2)
library(gsheet)
hexcolor <- function(x){
# Map a temperature-like value to a diverging hex colour (cool blues for
# low values through warm reds for high ones).  Intervals are open on the
# left, matching the original strict ">" comparisons: e.g. x = 30 falls
# in the 20-30 bucket.  Values above 90 and above 100 share the same hue,
# as in the original mapping.
breaks <- c(20, 30, 40, 50, 60, 70, 80, 90, 100)
shades <- c("#303694", "#4575B4", "#ABD9E9", "#DFF3F8", "#FDDF90",
"#FCAD60", "#F46C43", "#D62F27", "#9E0142", "#9E0142")
shades[findInterval(x, breaks, left.open = TRUE) + 1]
}
textcolor <- function(x){
# Map a temperature-like value to one of the shinydashboard named colours
# used by valueBox().  Intervals are open on the left, matching the
# original strict ">" comparisons (so x = 80 is "orange", x = 81 "red").
breaks <- c(20, 30, 40, 50, 60, 80, 100)
shades <- c("navy", "blue", "light-blue", "aqua",
"yellow", "orange", "red", "maroon")
shades[findInterval(x, breaks, left.open = TRUE) + 1]
}
# Shiny dashboard layout: header shows the live clock, sidebar carries the
# navigation menu plus a shared day-range slider, body holds one tab per
# menu item (overview, utility bills, home improvement, weather map).
ui <- dashboardPage(#======================================================================
dashboardHeader(title = textOutput("curdate"),titleWidth = 350),
dashboardSidebar(#-----------------------------------------------------------------======
sidebarMenu(
menuItem("Overview", tabName = "overview", icon = icon("address-card")),
menuItem("Bills", tabName = "bills", icon = icon("dollar"),
menuSubItem("Utility bills",tabName = "utility"),
menuSubItem("Home improvement",tabName = "homeimp")
),
menuItem("Darksky map", tabName = "weathermap", icon = icon("map")),
box(
# NOTE(review): shinydashboard box widths are a 1-12 grid; width = 13
# here (and width = 2.75 below) look like typos -- confirm intent.
title="Slider for all graphs",background = "blue",width = 13,
sliderInput(inputId = "days",
label = "Choose number of days to plot",
value = 5, min = 1, max = 30,ticks=FALSE)
)
)
),
dashboardBody(#--------------------------------------------------------------------------
tabItems(
tabItem(tabName = "overview",#-------------------------------------------------------
valueBoxOutput("outside"),
valueBoxOutput("inside"),
valueBoxOutput("climate_status"),
# embedded forecast.io widget for the home location
tags$iframe(
seamless = "seamless",
src = "https://forecast.io/embed/#lat=45.4813&lon=-122.8490&name=our backyard",
height = 250, width = 600
),
box(
background = "navy",width = "450px", height = "200px",
img(src="http://www.hamqsl.com/solarpich.php")
)
),
tabItem(tabName = "utility",#----------------------------------------------------------
box(
title="Summary of bills",background = "light-blue",width = 3,
uiOutput("choose_billers"),
uiOutput("choose_years"),
box(
title="Upcoming bills",background = "red",width = 2.75,
htmlOutput("upcoming_bills")
),
box(
title="Recently paid",background = "green",width = 2.75,
htmlOutput("paid_bills")
)
),
box(
title = "Plot of selected provider bills over selected year range",background = "navy",width = 9,
plotlyOutput("billplot")
)
),
tabItem(tabName = "homeimp",#----------------------------------------------------------
box(
title="Choose years to summarize:",background = "light-blue",width = 3,
uiOutput("hd_years")
),
box(
title = "Total spending at Home Depot over selected year range",background = "navy",width = 9,
plotlyOutput("hdspendingplot")
)
),
tabItem(tabName = "weathermap",#--------------------------------------------------------
box(
title="forecast map from Dark Sky",background = "light-blue",width = 4
),
tabPanel("Map",
br(),
htmlOutput("darkmapframe")
)
)
)
)
)
# -----------------------------------------------------------------------------------------
# Shiny server: fetches bill and Home Depot spending data from public Google
# Sheets at startup, then wires up all reactive outputs for the dashboard.
# NOTE(review): the data fetch runs once per R session, not per client.
server <- function(input, output) {#=======================================================
# connect to data sources and retrieve data--------------------------------------------------
# utility bill data from Google Sheet
bills = gsheet2tbl('https://docs.google.com/spreadsheets/d/1omTXqs6xDdzzUHfDOYOi5picPXoUVDkpcY7-IH9QDFA/edit?usp=sharing')
# due dates appear in two formats; parse the primary format first, then
# fill the failures with the fallback format
bills$dateob = as.Date(bills$DUE_DATE,"%m/%d/%Y")
nulldates = bills$DUE_DATE[is.na(bills$dateob)]
bills$dateob[is.na(bills$dateob)] = as.Date(nulldates,"%b %d,%Y")
bills$month = as.numeric(format(bills$dateob,'%m'))
bills$year = as.numeric(format(bills$dateob,'%Y'))
bills$month_abb = month.abb[bills$month]
bills$month_abb <- factor(bills$month_abb,levels=month.abb)
# latest bill per provider; positive status = due in the future
recent_bills = summarize(group_by(bills,SOURCE),
lastbills = max(dateob))
recent_bills$status = recent_bills$lastbills - Sys.Date()
upcoming = subset(recent_bills,lastbills>=Sys.Date())
if (nrow(upcoming)>=1){
upcoming$str = paste(upcoming$SOURCE," due in ",upcoming$status," days on ",format(upcoming$lastbills,"%b %d"))}
paid = subset(recent_bills,lastbills<Sys.Date())
if (nrow(paid)>=1){
paid$str = paste(paid$SOURCE," last paid on ",format(paid$lastbills,"%b %d"))}
# Home Depot spending from Google Sheet
homedepotsheet = gsheet2tbl("https://docs.google.com/spreadsheets/d/1OB0TD1gkaYtPGSwrTMZzo_EsymR9cENYbgubf-RwBzU/edit?usp=sharing")
# parse free-text receipt rows into (date, amount) pairs
hddf = data.frame(Datestr = as.Date(character()),
receipt_amount = numeric(),
stringsAsFactors = FALSE)
for (i in 1:nrow(homedepotsheet)){
test = sub(".*USD\\$ ","",homedepotsheet[i,4])
hddf[i,2] = as.numeric(sub(" .*","",test))
hddf[i,1] =as.Date(sub(" at .*","",homedepotsheet[i,1]),format = "%B %d, %Y")
}
hddf <- hddf[order(hddf$Datestr),]
hddf$year = as.numeric(format(hddf$Datestr,'%Y'))
# CommonDate maps every receipt onto year 2000 so years can share an x-axis
hddf$CommonDate <- as.Date(paste0("2000-",format(hddf$Datestr, "%j")), "%Y-%j")
# running total of spending within each year
hddf$cumsum <- do.call(c, tapply(hddf$receipt_amount, hddf$year, FUN=cumsum))
# renders for header --------------------------------------------------------------------
output$curdate <- renderText({format(Sys.time(), "%A %B %d, %I:%M %p") })
# renders for summary page---------------------------------------------------------------
# NOTE(review): the temperatures below (45, 68, 67) are hard-coded
# placeholders rather than live sensor readings -- TODO confirm.
output$outside <- renderValueBox({
valueBox(
icon("thermometer-4"),
value=45,
subtitle="outside",
color=textcolor(45)
)
})
output$inside <- renderValueBox({
valueBox(
icon("thermometer-4"),
value=68,
subtitle="inside",
color=textcolor(68)
)
})
output$climate_status <- renderValueBox({
valueBox(
subtitle = "thermostat set to",
value=67,
icon("fire"),
color="red"
)
})
# renders for utility page -------------------------------------------------------------
output$choose_billers <- renderUI({
billers = unique(bills$SOURCE)
checkboxGroupInput(inputId = "billervariable",label = "Choose bills to display",
choices = billers,selected = billers)
})
output$choose_years <- renderUI({
yearoptions = unique(bills$year)
checkboxGroupInput(inputId = "yearvariable",label = "Choose years to display",
choices = yearoptions,selected = yearoptions)
})
output$upcoming_bills <- renderUI({HTML(paste0(upcoming$str,sep='<br/>'))})
output$paid_bills <- renderUI({HTML(paste0(as.list(paid$str),sep='<br/>'))})
output$billplot <- renderPlotly({
bills2plot = subset(bills, SOURCE %in% input$billervariable & year %in% input$yearvariable,
c(SOURCE,year,month_abb,DUE,dateob))
bills2plot$SOURCE <- factor(bills2plot$SOURCE)
bills2plot$year <- factor((bills2plot$year))
# pad with NA rows so every provider/month/year facet cell exists
bills2plot.all <- rbind(bills2plot, cbind(expand.grid(
SOURCE=levels(bills2plot$SOURCE),
month_abb=levels(bills2plot$month_abb),
year = levels(bills2plot$year)), DUE=NA, dateob=NA))
billplot = ggplot(bills2plot.all,aes(SOURCE,DUE))+
geom_bar(aes(fill=SOURCE,text=dateob),position="dodge",stat="identity")+
theme(axis.text.x=element_blank(),axis.title.x=element_blank(),axis.ticks.x=element_blank())+
facet_grid(year~month_abb)+labs(y="Bill amount ($)", fill="Provider")
billplot})
# renders for home depot page---------------------------------------------------------
output$hd_years <- renderUI({
hdyearoptions = unique(hddf$year)
checkboxGroupInput(inputId = "hdyear",label = "Choose years to display",
choices = hdyearoptions,selected = hdyearoptions)
})
output$hdspendingplot <- renderPlotly({
hdplotdata = subset(hddf, year %in% input$hdyear)
hdplot = ggplot(hdplotdata,aes(x=CommonDate,y=cumsum)) +
geom_area(fill='red',alpha=0.5) +
geom_point(aes(size=receipt_amount,text=Datestr)) +
facet_grid(year~.)+
labs(y="Total spending ($)",x="")+
scale_x_date(labels = function(x) format(x, "%d-%b")) +
theme(legend.position = "none")
hdplot})
# render map from Dark Sky --------------------------------------------------------------
output$darkmapframe <- renderUI({
HTML('
<style>
.embed-container {
position: relative;
padding-bottom: 80%;
height: 0;
max-width: 100%;
}
</style>
<iframe
width="1500"
height="900"
frameborder="0"
scrolling="no"
marginheight="0"
marginwidth="0"
title="provPrepTest"
src="https://darksky.net/map-embed/@temperature,38.411,-110.391,4.js?embed=true&timeControl=false&fieldControl=true&defaultField=precipitation_rate&defaultUnits=_inph">
</iframe>
')
})
}
# Launch the dashboard application.
shinyApp(ui, server)#======================================================================
|
68a25cb008b2d774f9b77fecbddc40c667c28b6a
|
a2b58771d7acd6ab467d0f59da8dd7fdf7ac5ec2
|
/R/trim_outlier.R
|
a6e4e8672c786b814c100aa5932a3eedf3d13423
|
[] |
no_license
|
Schwenk-Lab/BAf-R_package
|
9849f64f965ef35d76f7f99ead23fd03fb698db6
|
18f67d3d4c2d21843330a97f36baea0d4571f2dd
|
refs/heads/master
| 2022-06-28T00:22:13.155061
| 2020-05-12T08:59:31
| 2020-05-12T08:59:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,520
|
r
|
trim_outlier.R
|
# -----------------------------------------------------------------------------#
#' Trim outlier out by Robust PCA
#'
#' Trim outlier out based on the orthogonal and score distances computed by
#' robust principal components analysis (PCA). After log-transformation, like
#' ordinary PCA, the values are scaled, but using robust statistics such as
#' median instead of mean and MAD instead SD. Please note this function is
#' applied to each binder batch separately and omits the data of the samples
#' having any NA first.
#'
#' @param X a \code{\link{matrix}} or a \code{\link{BAf-class}} object
#' @param ... in order to encourage to use all parameter names
#' @param alpha,kmax the parameter for \code{rrcov::\link{PcaHubert}}
#' @param cutoff.od.P,cutoff.sd.P the probability threshold for the orthogonal
#' and score distances
#' @param coord the distance coordinates to be used in outlier classification.
#' "\code{o&s}" indicates the points beyond the cutoffs on both coordinates
#' are labelled as outliers. In other options, the points over any cutoff on
#' any coordinates are marked as outliers.
#' @param plotit if plots were wanted
#' @param main title of plot
#'
#' @return The BAf object after outlier removal
#'
#' @references
#' Hubert, M., Rousseeuw, P. J., Branden, K. V., (2005) ROBPCA: A New Approach
#' to Robust Principal Component Analysis. Technometrics 47, 64-79
#'
#' @author Mun-Gwan Hong <\email{mun-gwan.hong@scilifelab.se}>
#' @seealso \code{\link{apply_per_group}}
#' @examples
#' data(sba)
#' B <- trim_outlier(sba, applyBy= "plate", plotit = FALSE)
#' summary(B)
#' @keywords BAf Suspension Bead Array
#' @import rrcov
#' @rdname trim_outlier
#' @export
# -----------------------------------------------------------------------------#
# created : 2012-04-23 by Mun-Gwan
# modified :
# 2012-10-15 by Mun-Gwan : fix the bug with the data including NAs disapearing
# row names
# 2012-10-30 by Mun-Gwan :
# 1) fix the error that appears when 'applyBy' is missing.
# 2) fix the case 'nrow(tg) < 10'
# 2013-05-13 by Mun-Gwan :
# 1) handling "coord" variable
# 2) minor changes in diagnostic plot
# 2013-05-21 by Mun-Gwan : change 'kmax' for 'PcaHubert' to default 'kmax= 10'
# in rrcov package
# 2013-07-03 by Mun-Gwan : adapt to "SBAe" class
# 2013-07-25 by Mun-Gwan : coord = "o&s" limits the outliers in the top right
# corner
# 2013-12-06 by Mun-Gwan : take 'kmax' as an argument
# 2014-09-17 by Mun-Gwan : select appropriate "plot" more specifically
# 2015-11-04 by Mun-Gwan : When MAD is 0, then skip scaling
# 2017-07-18 by Mun-Gwan : fix the problem of showing wrong sample ID when NA
# was already included
# 2017-11-09 by Mun-Gwan : add a function to handle matrix
# -----------------------------------------------------------------------------#
# S4 generic for trim_outlier; methods for "matrix" and "BAf" are defined below.
setGeneric("trim_outlier", function(X, ...) standardGeneric("trim_outlier"));
# -----------------------------------------------------------------------------#
#' @rdname trim_outlier
#' @export
# -----------------------------------------------------------------------------#
# Matrix method: robust-PCA-based outlier trimming.  Rows (samples) flagged as
# outliers have all their values set to NA; the logical flag vector is
# attached as attr(X, "is_out").  Rows already containing NA are excluded from
# the PCA and are never flagged.
setMethod(
"trim_outlier",
signature(X = "matrix"),
function(X,
...,
alpha = 0.9,
cutoff.od.P = 0.025,
cutoff.sd.P = 0.025,
coord = c("o&s", "o", "s", "os"),
plotit = FALSE,
main = "",
kmax= 10) {
# Stop when any infinite value included
stopifnot(all(is.finite(X[!is.na(X)])))
coord <- match.arg(coord)
##
if(plotit) {
# two rows in plot and ask = TRUE if it continues to the next plate
opar <- par(
mfrow = c(2, 1),
oma = c(0, 0, 3, 0),
mar = c(4, 4, 2, 2) + 0.1,
mgp = c(2.7, 1, 0),
ask = if (dev.interactive(orNone = TRUE)) TRUE else FALSE
)
on.exit(par(opar))
}
# omit NA of failed samples
tg <- na.omit(X)
i_omit <- unclass(attr(tg, "na.action"))
if(nrow(tg) < 10)
stop("Too small number (<10) of samples were left after na.omit.")
if(length(i_omit) > (nrow(X)/2))
stop("More than a half of samples have NAs.")
# log-transform + centering and scaling like ordinary PCA but using median
# and MAD instead.
tg <- log(tg) %>%
scale(center = apply(., 2, median, na.rm = TRUE),
scale = apply(., 2, mad, na.rm = TRUE) %>%
# no scaling when MAD == 0
( function(x) { x[x == 0] <- 1; x } )
)
if(any(tmp <- apply(tg, 2, mad, na.rm= T) == 0))
warning("The columns (", paste(which(tmp), collapse= ","),
") have constant values for all samples")
## robust PCA
pca <- rrcov::PcaHubert(tg, kmax = eval(kmax), alpha = alpha)
# Cutoffs on the orthogonal (od) and score (sd) distances; a coordinate not
# named in `coord` gets an Inf cutoff so it can never flag a sample.
# NOTE(review): rrcov:::.crit.od is an unexported internal -- may break on
# rrcov upgrades; confirm against the installed version.
pca@cutoff.od <- if(grepl("o", coord)) {
rrcov:::.crit.od(pca@od, crit= (1- cutoff.od.P))
} else Inf #2013-05-13
pca@cutoff.sd <- if(grepl("s", coord)) {
sqrt(qchisq(cutoff.sd.P, pca@k, lower.tail = FALSE))
} else Inf #2013-05-13
## find outliers
isOut.od <- pca@od > pca@cutoff.od
isOut.sd <- pca@sd > pca@cutoff.sd
# "o&s": outlier only if beyond BOTH cutoffs; otherwise beyond either one.
# 2013-05-13 / 2013-07-25
isOut <- if(grepl("&", coord)) {
isOut.od & isOut.sd
} else {
isOut.od | isOut.sd
}
## * Plot it * -----------------------------------------------------------
if(plotit) {
## Box plot - show distribution of signals of each sample
boxplot(t(tg),
col = ifelse(isOut, 2, 0),
outcol = ifelse(isOut, 2, par()$fg),
main = "Intensity distribution",
cex = 0.5,
cex.main = 1,
col.main = "gray",
xlab = "",
ylab = "Scaled intensity",
cex.lab = 1,
xaxt = "n"
)
## X-axis label
title(xlab= "Sample", mgp= c(2, 0, 0), col.lab= "gray")
axis(side= 1,
at = 1:nrow(tg),
labels = rownames(tg),
las = 3,
cex.axis = 0.3,
lwd.ticks = 0.5,
mgp = c(3, 0.7, 0)
)
# thicker at every 5 ticks
axis(side= 1, at= seq(5, nrow(tg), 5), labels= FALSE)
abline(h=0, col= "gray", lty= "dotted")
if(nrow(tg) > 10) # vertical gray lines per 10 samples
abline(v=seq(10, nrow(tg), 10), col= "gray", lty= "dotted")
# >> diagonistic plot << #
selectMethod("plot", c("Pca", "missing"))(
x= pca,
id.n.sd = sum(isOut.sd) + 1,
id.n.od = sum(isOut.od) + 1,
main = "Outlier map",
xlim = c(0, max(pca@sd, if (grepl("s", coord)) pca@cutoff.sd) * 1.1),
ylim = c(0, max(pca@od, if (grepl("o", coord)) pca@cutoff.od)),
cex.main = 1,
col.main = "gray",
cex.lab = 1,
off = 0.03,
cex = 0.8,
col = ifelse(isOut, 2, 1),
pch = ifelse(isOut, 19, 1)
)
title(main= main, outer= T)
}
# squeeze the omitted row into the 'isOut'
if(!is.null(i_omit)) {
for(j in i_omit) isOut <- append(isOut, FALSE, after= (j - 1))
# return to original row names in the rows of NAs (2012-10-15)
names(isOut)[i_omit] <- names(i_omit)
}
# >> replace all values with NA of the outlier samples << #
X[isOut, ] <- NA
attr(X, "is_out") <- isOut
return(X)
}
)
# -----------------------------------------------------------------------------#
#' @param by_s Robust PCA per sample set divided by this. If it is a character,
#' then the column named as it in \code{@sinfo} wil be used for
#' stratification. When a factor vector is given, then it is used as it is in
#' dividing into groups. If it is NULL as the default, there will be no
#' stratification.
#'
#' @rdname trim_outlier
#' @export
# -----------------------------------------------------------------------------#
# created : 2012-04-23 by Mun-Gwan
# modified :
# 2017-11-09 by Mun-Gwan : add a function to handle matrix
# -----------------------------------------------------------------------------#
# BAf method: applies the matrix method above to each group of samples/binder
# batches via apply_per_group, NA-ing out the flagged samples in sX and
# recording the flags in the sample annotation table.
setMethod(
"trim_outlier",
signature(X = "BAf"),
function(X,
...,
by_s = NULL,
alpha = 0.9,
cutoff.od.P = 0.025,
cutoff.sd.P = 0.025,
coord = c("o&s", "o", "s", "os"),
plotit = FALSE,
kmax= 10) {
apply_per_group(
baf= X,
by_s = by_s,
by_b = batch_colname(X, "binder"), # per binder batch
passBAf = T,
FUN= function(TG) {
# plot title: binder batch, optionally suffixed with the sample group
main <- if(is.null(by_s)) {
batch(TG, "binder")[1]
} else if(is.character(by_s) && length(by_s) == 1) {
paste(batch(TG, "binder")[1], "-", by_s, attr(TG, "by_names")$by_s)
} else {
paste(batch(TG, "binder")[1], "-", attr(TG, "by_names")$by_s)
}
# run the matrix method; only the "is_out" attribute is consumed here
isOut <- trim_outlier(
X= sX(TG),
alpha = alpha,
cutoff.od.P = cutoff.od.P,
cutoff.sd.P = cutoff.sd.P,
coord = coord,
plotit = plotit,
main = main,
kmax = kmax
) %>%
attr("is_out")
# >> replace all values with NA of the outlier samples << #
sX(TG)[isOut, ] <- NA
sA(TG, "sample")$outlier <- data.frame(isOut) %>%
`names<-`(batch(TG, "binder")[1])
return(TG)
}
)
}
)
|
25427864d34c65008ce38c6869fbc1f96143d53e
|
c36783569c701ad176286bb1c57a7ff2559e3c56
|
/In_class/Sept18_inclass.R
|
e839b28535e860db2c112ee4b1807f15702cdb42
|
[] |
no_license
|
AndrewUpdegrove/DataAnalytics2020_Andrew_Updegrove
|
a5eb0816f27081d372c6dc5f80c87e7c46109658
|
43438aefc518db3a4d9b1d161d76597f9ce264c0
|
refs/heads/master
| 2023-01-31T20:29:44.564365
| 2020-12-14T15:38:24
| 2020-12-14T15:38:24
| 292,866,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 383
|
r
|
Sept18_inclass.R
|
# In-class exercise: scatterplots and a simple linear model on the
# multivariate data set (columns Income, Immigrant, Homeowners).
# NOTE: file.choose() prompts interactively for the CSV.
multivariate <- read.csv(file.choose(), header = TRUE)
names(multivariate)
multivariate
# Scatterplots.  FIX: the original used attach(), which pollutes the search
# path; reference columns explicitly / pass `data =` to lm() instead.
plot(multivariate$Income, multivariate$Immigrant, main = "Scatterplot")
plot(multivariate$Immigrant, multivariate$Homeowners)
# Fitting Linear Models: Homeowners as a function of Immigrant
mm <- lm(Homeowners ~ Immigrant, data = multivariate)
mm
plot(multivariate$Immigrant, multivariate$Homeowners)
abline(mm)
abline(mm, col = "Green", lwd = 3)
summary(mm)
attributes(mm)
mm$coefficients
|
f27536f64bd914df681a9f3f26379b72b884cf83
|
d86268c2fdd4195208c3fd5aecab31c324af7bca
|
/omd/man/fill_na.Rd
|
38d89bc6e2bff7bac23147d245605ad48b81c247
|
[] |
no_license
|
bio-datascience/omd
|
0e2edc61e86c135383b5d4bf29c14c95af026f5f
|
5f2f532dfe077388f7911cc7999622c4b6a3f8b8
|
refs/heads/master
| 2023-08-28T21:44:27.488641
| 2021-11-02T15:25:02
| 2021-11-02T15:25:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 329
|
rd
|
fill_na.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{fill_na}
\alias{fill_na}
\title{Fill in NA from surrounding values.}
\usage{
fill_na(mat)
}
\arguments{
\item{mat}{A matrix with possibly missing values}
}
\value{
The same object.
}
\description{
Fill in NA from surrounding values.
}
|
fe19085df818edb7ad42519d58cd6257178b6deb
|
8912afe2f91c01683ec4230630dfc87595f3cf4c
|
/Happiness Challenge/Happiness.R
|
fc7905266da25bb21ee14c25f7c00d00c8c10856
|
[] |
no_license
|
dsouzarc/data101
|
70c757ce6e88408d68fab7773711bb0c3b2a3c02
|
9e0b759f1989a34328d785ddef696236c3ca3604
|
refs/heads/master
| 2021-01-10T12:17:36.765944
| 2016-02-27T20:37:37
| 2016-02-27T20:37:37
| 50,802,547
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,542
|
r
|
Happiness.R
|
happiness <- read.csv('Happiness Challenge/Happiness.csv')
#Country happiness
countryHappiness <- tapply(happiness$HAPPINESS, happiness$COUNTRY, mean)
sortedHappiness <- sort(countryHappiness)
happinessLength <- length(sortedHappiness)
#Various default variable
yLimit <- c(0, 10)
yLabel = "Average Happiness"
imageDirectory = "Happiness Challenge/Images/"
#Plot Unhappy countries
barplot(sortedHappiness[1:5], col=rainbow(5), ylim=yLimit, ylab=yLabel, main="Unhappy countries")
#Plot Happy Countries
barplot(sortedHappiness[(happinessLength - 4): happinessLength], ylab=yLabel, ylim=yLimit, col=rainbow(5), main="Happy Countries")
#Gender vs Happiness
genderHappiness <- tapply(happiness$HAPPINESS, happiness$GENDER, mean)
barplot(genderHappiness, col=rainbow(2), ylim=yLimit, ylab=yLabel, main="Gender Happiness")
#Age vs Happiness
ageHappiness <- tapply(happiness$HAPPINESS, happiness$AGE, mean)
barplot(ageHappiness, col=rainbow(length(ageHappiness)), ylim=yLimit, ylab=yLabel, main="Happiness At Ages")
#Gender vs. Happiness at different age ranges
youthGender <- subset(happiness, happiness$AGE >= 18 & happiness$AGE < 30)
middleGender <- subset(happiness, happiness$AGE >= 31 & happiness$AGE < 50)
oldGender <- subset(happiness, happiness$AGE >= 51 & happiness$AGE < 75)
reallyOldGender <- subset(happiness, happiness$AGE >= 76)
youthGender.mean <- tapply(youthGender$HAPPINESS, youthGender$GENDER, mean)
middleGender.mean <- tapply(middleGender$HAPPINESS, middleGender$GENDER, mean)
oldGender.mean <- tapply(oldGender$HAPPINESS, oldGender$GENDER, mean)
reallyOldGender.mean <- tapply(reallyOldGender$HAPPINESS, reallyOldGender$GENDER, mean)
means <- c(youthGender.mean, middleGender.mean, oldGender.mean, reallyOldGender.mean)
barplot(means, main="Happiness vs. Gender At Ages", ylab=yLabel, ylim=yLimit,
col=rainbow(length(means)), names.arg=c("18-30 M", "18-30 F", "31 - 50M", "31-50F", "51-75M", "51-75F", "76+ M", "76+F"))
genderMeans <- list(youthGender$HAPPINESS, middleGender$HAPPINESS, oldGender$HAPPINESS, reallyOldGender$HAPPINESS)
# For each of the happiest countries (top indices of the ascending sort).
# BUG FIX: the original wrote `(length - 3):length`, doing arithmetic on the
# base function `length` itself (a runtime error); use `happinessLength`.
# NOTE(review): (happinessLength - 3):happinessLength spans FOUR indices even
# though the original comment said "3 happiest" — confirm the intended count.
for (i in (happinessLength - 3):happinessLength) {
  # Plot a graph of the happiness with age groups (with and without gender)
  plotHappinessVSAgeIntervals(countryIndex = i)
  plotHappinessVSAgeIntervals(countryIndex = i, factorGender = FALSE, showBoxPlot = FALSE)
  # Mirror index: the correspondingly-ranked unhappiest country
  unhappyIndex <- happinessLength - i + 1
  plotHappinessVSAgeIntervals(countryIndex = unhappyIndex)
  plotHappinessVSAgeIntervals(countryIndex = unhappyIndex, factorGender = FALSE, showBoxPlot = FALSE)
}
# Plots a graph of a country's happiness with age groups and gender.
# Relies on script globals: `happiness` (raw survey rows), `sortedHappiness`
# and `happinessLength` (per-country means, sorted ascending), and the plot
# defaults `yLabel` / `yLimit`.
#   countryIndex - index into sortedHappiness (1 = unhappiest country)
#   factorGender - when TRUE, split each age band by gender
#   showBoxPlot  - when TRUE, additionally draw a box plot of the raw values
plotHappinessVSAgeIntervals <- function(countryIndex, factorGender=TRUE, showBoxPlot=TRUE) {
  # Country name for this rank
  country <- sortedHappiness[countryIndex]
  countryName <- names(country)
  # BUG FIX: the original computed `length - countryIndex + 1`, i.e.
  # arithmetic on the base function `length`; use the stored count instead.
  ranking <- happinessLength - countryIndex + 1
  ranking <- paste(countryName, "(Happiest Country: #", ranking, ")")
  boxPlotXLabels <- c("18-30", "31-50", "51-75", "76+")
  # Age bands for this country.
  # BUG FIX: upper bounds made inclusive (<= 30 / <= 50 / <= 75) so ages 30,
  # 50 and 75 are no longer dropped between bands.
  youthGender <- subset(happiness, happiness$AGE >= 18 & happiness$AGE <= 30 & happiness$COUNTRY == countryName)
  middleGender <- subset(happiness, happiness$AGE >= 31 & happiness$AGE <= 50 & happiness$COUNTRY == countryName)
  oldGender <- subset(happiness, happiness$AGE >= 51 & happiness$AGE <= 75 & happiness$COUNTRY == countryName)
  reallyOldGender <- subset(happiness, happiness$AGE >= 76 & happiness$COUNTRY == countryName)
  if (factorGender) {
    # Per-band means split by gender
    xTitles <- c("18-30 F", "18-30 M", "31-50 F", "31-50 M", "51-75 F", "51-75 M", "76+ F", "76+ M")
    plotName <- paste("Happiness vs Gender at Different Ages in", ranking)
    youthGender.mean <- tapply(youthGender$HAPPINESS, youthGender$GENDER, mean)
    middleGender.mean <- tapply(middleGender$HAPPINESS, middleGender$GENDER, mean)
    oldGender.mean <- tapply(oldGender$HAPPINESS, oldGender$GENDER, mean)
    reallyOldGender.mean <- tapply(reallyOldGender$HAPPINESS, reallyOldGender$GENDER, mean)
  } else {
    # No gender split: one simple mean per age band
    xTitles <- boxPlotXLabels
    plotName <- paste("Happiness at Different Ages in", ranking)
    youthGender.mean <- mean(youthGender$HAPPINESS)
    middleGender.mean <- mean(middleGender$HAPPINESS)
    oldGender.mean <- mean(oldGender$HAPPINESS)
    reallyOldGender.mean <- mean(reallyOldGender$HAPPINESS)
  }
  # Collect the means and the raw values for plotting
  genderMeans <- c(youthGender.mean, middleGender.mean, oldGender.mean, reallyOldGender.mean)
  genderMeansRounded <- lapply(genderMeans, round, 2)
  generalDataList <- list(youthGender$HAPPINESS, middleGender$HAPPINESS, oldGender$HAPPINESS, reallyOldGender$HAPPINESS)
  # Bar chart with each bar labelled by its rounded mean
  genderMeansPlot <- barplot(genderMeans, main=plotName, ylab=yLabel, ylim=yLimit, col=rainbow(length(genderMeans)), names.arg=xTitles)
  text(x=genderMeansPlot, y=genderMeans, label=genderMeansRounded, pos=3, cex = 1.2, col="red")
  # Optional box plot of the raw distributions per band
  if (showBoxPlot) {
    boxPlotTitle <- paste("Ranges of Happiness at Different Ages in", ranking)
    boxplot(generalDataList, main=boxPlotTitle, ylab=yLabel, col=rainbow(length(generalDataList)), names=boxPlotXLabels)
  }
}
# Saves the current plot as <imageDirectory>/<imageName>.png.
# BUG FIX: dev.copy() opens a new png device but the original never closed it,
# so the file was not flushed to disk; dev.off() finalises and closes it.
saveGraph <- function(imageName) {
  fullName <- paste0(imageDirectory, imageName, ".png")
  dev.copy(png, fullName, width = 4, height = 4, units = 'in', res = 300)
  dev.off()
}
|
c704c4fd537b3f68eafe52d4ded6cbef1a427021
|
cef9e0516578ddc6fc2f1184b8f4850408887c0d
|
/R/coxphSeries.R
|
691035bdb2e6d3940666887195e5eef58b01565a
|
[] |
no_license
|
tagteam/Publish
|
62cad3b48930743d19b5246ce2f8aeb7343ebe98
|
b027ce49f882ee051118247997f0a931cedcd46e
|
refs/heads/master
| 2023-01-28T04:10:25.237750
| 2023-01-17T14:21:03
| 2023-01-17T14:21:03
| 32,985,684
| 17
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,318
|
r
|
coxphSeries.R
|
##' Run a series of Cox regression analyses for a list of predictor variables
##' and summarize the results in a table.
##' The Cox models can be adjusted for a fixed set of covariates
##'
##' This function runs on \code{coxph} from the survival package.
##' @title Run a series of Cox regression models
##' @param formula The fixed part of the regression formula. For
##' univariate analyses this is simply \code{Surv(time,status)~1}
##' where \code{Surv(time,status)} is the outcome variable. When the
##' aim is to control the effect of \code{vars} in each element of the
##' series by a fixed set of variables it is
##' \code{Surv(time,status)~x1+x2} where again Surv(time,status) is
##' the outcome and x1 and x2 are confounders.
##' @param data A \code{data.frame} in which the \code{formula} gets
##' evaluated.
##' @param vars A list of variable names, the changing part of the
##' regression formula.
##' @param ... passed to publish.coxph
##' @return matrix with results
##' @author Thomas Alexander Gerds
##' @examples
##' library(survival)
##' data(pbc)
##' ## collect hazard ratios from three univariate Cox regression analyses
##' pbc$edema <- factor(pbc$edema,levels=c("0","0.5","1"),labels=c("0","0.5","1"))
##' uni.hr <- coxphSeries(Surv(time,status==2)~1,vars=c("edema","bili","protime"),data=pbc)
##' uni.hr
##'
##' ## control the Cox regression analyses for age and gender
##' ## but collect only information on the variables in `vars'.
##' controlled.hr <- coxphSeries(Surv(time,status==2)~age+sex,vars=c("edema","bili","protime"),data=pbc)
##' controlled.hr
##'
##' @export
coxphSeries <- function(formula,data,vars,...){
    ## NULL-initialise the NSE column name so R CMD check does not flag it
    Missing=NULL
    data.table::setDT(data)
    ## keep only the columns needed by the fixed formula and the series
    data <- data[,c(all.vars(formula),vars),with=FALSE]
    clist <- lapply(vars,function(v){
        ## append the current variable to the fixed part of the formula
        form.v <- update.formula(formula,paste(".~.+",v))
        ## coerce logical predictors to two-level factors so the summary
        ## table reports an explicit reference level
        if (is.logical(data[[v]]))
            data[[v]] <- factor(data[[v]],levels=c("FALSE","TRUE"))
        cf <- survival::coxph(form.v,data=data,...)
        ## store the data on the fit so downstream summaries can re-evaluate it
        cf$call$data <- data
        cf$model <- data
        ## extract only the rows belonging to the current variable
        ## (removed unused local `nv` from the original)
        rtab <- regressionTable(cf)
        rtab[[v]]
    })
    out <- data.table::rbindlist(clist)
    ## drop the Missing column when there is nothing to report
    if (all(out$Missing%in%c("","0")))
        out[,Missing:=NULL]
    out[]
}
|
81e0f1a9f8fb0351ddb109e4e33703e74243e359
|
4c65edc1964eaf7cf648e9ec8300166d46a5eb92
|
/Stop/app.R
|
796a6c45391fc8cfa27bd99e421901d0891a48b2
|
[] |
no_license
|
josephYen0829/dataffffff
|
66e8de506a48c70e2e51c5de7cc727276b844c73
|
851ea8e4488deaf0a2cc2824bacc878b997131f0
|
refs/heads/master
| 2020-03-23T04:54:03.466934
| 2019-09-27T05:50:17
| 2019-09-27T05:50:17
| 141,111,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103,183
|
r
|
app.R
|
# setwd("~/PIITSStop Project/Shiny Dashboard/Data")
############################################################
# Load libraries and depended scripts
############################################################
dependencies <- c("shinydashboard", "leaflet", "DT", "shiny", "readxl", "plotly",
                  "markdown", "shinyjs", "pROC", "caret", "shinyFiles", "stringr",
                  "dplyr", "shinyBS", "tidyr", "lubridate", "e1071")
# Install-if-missing loop: require() returns FALSE when the package is absent,
# in which case it is installed and then attached.
for (d in dependencies) {
  if (!require(d, character.only = TRUE)) {
    install.packages(d)
    require(d, character.only = TRUE)
  }
}
# Project helper scripts; these define the *_file / *_path variables and the
# processing, modelling and heatmap functions used below.
source("set_variables.R")
source("data_processing.R")
source("model_engine.R")
source("train_and_predict.R")
source("heatmap_data_processing.R")
source("parameters.R")
############################################################
# Load raw data, preprocess and do feature engineering
############################################################
# Load branch details
branch_map <- read_excel(path = branch_map_file, sheet = "branch_table")
# if the master data frame file exists, read it in. Else, load the raw data
# train the model, make predictions, create the master data frame and save it
# as .csv file
branch_all <- NULL
Non_Financial_Breakdown <- NULL
if (file.exists(branch_all_file)) {
  # Cached path: read back the previously built master frame
  branch_all <- read.csv(branch_all_file, header=TRUE, sep=",")
  branch_all$audit_date = as.Date(branch_all$audit_date)
  # Non-financial transaction breakdown columns used by the heatmaps
  Non_Financial_Breakdown <- branch_all[, c('Branch_Code', 'Year', 'Quarter', 'Acct_Open_Count_Q',
                                            'Acct_Closure_Count_Q', 'Cheque_Issuance_Count_Q',
                                            'Cheque_Payment_Cancel_Count_Q', 'Cheque_Outward_Clearing_Count_Q')]
  #branch_all <- branch_all[branch_all$audit_qtr!="2016-Q4",]
  #Non_Financial_Breakdown <- Non_Financial_Breakdown[!(Non_Financial_Breakdown$Year=="2016" & Non_Financial_Breakdown$Quarter==4),]
} else {
  # Cold path: rebuild everything from the raw workbooks.
  # Read in raw data, preprocess and perform feature engineering
  processed_data <- load_and_process_data(file1_path, file2_path, file3_path, file4_path,
                                          file5_path, file6_path, file7_path, branch_map)
  branch_all <- processed_data$all_data
  Non_Financial_Breakdown <- processed_data$non_financial
  # Drop 2013 data
  branch_all <- branch_all[branch_all$Year != "2013",]
  branch_all <- branch_all[order(branch_all$Branch_Code, branch_all$Year, branch_all$Quarter),]
  # Train the model and make predictions
  branch_all <- train_and_predict(branch_all, features_selected_rds_file, branch_map, detection_seed, prediction_seed)
  # Save the master data frame as csv file
  write.csv(branch_all, branch_all_file, row.names = FALSE)
}
# Get values as menu items (drop-down choices for the sidebar controls)
branch_name <- c("ALL",as.character(sort(unique(branch_all$Branch_Name))))
branch_code <- c("ALL",sort(as.numeric(unique(branch_all$Branch_Code))))
audit_date <- as.Date(sort(unique(branch_all$audit_date),decreasing = TRUE))
audit_qtr <- as.character(sort(unique(branch_all$audit_qtr),decreasing = TRUE))
current_date <- max(audit_date)
current_qtr <- max(audit_qtr)
############################################################
# ui.R
############################################################
# Dashboard header: app title plus the DBS logo linking to the public site.
dbs_logo_link <- tags$li(
  class = "dropdown",
  tags$a(
    href = "http://www.dbs.com.tw/personal-zh/default.page", target = "_blank",
    tags$img(height = "30px", alt = "DBS Logo",
             src = 'https://upload.wikimedia.org/wikipedia/en/thumb/1/18/DBS_Bank_Logo.svg/1280px-DBS_Bank_Logo.svg.png'),
    style = "padding-top:10px; padding-bottom:10px;"
  )
)
header <- dashboardHeader(title = 'DBS Taiwan Branches', dbs_logo_link)
# Sidebar: four navigation tabs plus a collapsible "Settings" box whose
# controls (cutoff slider, branch/quarter/risk filters) drive every view.
sidebar <- dashboardSidebar(
  sidebarMenu(id = "tabs",
              menuItem(
                "Detection Model",
                tabName = "detection",
                icon = icon("bolt")
              ),
              menuItem(
                "Prediction Model",
                tabName = "prediction",
                icon = icon("bolt")
              ),
              menuItem(
                "Heatmaps",
                tabName = "charts",
                icon = icon("fire")
              ),
              menuItem(
                "Data Upload",
                tabName = "retrain",
                icon = icon("upload")
              )
  ),
  box(title = tagList(shiny::icon("wrench"), strong("Settings")), width=NULL, background = "navy",
      solidHeader = TRUE, id = "settings",collapsible = TRUE,
      # Risk cutoff range; starts disabled (shinyjs) — presumably enabled
      # elsewhere in the server logic (not visible here).
      disabled(sliderInput("cutoff", label = h6(strong("Select the Cutoff Threshold")),
                  min = 0, max = 1,
                  value = c(lower_cutoff,upper_cutoff), step = 0.05)), #cutoff sliderinput
      selectizeInput("selectize1", label = h6(strong("Select by Branch Name")),
                     choices = branch_name,
                     selected = "ALL", multiple = FALSE,
                     options = list(placeholder = 'Select a Branch Name')), #1st input
      selectizeInput("selectize2", label = h6(strong("Select by Branch Code")),
                     choices = branch_code,
                     selected = "ALL", multiple = FALSE,
                     options = list(placeholder = 'Select a Branch Code')), #2nd input
      # Quarters list excludes the oldest four entries (audit_qtr is sorted
      # descending, so the tail holds the earliest quarters)
      selectInput("quarter2", label = h6(strong("Select the Quarter")),
                  choices = audit_qtr[1:(length(audit_qtr)-4)],
                  selected = current_qtr,
                  multiple = FALSE), #quarter drop-down select
      selectizeInput("risky_only", label = h6(strong("Risk Rating")),
                     choices = c("ALL", "High Risk", "Medium Risk", "Low Risk"),
                     selected = "ALL", multiple = FALSE), #filter display ROR branches
      # Resets the filters back to their defaults (handled in the server)
      actionButton(inputId = "reset", label = "Default")
  ) # end of box
)
# body: main content area — inline CSS tweaks plus one tabItem per sidebar
# entry (detection, prediction, heatmap charts, data upload/retrain).
body <- dashboardBody(
  useShinyjs(),
  # Inline CSS overrides (string literal; the leading '#' lines inside it are
  # commented-out CSS, not R comments)
  tags$head(tags$style(HTML('
# /* logo */
# .skin-red .content-wrapper, .right-side {
# background-color: #212121;
# }
# 
# .nav-tabs-custom>.tab-content {
# background: #263238;
# }
.nav-tabs-custom>.nav-tabs {
border-bottom-color: #d50000;
}
.nav-tabs {
border-bottom: 5px solid #ddd;
}
section.sidebar .shiny-input-container {
padding: 0px 5px 0px 0px;
}
.form-group, .selectize-control {
margin-bottom: 0px;
}
.form-group, .select-control {
margin-bottom: 0px;
}
.form-group, .slider-control {
margin-bottom: 0px;
}
.col-sm-6 {
padding-right: 8px;
padding-left: 8px;
}
.dataTables_filter input {
width: 150px
}
.shiny-output-error-validation {
color: green;
}
'
  ))),
  tabItems(
    # ---- Tab 1: Detection model (risk ratings for completed quarters) ----
    tabItem(tabName = "detection",
            fluidRow(
              # Dynamic infoBox
              valueBoxOutput("High_Risk_Branches"),
              valueBoxOutput("Medium_Risk_Branches"),
              valueBoxOutput("Low_Risk_Branches")
            ), # end of first fluid row
            fluidRow(
              tabBox(title = "", width="100%", id = "dbsmap", height = 600,
                     #tabsetPanel(
                     # 1st tab panel
                     tabPanel(title=tagList(shiny::icon("globe"), "Map View"),
                              leafletOutput("dbs_tw_map", height=400) # box for TW map
                     ), # end of first tab panel
                     # 2nd tab panel
                     tabPanel(title=tagList(shiny::icon("line-chart"), "Trend"),
                              helpText("Select any Branch at the Settings box. This chart shows the historical trend of the selected Branch with 1 Medium or above Audit Issues"),
                              plotlyOutput("plot_ROR")
                     ), # end of second tab panel
                     # 3rd tab panel
                     tabPanel(title=tagList(shiny::icon("th"), "Important Features"),
                              helpText("Select any Branch at the Settings box. This chart ranks the important features by branch.
                                       Features with significant changes across whole period are listed on top."),
                              plotlyOutput("plot_branch_heatmap", height = 600)
                     ), # end of second tab panel
                     # 4th tab panel
                     tabPanel(title=tagList(shiny::icon("line-chart"), "Model Performance"),
                              helpText("Select any Quarter at the Settings box. The model performance applies to all Branches and all Risk Ratings in the selected quarter;
                                       filtering by Branch or Risk Rating has no effect."),
                              plotlyOutput("roc_curve", height = 500),
                              h4(strong("Confusion Matrix")),
                              helpText("Select the cutoff threshold in the Settings Box.
                                       Each column of the matrix represents the instances in an actual class
                                       while each row represents the instances in a predicted class."),
                              DT::dataTableOutput("confusion_matrix", width="80%")
                     ) # end of second tab panel
              ) #end of tabBox
            ) # end of second fluid row
    ), # end of tabitem - detection model
    # ---- Tab 2: Prediction model (same layout, *_pred output ids) ----
    tabItem(tabName = "prediction",
            fluidRow(
              # Dynamic infoBox
              valueBoxOutput("High_Risk_Branches_Pred"),
              valueBoxOutput("Medium_Risk_Branches_Pred"),
              valueBoxOutput("Low_Risk_Branches_Pred")
            ), # end of first fluid row
            fluidRow(
              # NOTE(review): tabBox id "dbsmappred" differs from the detection
              # tab's "dbsmap" here, but the heatmap tab below reuses "dbsmap"
              # many times — Shiny ids should be unique; confirm intent.
              tabBox(title = "", width="100%", id = "dbsmappred", height = 600,
                     #tabsetPanel(
                     # 1st tab panel
                     tabPanel(title=tagList(shiny::icon("globe"), "Map View"),
                              leafletOutput("dbs_tw_map_pred", height=400) # box for TW map
                     ), # end of first tab panel
                     # 2nd tab panel
                     tabPanel(title=tagList(shiny::icon("line-chart"), "Trend"),
                              helpText("Select any Branch at the Settings box. This chart shows the historical trend of the selected Branch with 1 Medium or above Audit Issues"),
                              plotlyOutput("plot_ROR_pred")
                     ), # end of second tab panel
                     # 3rd tab panel
                     tabPanel(title=tagList(shiny::icon("th"), "Important Features"),
                              helpText("Select any Branch at the Settings box. This chart ranks the important features by branch.
                                       Features with significant changes across whole period are listed on top."),
                              plotlyOutput("plot_branch_heatmap_pred", height = 600)
                     ), # end of second tab panel
                     # 4th tab panel
                     tabPanel(title=tagList(shiny::icon("line-chart"), "Model Performance"),
                              helpText("Select any Quarter at the Settings box, except for the current quarter
                                       which has yet to have target prediction.
                                       The model performance applies to all Branches and all Risk Ratings in the selected quarter;
                                       filtering by Branch or Risk Rating has no effect."),
                              plotlyOutput("roc_curve_pred", height = 500),
                              h4(strong("Confusion Matrix")),
                              helpText("Select the cutoff threshold in the Settings Box.
                                       Each column of the matrix represents the instances in an actual class
                                       while each row represents the instances in a predicted class."),
                              DT::dataTableOutput("confusion_matrix_pred", width="80%")
                     ) # end of second tab panel
              ) #end of tabBox
            ) # end of second fluid row
    ), # end of tabitem - detection model
    # ---- Tab 3: Heatmaps — two columns of collapsible boxes, each with a
    # "Heat Map" and a "Data" sub-tab ----
    tabItem(tabName = "charts",
            fluidRow(
              column(width = 6,
                     box(title = tagList(strong("Account Opening Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap1",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("acct_open_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("acct_open_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Deposit Transaction Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap3",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_dt_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_dt_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Withdrawal Transaction Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap5",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_wt_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_wt_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Remittance Transaction Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap7",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_rt_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_rt_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Total Financial Transaction Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap9",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_total_ft_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_total_ft_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Total Non-Financial Transaction Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap11",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("nf_tran_total_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("nf_tran_total_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     )
              ), # end of column 1
              column(width = 6,
                     box(title = tagList(strong("Account Closure Count by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap2",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("acct_closure_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("acct_closure_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Deposit Transaction Amount by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap4",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_da_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_da_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Withdrawal Transaction Amount by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap6",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_wa_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_wa_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Remittance Transaction Amount by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap8",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_ra_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_ra_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Total Financial Transaction Amount by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap10",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("tran_total_fa_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("tran_total_fa_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     ),
                     box(title = tagList(strong("Changes in Head Count (Absolute) by Quarter")), width=12,
                         solidHeader = TRUE, status = "warning",id = "heatmap12",collapsible = TRUE,
                         tabBox(title = "", width="100%", id = "dbsmap",
                                # 1st tab panel
                                tabPanel(title=tagList("Heat Map"),
                                         plotlyOutput("hr_abs_changes_heatmap", height = 500)
                                ), # end of first tab panel
                                tabPanel(title=tagList("Data"),
                                         DT::dataTableOutput("hr_abs_changes_datatable")
                                ) # end of second tab panel
                         ) # end of tabBox
                     )
              ) # end of column 2
            ) # end of fluidrow
    ), # end of 2nd tabitem
    # ---- Tab 4: Data upload + model retrain ----
    tabItem(tabName = "retrain",
            fluidRow(
              # NOTE(review): both boxes below share id = "upload" — ids
              # should be unique; confirm whether the id is actually used.
              box(title = tagList(strong("Step 1: New Datasets Upload")), width=6,
                  solidHeader = TRUE, status = "primary",id = "upload",collapsible = TRUE,
                  helpText("Upload all the required latest datasets using the standard templates. The uploaded data must be cumulative,
                           inclusive of historical data as well."),
                  h5(strong("File 1: Branch Financial Transactions")),
                  fileInput('file1', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file1_check"), style="color:red"),
                  h5(strong("File 2: Non Financial Transactions")),
                  fileInput('file2', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file2_check"), style="color:red"),
                  h5(strong("File 3: BHC Data")),
                  fileInput('file3', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file3_check"), style="color:red"),
                  h5(strong("File 4: Customer Complaints Data")),
                  fileInput('file4', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file4_check"), style="color:red"),
                  h5(strong("File 5: HR Data")),
                  fileInput('file5', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file5_check"), style="color:red"),
                  h5(strong("File 6: ROR Data")),
                  fileInput('file6', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file6_check"), style="color:red"),
                  h5(strong("File 7: Non Financial Transactions Breakdown")),
                  fileInput('file7', 'Choose xlsx file',
                            accept = c(".xlsx")
                  ),
                  span(textOutput("file7_check"), style="color:red")
              ),
              box(title = tagList(strong("Step 2: Process Data and Retrain Model")), width=6,
                  solidHeader = TRUE, status = "warning",id = "upload",collapsible = TRUE,
                  helpText("Press `Confirm` to process the data and train the model"),
                  div(style = "display:inline-block",actionButton("preprocess_confirm","Confirm", class='btn-warning',
                                                                  style = "color: white"),style="display:center-align"),
                  textOutput("upload")
              )
            ) # end of fluidrow
    ) # end of 4th tabitem
  ) # end of tabitems
) # end of body
# Assemble the page.
# BUG FIX: `sidebar` is already a dashboardSidebar() (see its definition
# above), so the original `dashboardSidebar(sidebar)` nested one sidebar
# container inside another; pass it through directly.
ui <- dashboardPage(
  skin = "red",
  header,
  sidebar,
  body
)
############################################################
# server.R
############################################################
############################################################
# Get data for heatmap display
############################################################
# Master frame wrapped as a reactive value so the server can replace it
# after a data re-upload/retrain.
reactive_branch_all <- reactiveValues(df = branch_all)
# Pre-compute every heatmap input once at startup from the master frame.
Non_Financial_Breakdown_v3 <- Non_Financial_Breakdown_heatmap_data_preprocess(Non_Financial_Breakdown)
taiwan_branch_heatmap_data <- get_heapmap_data(branch_all, Non_Financial_Breakdown_v3)
acct_open_heatmap_data <- get_acct_open_heatmap_data(taiwan_branch_heatmap_data, branch_map)
acct_closure_heatmap_data <- get_acct_closure_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_dt_heatmap_data <- get_tran_dt_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_da_heatmap_data <- get_tran_da_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_wt_heatmap_data <- get_tran_wt_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_wa_heatmap_data <- get_tran_wa_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_rt_heatmap_data <- get_tran_rt_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_ra_heatmap_data <- get_tran_ra_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_total_ft_heatmap_data <- get_tran_total_ft_heatmap_data(taiwan_branch_heatmap_data, branch_map)
tran_total_fa_heatmap_data <- get_tran_total_fa_heatmap_data(taiwan_branch_heatmap_data, branch_map)
nf_tran_total_heatmap_data <- get_nf_tran_total_heatmap_data(taiwan_branch_heatmap_data, branch_map)
hr_abs_changes_heatmap_data <- get_hr_abs_changes_heatmap_data(taiwan_branch_heatmap_data, branch_map)
# Define reactive values
reactive_taiwan_branch_heatmap_data <- reactiveValues(df = taiwan_branch_heatmap_data)
# CONSISTENCY FIX: this assignment used `<<-` in the original; at the top
# level that is equivalent to `<-`, so use the standard operator like every
# sibling assignment.
reactive_acct_open_heatmap_data <- reactiveValues(data = acct_open_heatmap_data)
reactive_acct_closure_heatmap_data <- reactiveValues(data = acct_closure_heatmap_data)
reactive_tran_dt_heatmap_data <- reactiveValues(data = tran_dt_heatmap_data)
reactive_tran_da_heatmap_data <- reactiveValues(data = tran_da_heatmap_data)
reactive_tran_wt_heatmap_data <- reactiveValues(data = tran_wt_heatmap_data)
# NOTE(review): the original omitted a reactive wrapper for the withdrawal
# amount heatmap even though every other heatmap has one and the UI renders
# "tran_wa_heatmap"; added for symmetry — confirm against the server code.
reactive_tran_wa_heatmap_data <- reactiveValues(data = tran_wa_heatmap_data)
reactive_tran_rt_heatmap_data <- reactiveValues(data = tran_rt_heatmap_data)
reactive_tran_ra_heatmap_data <- reactiveValues(data = tran_ra_heatmap_data)
reactive_tran_total_ft_heatmap_data <- reactiveValues(data = tran_total_ft_heatmap_data)
reactive_tran_total_fa_heatmap_data <- reactiveValues(data = tran_total_fa_heatmap_data)
reactive_nf_tran_total_heatmap_data <- reactiveValues(data = nf_tran_total_heatmap_data)
reactive_hr_abs_changes_heatmap_data <- reactiveValues(data = hr_abs_changes_heatmap_data)
# Per-heatmap data tables shown on the "Data" sub-tabs
acct_open_table_data <- reactiveValues(data = get_acct_open_table_data(branch_all, branch_map))
acct_closure_table_data <- reactiveValues(data = get_acct_closure_table_data(branch_all, branch_map))
tran_dt_table_data <- reactiveValues(data = get_tran_dt_table_data(branch_all, branch_map))
tran_da_table_data <- reactiveValues(data = get_tran_da_table_data(branch_all, branch_map))
tran_wt_table_data <- reactiveValues(data = get_tran_wt_table_data(branch_all, branch_map))
tran_wa_table_data <- reactiveValues(data = get_tran_wa_table_data(branch_all, branch_map))
tran_rt_table_data <- reactiveValues(data = get_tran_rt_table_data(branch_all, branch_map))
tran_ra_table_data <- reactiveValues(data = get_tran_ra_table_data(branch_all, branch_map))
tran_total_ft_table_data <- reactiveValues(data = get_tran_total_ft_table_data(branch_all, branch_map))
tran_total_fa_table_data <- reactiveValues(data = get_tran_total_fa_table_data(branch_all, branch_map))
nf_tran_total_table_data <- reactiveValues(data = get_nf_tran_total_table_data(branch_all, branch_map))
hr_abs_changes_table_data <- reactiveValues(data = get_hr_abs_changes_table_data(branch_all, branch_map))
server <- function(input, output, session){
model_cutoff_medium <<- reactive({as.numeric(input$cutoff[1])})
model_cutoff_high <<- reactive({as.numeric(input$cutoff[2])})
############################################################
# Sidebar
############################################################
# current quarter view
observe({
updateSelectInput(session,input='quarter2',choices = audit_qtr[1:(length(audit_qtr)-4)],
selected = current_qtr)
})
observeEvent(input$tabs, {
updateSelectizeInput(session, input='risky_only', selected = "ALL")
updateSelectizeInput(session, input='selectize1', choices = branch_name, selected = "ALL")
updateSelectizeInput(session, input='selectize2', choices = branch_code, selected = "ALL")
})
## After selecting a branch name, update the branch code, vice versa
observeEvent(input$selectize1, updateSelectizeInput(session,input='selectize2',selected=branch_map$Branch_Code[match(input$selectize1, branch_map$Branch_Name)]))
observeEvent(input$selectize2, updateSelectizeInput(session,input='selectize1',selected=branch_map$Branch_Name[match(input$selectize2, branch_map$Branch_Code)]))
## update selectinput based on the risky only display and quarter
observeEvent(input$risky_only,{
if(input$risky_only == "ALL") {
updateSelectizeInput(session, input='selectize1', choices = branch_name, selected = "ALL")
updateSelectizeInput(session, input='selectize2', choices = branch_code, selected = "ALL")}
if(input$risky_only == "High Risk") {
if (input$tabs == "detection") {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$model_pred>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",unique(sort(as.integer(branch_all$Branch_Code[branch_all$model_pred>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2]))),selected = NULL))
} else {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$predict_prob>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",unique(sort(as.integer(branch_all$Branch_Code[branch_all$predict_prob>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2]))),selected = NULL))
}
}
if(input$risky_only == "Medium Risk") {
if (input$tabs == "detection") {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$model_pred>=model_cutoff_medium() & branch_all$model_pred<model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",unique(sort(as.integer(branch_all$Branch_Code[branch_all$model_pred>=model_cutoff_medium() & branch_all$model_pred<model_cutoff_high() & branch_all$audit_qtr==input$quarter2]))),selected = NULL))
} else {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$predict_prob>=model_cutoff_medium() & branch_all$predict_prob<model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",unique(sort(as.integer(branch_all$Branch_Code[branch_all$predict_prob>=model_cutoff_medium() & branch_all$predict_prob<model_cutoff_high() & branch_all$audit_qtr==input$quarter2]))),selected = NULL))
}
}
if(input$risky_only == "Low Risk") {
if (input$tabs == "detection") {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$model_pred<model_cutoff_medium() &branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2',choices=c("ALL",unique(sort(as.integer(branch_all$Branch_Code[branch_all$model_pred<model_cutoff_medium() & branch_all$audit_qtr==input$quarter2]))),selected = NULL))
} else {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$predict_prob<model_cutoff_medium() &branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2',choices=c("ALL",unique(sort(as.integer(branch_all$Branch_Code[branch_all$predict_prob<model_cutoff_medium() & branch_all$audit_qtr==input$quarter2]))),selected = NULL))
}
}
})
observeEvent(input$quarter2,{
if(input$risky_only == "ALL") {
updateSelectizeInput(session, input='selectize1', choices = branch_name, selected = input$selectize1)
updateSelectizeInput(session, input='selectize2', choices = branch_code, selected = input$selectize2)
}
if(input$risky_only == "High Risk") {
if (input$tabs == "detection") {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$model_pred>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",sort(as.integer(branch_all$Branch_Code[branch_all$model_pred>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
} else {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$predict_prob>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",sort(as.integer(branch_all$Branch_Code[branch_all$predict_prob>=model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
}
}
if(input$risky_only == "Medium Risk") {
if (input$tabs == "detection") {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$model_pred>=model_cutoff_medium() & branch_all$model_pred<model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",sort(as.integer(branch_all$Branch_Code[branch_all$model_pred>=model_cutoff_medium() & branch_all$model_pred<model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
} else {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$predict_prob>=model_cutoff_medium() & branch_all$predict_prob<model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2', choices=c("ALL",sort(as.integer(branch_all$Branch_Code[branch_all$predict_prob>=model_cutoff_medium() & branch_all$predict_prob<model_cutoff_high() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
}
}
if(input$risky_only == "Low Risk") {
if (input$tabs == "detection") {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$model_pred<model_cutoff_medium() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2',choices=c("ALL",sort(as.integer(branch_all$Branch_Code[branch_all$model_pred<model_cutoff_medium() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
} else {
updateSelectizeInput(session, input='selectize1',choices=c("ALL",as.character(sort(branch_all$Branch_Name[branch_all$predict_prob<model_cutoff_medium() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
updateSelectizeInput(session, input='selectize2',choices=c("ALL",sort(as.integer(branch_all$Branch_Code[branch_all$predict_prob<model_cutoff_medium() & branch_all$audit_qtr==input$quarter2])),selected = NULL))
}
}
})
## default button - reset to default
observeEvent(input$reset, {
if(is.null(input$reset) == FALSE){
updateSelectizeInput(session,input='selectize1',selected = "ALL")
updateSelectizeInput(session,input='selectize2',selected = "ALL")
updateSelectizeInput(session,input='risky_only',selected = "ALL")
updateSelectInput(session,input='quarter2',choices = audit_qtr[1:(length(audit_qtr)-4)],
selected = current_qtr)
}
})
############################################################
# Info Boxes
############################################################
  # Info boxes: number of distinct branches per risk band in the selected
  # quarter.  The first three boxes use the detection score (model_pred); the
  # *_Pred variants use the forward-looking prediction score (predict_prob).
  # Bands: low < medium cutoff <= medium < high cutoff <= high.
  output$Low_Risk_Branches <- renderValueBox({
    valueBox(length(unique(branch_all$Branch_Code[branch_all$audit_qtr==input$quarter2 & branch_all$model_pred<model_cutoff_medium()])),
             "Low Risk Branches", icon = icon("check-circle"),color="olive")
  })
  output$Medium_Risk_Branches <- renderValueBox({
    valueBox(length(unique(branch_all$Branch_Code[branch_all$audit_qtr==input$quarter2 & branch_all$model_pred>=model_cutoff_medium() & branch_all$model_pred<model_cutoff_high()])),
             "Medium Risk Branches", icon = icon("bell"), color = "orange")
  })
  output$High_Risk_Branches <- renderValueBox({
    valueBox(length(unique(branch_all$Branch_Code[branch_all$audit_qtr==input$quarter2 & branch_all$model_pred>=model_cutoff_high()])),
             "High Risk Branches", icon = icon("exclamation-triangle"), color = "red")
  })
  # Prediction-model counterparts of the three boxes above.
  output$Low_Risk_Branches_Pred <- renderValueBox({
    valueBox(length(unique(branch_all$Branch_Code[branch_all$audit_qtr==input$quarter2 & branch_all$predict_prob<model_cutoff_medium()])),
             "Low Risk Branches", icon = icon("check-circle"),color="olive")
  })
  output$Medium_Risk_Branches_Pred <- renderValueBox({
    valueBox(length(unique(branch_all$Branch_Code[branch_all$audit_qtr==input$quarter2 & branch_all$predict_prob>=model_cutoff_medium() & branch_all$predict_prob<model_cutoff_high()])),
             "Medium Risk Branches", icon = icon("bell"), color = "orange")
  })
  output$High_Risk_Branches_Pred <- renderValueBox({
    valueBox(length(unique(branch_all$Branch_Code[branch_all$audit_qtr==input$quarter2 & branch_all$predict_prob>=model_cutoff_high()])),
             "High Risk Branches", icon = icon("exclamation-triangle"), color = "red")
  })
############################################################
# Map
############################################################
# select branches according to the user selection - detection model
filtered_Branch_Data <- reactive({
df <- reactive_branch_all$df %>%
dplyr::filter(audit_qtr == input$quarter2)
if (input$selectize2 == "ALL") {df} else{
df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
if (input$risky_only == "ALL") {df} else if (input$risky_only == "High Risk"){
df <- df %>% dplyr::filter(model_pred >= model_cutoff_high())}
else if(input$risky_only == "Medium Risk"){
df <- df %>% dplyr::filter(model_pred >= model_cutoff_medium() & model_pred < model_cutoff_high())}
else {
df <- df %>% dplyr::filter(model_pred < model_cutoff_medium())
}
return(df)
})
# select branches according to the user selection - prediction model
filtered_Branch_Data_Pred <- reactive({
df <- reactive_branch_all$df %>%
dplyr::filter(audit_qtr == input$quarter2)
if (input$selectize2 == "ALL") {df} else{
df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
if (input$risky_only == "ALL") {df} else if (input$risky_only == "High Risk"){
df <- df %>% dplyr::filter(predict_prob >= model_cutoff_high())}
else if(input$risky_only == "Medium Risk"){
df <- df %>% dplyr::filter(predict_prob >= model_cutoff_medium() & predict_prob < model_cutoff_high())}
else {
df <- df %>% dplyr::filter(predict_prob < model_cutoff_medium())
}
return(df)
})
# load detection model map
  # Base leaflet map centred on Taiwan.  Markers are drawn separately by the
  # observer below via leafletProxy() so the tiles are only rendered once.
  output$dbs_tw_map<-renderLeaflet({
    leaflet() %>%
      #addTiles() %>%
      addProviderTiles(providers$OpenStreetMap.BlackAndWhite) %>%
      setView(lat = 23.6978, lng = 120.9605, zoom = 7)
  })
# draw markers on the detection model map
  # Redraw the detection-map markers whenever the filtered data changes.
  # Marker size/colour encodes the risk band (high = large red, medium =
  # medium amber, low = small green); the popup shows branch details plus the
  # detection verdict vs. the recorded audit outcome (scn1_1M).
  observe({
    # Nothing matched the filters: just clear any previously drawn markers.
    if(nrow(filtered_Branch_Data())==0) { leafletProxy("dbs_tw_map") %>% clearShapes() %>% clearMarkers()}
    else {
      leafletProxy("dbs_tw_map", data = filtered_Branch_Data()) %>%
        clearShapes() %>%
        clearMarkers() %>%
        # Zoom the viewport to the bounding box of the matched branches.
        fitBounds(
          ~min(lon), ~min(lat), ~max(lon), ~max(lat)
        ) %>%
        addCircleMarkers(~lon,
                         ~lat,
                         fillOpacity=0.75, stroke = FALSE,
                         radius = ~ifelse(filtered_Branch_Data()$model_pred>=model_cutoff_high(),20,
                                          ifelse(filtered_Branch_Data()$model_pred<model_cutoff_medium(),6,15)),
                         color = ifelse(filtered_Branch_Data()$model_pred>=model_cutoff_high(),"red",
                                        ifelse(filtered_Branch_Data()$model_pred<model_cutoff_medium(),"green","#ffab00")),
                         label=filtered_Branch_Data()$Branch_Name,
                         popup= paste(sep = "<br/>",
                                      h5(strong(filtered_Branch_Data()$Branch_Name)),
                                      filtered_Branch_Data()$Address,
                                      filtered_Branch_Data()$audit_qtr,
                                      paste0("Detected as Risky Branch: ",
                                             ifelse(filtered_Branch_Data()$model_pred>=model_cutoff_medium(), "Yes", "No")),
                                      paste0("Actual Audit Issue: ",
                                             filtered_Branch_Data()$scn1_1M)))
    }
  }) # end of map render
# load prediction model map
  # Base leaflet map for the prediction model.  Unlike the detection map this
  # one also draws an initial set of markers; the observer below then redraws
  # the same markers reactively, so the addCircleMarkers() here is effectively
  # an initial render -- NOTE(review): looks redundant with the observer;
  # confirm before removing.
  output$dbs_tw_map_pred<-renderLeaflet({
    leaflet(filtered_Branch_Data_Pred()) %>%
      addProviderTiles(providers$OpenStreetMap.BlackAndWhite) %>%
      setView(lat = 23.6978, lng = 120.9605, zoom = 7) %>%
      addCircleMarkers(~lon,
                       ~lat,
                       fillOpacity=0.75, stroke = FALSE,
                       radius = ~ifelse(filtered_Branch_Data_Pred()$predict_prob>=model_cutoff_high(),20,
                                        ifelse(filtered_Branch_Data_Pred()$predict_prob<model_cutoff_medium(),6,15)),
                       color = ifelse(filtered_Branch_Data_Pred()$predict_prob>=model_cutoff_high(),"red",
                                      ifelse(filtered_Branch_Data_Pred()$predict_prob<model_cutoff_medium(),"green","#ffab00")),
                       label=filtered_Branch_Data_Pred()$Branch_Name,
                       popup= paste(sep = "<br/>",
                                    h5(strong(filtered_Branch_Data_Pred()$Branch_Name)),
                                    filtered_Branch_Data_Pred()$Address,
                                    filtered_Branch_Data_Pred()$audit_qtr
                       ))
  })
# draw markers on prediction model map
  # Redraw the prediction-map markers whenever the filtered data changes.
  # Same size/colour encoding as the detection map, but driven by
  # predict_prob; the popup omits the verdict/outcome lines.
  observe({
    # Nothing matched the filters: clear any previously drawn markers.
    if(nrow(filtered_Branch_Data_Pred())==0) { leafletProxy("dbs_tw_map_pred") %>% clearShapes() %>% clearMarkers()}
    else {
      leafletProxy("dbs_tw_map_pred", data = filtered_Branch_Data_Pred()) %>%
        clearShapes() %>%
        clearMarkers() %>%
        # Zoom the viewport to the bounding box of the matched branches.
        fitBounds(
          ~min(lon), ~min(lat), ~max(lon), ~max(lat)
        ) %>%
        addCircleMarkers(~lon,
                         ~lat,
                         fillOpacity=0.75, stroke = FALSE,
                         radius = ~ifelse(filtered_Branch_Data_Pred()$predict_prob>=model_cutoff_high(),20,
                                          ifelse(filtered_Branch_Data_Pred()$predict_prob<model_cutoff_medium(),6,15)),
                         color = ifelse(filtered_Branch_Data_Pred()$predict_prob>=model_cutoff_high(),"red",
                                        ifelse(filtered_Branch_Data_Pred()$predict_prob<model_cutoff_medium(),"green","#ffab00")),
                         label=filtered_Branch_Data_Pred()$Branch_Name,
                         popup= paste(sep = "<br/>",
                                      h5(strong(filtered_Branch_Data_Pred()$Branch_Name)),
                                      filtered_Branch_Data_Pred()$Address,
                                      filtered_Branch_Data_Pred()$audit_qtr
                         ))
    }
  }) # end of map render
# detection model: marker click action -> display branch details
observeEvent(input$dbs_tw_map_marker_click,{
click <- input$dbs_tw_map_marker_click
if (is.null(click)) return()
updateSelectizeInput(session, input='selectize1', selected = branch_map$Branch_Name[branch_map$lat == click$lat & branch_map$lon == click$lng])
updateSelectizeInput(session, input='selectize2', selected = branch_map$Branch_Code[branch_map$lat == click$lat & branch_map$lon == click$lng])
})
# prediction model: marker click action -> display branch details
observeEvent(input$dbs_tw_map_pred_marker_click,{
click <- input$dbs_tw_map_pred_marker_click
if (is.null(click)) return()
updateSelectizeInput(session, input='selectize1', selected = branch_map$Branch_Name[branch_map$lat == click$lat & branch_map$lon == click$lng])
updateSelectizeInput(session, input='selectize2', selected = branch_map$Branch_Code[branch_map$lat == click$lat & branch_map$lon == click$lng])
})
############################################################
# ROR Plot
############################################################
# dataset for ROR plot
filtered_Branch_ROR_plot <- reactive({
if (input$selectize2 == "ALL") {df <- branch_all} else{
df <- branch_all %>% dplyr::filter(Branch_Code == input$selectize2)
}
df$model_detection_outcome <- ifelse(df$model_pred >= model_cutoff_medium(), "Yes", "No")
df$model_prediction_outcome <- ifelse(df$predict_prob >= model_cutoff_medium(), "Yes", "No")
return(df)
})
  # Time series of actual audit issues vs. the detection model's verdict for
  # one branch.  Renders an empty plot unless exactly one branch is selected.
  # NOTE(review): the trace labelled "Model Prediction" is actually the
  # *detection* outcome (model_detection_outcome) -- the label may mislead.
  output$plot_ROR <- renderPlotly({
    if(length(unique(filtered_Branch_ROR_plot()$Branch_Code))!=1) {plotly_empty()} else {
    plot_data = filtered_Branch_ROR_plot()
    # first four quarter is training, so shouldn't have prediction
    plot_data[1:4,]$model_detection_outcome = NA
    plot_ly(plot_data, x = ~audit_qtr, y = ~scn1_1M,
            type = 'scatter', mode = 'lines+markers', line = list(width = 4),
            name = "Actual Audit Issue", marker = list(size = 12)) %>%
      add_trace(name = "Model Prediction", y = ~model_detection_outcome, x = ~audit_qtr,
                type = "scatter", mode = "lines+markers",
                line = list(color = "#ff3d00", width = 4),
                marker = list(size = 12)) %>%
      layout(xaxis = list(title="Quarter", rangeslider = list(type = "date")),
             yaxis = list(title="Risky Branch"),
             title = "Audit Issue Tracking",
             titlefont = list(family="Helvetica")
      )}
  })
output$plot_ROR_pred <- renderPlotly({
# ensure only 1 branch is selected
if(length(unique(filtered_Branch_ROR_plot()$Branch_Code))!=1) {plotly_empty()} else {
plot_data <- filtered_Branch_ROR_plot()
# first four quarter is training, so shouldn't have prediction
plot_data[1:4,]$model_prediction_outcome = NA
# prediction outcome need to shift down one row, for a new quarter
plot_data[nrow(plot_data)+1,] = NA
plot_data$model_prediction_outcome_shift = c(NA, plot_data$model_prediction_outcome[1:nrow(plot_data)-1])
timestamp = plot_data[nrow(plot_data)-1,]$audit_qtr
year = as.numeric(substr(timestamp, 1, 4))
qtr = as.numeric(substr(timestamp, 7, 7))
if (qtr < 4)
qtr = qtr + 1
else {
qtr = 1
year = year + 1
}
timestamp = paste0(year, "-Q", qtr)
levels(plot_data$audit_qtr) = c(levels(plot_data$audit_qtr), timestamp) #audit_qtr is a factor
plot_data[nrow(plot_data),]$audit_qtr = timestamp
# start plotting
# actual audit issue
plot_ly(plot_data, x = ~audit_qtr, y = ~scn1_1M,
type = 'scatter', mode = 'lines+markers', line = list(width = 4),
name = "Actual Audit Issue", marker = list(size = 12)) %>%
# predicted audit issue
add_trace(name = "Model Prediction", y = ~model_prediction_outcome_shift, x = ~audit_qtr,
type = "scatter", mode = "lines+markers",
line = list(color = "#ff3d00", width = 4),
marker = list(size = 12)) %>%
layout(xaxis = list(title="Quarter", rangeslider = list(type = "date")),
yaxis = list(title="Risky Branch"),
title = "Audit Issue Tracking",
titlefont = list(family="Helvetica")
)}
})
############################################################
# Important Features
############################################################
# dataset for Individual Branch plot, select 12 features for their changes over the quarter
  # Per-branch feature matrix for the importance heatmap.  When a single
  # branch is selected, its feature columns (everything after the first two
  # metadata columns) are reordered by their value in the latest quarter so
  # related magnitudes appear together; NAs sort first.
  filtered_Branch_heatmap_plot <- reactive({
    if (input$selectize2 == "ALL") {df <- reactive_taiwan_branch_heatmap_data$df} else{
      df <- reactive_taiwan_branch_heatmap_data$df %>% dplyr::filter(Branch_Code == input$selectize2)
      # Row of feature values for this branch in the most recent quarter.
      index_row <- as.vector(t(df[df$Branch_Code==input$selectize2 & df$audit_qtr==max(as.vector(df$audit_qtr)),-c(1:2)]))
      # +2 re-offsets the ordering back past the two metadata columns.
      col_order <- order(index_row,na.last = FALSE) + 2
      df <- df[,c(1,2,col_order)]
    }
    return(df)
  })
## plot important features heatmap
plot_import_features <- reactive(
plot_ly(data = filtered_Branch_heatmap_plot(),
z = t(data.matrix(filtered_Branch_heatmap_plot()[,-c(1:2)])),
colors = "Reds",
y = colnames(filtered_Branch_heatmap_plot()[,-c(1:2)]),
x = ~sort(audit_qtr,decreasing = FALSE),
type = "heatmap") %>%
layout(xaxis = list(title="Quarter",
rangeslider = list(type = "date")),
title = "Important Features Tracking by Branch",
font = "Arial",
margin = list(l = 200, r = 50, b = 50, t = 50, pad = 4))
)
## plotting all important features by branch for detection model
  # Both tabs render the same importance heatmap; each shows an empty plot
  # unless exactly one branch is selected.
  output$plot_branch_heatmap <- renderPlotly({
    if(length(unique(filtered_Branch_ROR_plot()$Branch_Code))!=1) {plotly_empty()} else {
      plot_import_features()
    }
  })
  ## plotting all important features by branch for prediction model
  output$plot_branch_heatmap_pred <- renderPlotly({
    if(length(unique(filtered_Branch_ROR_plot()$Branch_Code))!=1) {plotly_empty()} else {
      plot_import_features()
    }
  })
############################################################
# ROC Plot & Confusion Matrix
############################################################
## Plotting ROC curve
filtered_roc_curve <- reactive({
roc_filter_df <- branch_all %>% dplyr::filter(audit_qtr == input$quarter2) %>%
select(scn1_1M, model_pred)
roc_evaluation <- pROC::roc(roc_filter_df$scn1_1M, roc_filter_df$model_pred)
roc_df_plot <- data.frame(sensitivity = roc_evaluation$sensitivities,
specificity = 1- roc_evaluation$specificities)
return(roc_df_plot)
})
filtered_auc_selected <- reactive({
roc_filter_df <- branch_all %>% dplyr::filter(audit_qtr == input$quarter2) %>%
select(scn1_1M, model_pred)
auc_selected <- pROC::auc(roc_filter_df$scn1_1M, roc_filter_df$model_pred)
return(auc_selected)
})
  # Detection-model ROC plot: the model curve (spline, filled), a diagonal
  # "Random" reference line, and an AUC annotation.
  output$roc_curve <- renderPlotly({
    plot_ly(filtered_roc_curve(), y = ~sensitivity, x = ~specificity) %>%
      add_trace(name = "Model", y = ~sensitivity, x = ~specificity, type = "scatter", mode = "lines",
                line = list(shape = "spline", color = "#d50000", width = 7),
                fill = "tozeroy", fillcolor = "#102027", opacity = 0.5) %>%
      add_segments(x = 0, y = 0, xend = 1, yend = 1,
                   line = list(dash = "7px", color = "#ffa000", width = 4),
                   name = "Random") %>%
      add_annotations(x = 0.7, y = 0.2, showarrow = F,
                      text = paste0("Area Under Curve (AUC): ", round(filtered_auc_selected(),2)),
                      font = list(family = "serif", size = 18, color = "#E8E2E2")) %>%
      layout(xaxis = list(range = c(0,1), zeroline = T, showgrid = F,
                          title = "<b>1 - Specificity (FPR)</b>"),
             yaxis = list(range = c(0,1), zeroline = T, showgrid = F,
                          domain = c(0, 0.9),
                          title = "<b>Sensitivity (TPR)</b>"),
             title = paste0("<b>Receiver Operator Curve (ROC) - ",input$quarter2, "</b>"),
             titlefont = list(family="Helvetica",
                              size = 18
             ),
             margin = list(l = 80, r = 80, b = 80, t = 80, pad = 1),
             plot_bgcolor = "#fafafa")
  })
## output confusion matrix
filtered_confusion_matrix_data <- reactive({
roc_filter_df <- branch_all %>% dplyr::filter(audit_qtr == input$quarter2) %>%
select(scn1_1M, model_pred)
return(roc_filter_df)
})
  # 2x2 confusion matrix: detection verdict ("Yes" if model_pred >= medium
  # cutoff) vs. actual audit issue (scn1_1M), rendered as a small datatable.
  # NOTE(review): recent caret versions require both confusionMatrix()
  # arguments to be factors with identical levels -- confirm that the
  # character ifelse() result and scn1_1M satisfy the installed version.
  output$confusion_matrix <- DT::renderDataTable({
    DT::datatable(
      data = data.frame(
        "predicted.row_vs_actual.col" = c("No", "Yes"),
        as.matrix(caret::confusionMatrix(ifelse(filtered_confusion_matrix_data()$model_pred >= model_cutoff_medium(), "Yes", "No"),
                                         filtered_confusion_matrix_data()$scn1_1M))
      )
      , options = list(paging = FALSE,
                       searching = FALSE)
      , rownames= FALSE
    )
  })
  # ROC plot for prediction model
filtered_roc_curve_pred <- reactive({
roc_filter_df <- branch_all %>% dplyr::filter(audit_qtr == input$quarter2) %>%
select(predict_target, predict_prob)
roc_evaluation <- pROC::roc(roc_filter_df$predict_target, roc_filter_df$predict_prob)
roc_df_plot <- data.frame(sensitivity = roc_evaluation$sensitivities,
specificity = 1- roc_evaluation$specificities)
return(roc_df_plot)
})
filtered_auc_selected_pred <- reactive({
roc_filter_df <- branch_all %>% dplyr::filter(audit_qtr == input$quarter2) %>%
select(predict_target, predict_prob)
auc_selected <- pROC::auc(roc_filter_df$predict_target, roc_filter_df$predict_prob)
return(auc_selected)
})
  # Prediction-model ROC plot.  The current quarter has no realised outcomes
  # yet, so an empty plot is shown when it is selected.
  output$roc_curve_pred <- renderPlotly({
    # ensure current qtr not selected
    if(input$quarter2 == current_qtr) { plotly_empty() } else {
    plot_ly(filtered_roc_curve_pred(), y = ~sensitivity, x = ~specificity) %>%
      add_trace(name = "Model", y = ~sensitivity, x = ~specificity, type = "scatter", mode = "lines",
                line = list(shape = "spline", color = "#d50000", width = 7),
                fill = "tozeroy", fillcolor = "#102027", opacity = 0.5) %>%
      add_segments(x = 0, y = 0, xend = 1, yend = 1,
                   line = list(dash = "7px", color = "#ffa000", width = 4),
                   name = "Random") %>%
      add_annotations(x = 0.7, y = 0.2, showarrow = F,
                      text = paste0("Area Under Curve (AUC): ", round(filtered_auc_selected_pred(),2)),
                      font = list(family = "serif", size = 18, color = "#E8E2E2")) %>%
      layout(xaxis = list(range = c(0,1), zeroline = T, showgrid = F,
                          title = "<b>1 - Specificity (FPR)</b>"),
             yaxis = list(range = c(0,1), zeroline = T, showgrid = F,
                          domain = c(0, 0.9),
                          title = "<b>Sensitivity (TPR)</b>"),
             title = paste0("<b>Receiver Operator Curve (ROC) - ",input$quarter2, "</b>"),
             titlefont = list(family="Helvetica",
                              size = 18
             ),
             margin = list(l = 80, r = 80, b = 80, t = 80, pad = 1),
             plot_bgcolor = "#fafafa")
    }
  })
## output confusion matrix for prediction model
filtered_confusion_matrix_data_pred <- reactive({
roc_filter_df <- branch_all %>% dplyr::filter(audit_qtr == input$quarter2) %>%
select(predict_target, predict_prob)
return(roc_filter_df)
})
  # Prediction-model confusion matrix; empty for the current quarter because
  # its outcomes are not yet known.
  # NOTE(review): see the caret factor-levels caveat on the detection matrix.
  output$confusion_matrix_pred <- DT::renderDataTable({
    # ensure current qtr not selected
    if(input$quarter2 == current_qtr) { DT::datatable(data.frame()) } else {
      DT::datatable(
        data = data.frame(
          "predicted.row_vs_actual.col" = c("No", "Yes"),
          as.matrix(caret::confusionMatrix(ifelse(filtered_confusion_matrix_data_pred()$predict_prob >= model_cutoff_medium(), "Yes", "No"),
                                           filtered_confusion_matrix_data_pred()$predict_target))
        )
        , options = list(paging = FALSE,
                         searching = FALSE)
        , rownames= FALSE
      )
    }
  })
########################################################################
# Heatmaps for individual features
########################################################################
### 1. acct opening
  # Feature heatmap 1: account-opening counts per branch per quarter.  Rows
  # are sorted ascending by the latest quarter's value; columns 1:2 are
  # Branch_Code/Branch_Name metadata.
  filtered_acct_open_heatmap <- reactive({
    last_col <- tail(colnames(reactive_acct_open_heatmap_data$data), 1)
    df <- reactive_acct_open_heatmap_data$data[with(reactive_acct_open_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)
    }
    # Branch names become row names so plotly can use them as y-axis labels.
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$acct_open_heatmap <- renderPlotly({
    plot_ly(data = filtered_acct_open_heatmap(),
            z = data.matrix(filtered_acct_open_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_acct_open_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_acct_open_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Acct Opening Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 2. acct closure
  # Feature heatmap 2: account-closure counts, same layout and sorting as the
  # account-opening heatmap above.
  filtered_acct_closure_heatmap <- reactive({
    # print(reactive_acct_closure_heatmap_data$data)
    last_col <- tail(colnames(reactive_acct_closure_heatmap_data$data),1)
    df <- reactive_acct_closure_heatmap_data$data[with(reactive_acct_closure_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$acct_closure_heatmap <- renderPlotly({
    # print(filtered_acct_closure_heatmap())
    plot_ly(data = filtered_acct_closure_heatmap(),
            z = data.matrix(filtered_acct_closure_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_acct_closure_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_acct_closure_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Acct Closure Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 3. Deposit Txn Count Heatmap
  # Feature heatmap 3: deposit transaction counts; same layout/sorting scheme.
  filtered_tran_dt_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_dt_heatmap_data$data),1)
    df <- reactive_tran_dt_heatmap_data$data[with(reactive_tran_dt_heatmap_data$data,order(get(last_col), decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_dt_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_dt_heatmap(),
            z = data.matrix(filtered_tran_dt_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_dt_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_dt_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Deposit Txn Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 4. Deposit Txn Amount Heatmap
  # Feature heatmap 4: deposit transaction amounts; same layout/sorting scheme.
  filtered_tran_da_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_da_heatmap_data$data),1)
    df <- reactive_tran_da_heatmap_data$data[with(reactive_tran_da_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_da_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_da_heatmap(),
            z = data.matrix(filtered_tran_da_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_da_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_da_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Deposit Txn Amount",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 5. Withdrawal Txn Count Heatmap
  # Feature heatmap 5: withdrawal transaction counts; same layout/sorting
  # scheme.
  filtered_tran_wt_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_wt_heatmap_data$data),1)
    df <- reactive_tran_wt_heatmap_data$data[with(reactive_tran_wt_heatmap_data$data, order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_wt_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_wt_heatmap(),
            z = data.matrix(filtered_tran_wt_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_wt_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_wt_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Withdrawal Txn Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 6. Withdrawal Txn Amount Heatmap
filtered_tran_wa_heatmap <- reactive({
last_col <- tail(colnames(reactive_tran_wt_heatmap_data$data),1)
df <- reactive_tran_wt_heatmap_data$data[with(reactive_tran_wt_heatmap_data$data, order(get(last_col),decreasing = FALSE)),]
if (input$selectize2 == "ALL") {df} else{
df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
row.names(df) <- df$Branch_Name
return(df)
})
  # Feature heatmap 6 render: withdrawal transaction amounts.
  output$tran_wa_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_wa_heatmap(),
            z = data.matrix(filtered_tran_wa_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_wa_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_wa_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Withdrawal Txn Amount",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 7. Remittance Txn Count Heatmap
  # Feature heatmap 7: remittance transaction counts; same layout/sorting
  # scheme.
  filtered_tran_rt_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_rt_heatmap_data$data),1)
    df <- reactive_tran_rt_heatmap_data$data[with(reactive_tran_rt_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_rt_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_rt_heatmap(),
            z = data.matrix(filtered_tran_rt_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_rt_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_rt_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Remittance Txn Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 8. Remittance Txn Amount Heatmap
  # Feature heatmap 8: remittance transaction amounts; same layout/sorting
  # scheme.
  filtered_tran_ra_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_ra_heatmap_data$data),1)
    df <- reactive_tran_ra_heatmap_data$data[with(reactive_tran_ra_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_ra_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_ra_heatmap(),
            z = data.matrix(filtered_tran_ra_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_ra_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_ra_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Remittance Txn Amount",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 9. Total Financial Txn Count Heatmap
  # Feature heatmap 9: total financial transaction counts; same layout/sorting
  # scheme.
  filtered_tran_total_ft_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_total_ft_heatmap_data$data),1)
    df <- reactive_tran_total_ft_heatmap_data$data[with(reactive_tran_total_ft_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_total_ft_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_total_ft_heatmap(),
            z = data.matrix(filtered_tran_total_ft_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_total_ft_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_total_ft_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Total Financial Txn Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 10. Total Financial Txn Amount Heatmap
  # Feature heatmap 10: total financial transaction amounts; same
  # layout/sorting scheme.
  filtered_tran_total_fa_heatmap <- reactive({
    last_col <- tail(colnames(reactive_tran_total_fa_heatmap_data$data),1)
    df <- reactive_tran_total_fa_heatmap_data$data[with(reactive_tran_total_fa_heatmap_data$data, order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$tran_total_fa_heatmap <- renderPlotly({
    plot_ly(data = filtered_tran_total_fa_heatmap(),
            z = data.matrix(filtered_tran_total_fa_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_tran_total_fa_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_tran_total_fa_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Total Financial Txn Amount",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
### 11. Total Non-Financial Txn Count Heatmap
  # Feature heatmap 11: total non-financial transaction counts; same
  # layout/sorting scheme.
  filtered_nf_tran_total_heatmap <- reactive({
    last_col <- tail(colnames(reactive_nf_tran_total_heatmap_data$data),1)
    df <- reactive_nf_tran_total_heatmap_data$data[with(reactive_nf_tran_total_heatmap_data$data,order(get(last_col),decreasing = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$nf_tran_total_heatmap <- renderPlotly({
    plot_ly(data = filtered_nf_tran_total_heatmap(),
            z = data.matrix(filtered_nf_tran_total_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_nf_tran_total_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_nf_tran_total_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "Total Non-Financial Txn Count",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
  ### 12. HR Abs Changes Heatmap
  # Feature heatmap 12: absolute HR changes; same layout/sorting scheme.
  # This dataset can contain NAs, hence the explicit na.last = FALSE so
  # missing branches sort to the bottom of the heatmap.
  filtered_hr_abs_changes_heatmap <- reactive({
    last_col <- tail(colnames(reactive_hr_abs_changes_heatmap_data$data),1)
    df <- reactive_hr_abs_changes_heatmap_data$data[with(reactive_hr_abs_changes_heatmap_data$data,order(get(last_col),decreasing = FALSE, na.last = FALSE)),]
    if (input$selectize2 == "ALL") {df} else{
      df <- df %>% dplyr::filter(Branch_Code == input$selectize2)}
    row.names(df) <- df$Branch_Name
    return(df)
  })
  output$hr_abs_changes_heatmap <- renderPlotly({
    plot_ly(data = filtered_hr_abs_changes_heatmap(),
            z = data.matrix(filtered_hr_abs_changes_heatmap()[,-c(1:2)]),
            x = ~sort(colnames(filtered_hr_abs_changes_heatmap()[,-c(1:2)]),decreasing = FALSE),
            y = ~rownames(filtered_hr_abs_changes_heatmap()),
            colors = "Reds",
            type = "heatmap") %>%
      layout(xaxis = list(title="Quarter"),
             yaxis = list(title="Branch Name",
                          dtick = 1,
                          tickfont = list(size = 11)),
             title = "HR Abs Changes",
             font = list(family = "Helvetica"),
             margin = list(l = 120, r = 50, b = 100, t = 50, pad = 4))
  })
########################################################################
# Data Table
########################################################################
# Data tables 1-4: account open/close counts and deposit txn count/amount.
# Each pair is a reactive (sorted by Branch_Name, optionally filtered to the
# branch chosen in selectize2) plus a DT renderer with CSV/Excel export.
## 1. data table - account open count
filtered_acct_open_datatable <- reactive({
  tbl <- acct_open_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$acct_open_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_acct_open_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 2. data table - account closure count
filtered_acct_closure_datatable <- reactive({
  tbl <- acct_closure_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$acct_closure_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_acct_closure_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 3. data table - deposit txn count
filtered_tran_dt_datatable <- reactive({
  tbl <- tran_dt_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_dt_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_dt_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 4. data table - deposit txn amount
filtered_tran_da_datatable <- reactive({
  tbl <- tran_da_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_da_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_da_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
# Data tables 5-8: withdrawal and remittance txn count/amount.
# Same pattern as tables 1-4: sort by Branch_Name, optional branch filter,
# DT renderer with CSV/Excel export buttons.
## 5. data table - withdrawal txn count
filtered_tran_wt_datatable <- reactive({
  tbl <- tran_wt_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_wt_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_wt_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 6. data table - withdrawal txn amount
filtered_tran_wa_datatable <- reactive({
  tbl <- tran_wa_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_wa_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_wa_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 7. data table - remittance txn count
filtered_tran_rt_datatable <- reactive({
  tbl <- tran_rt_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_rt_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_rt_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 8. data table - remittance txn amount
filtered_tran_ra_datatable <- reactive({
  tbl <- tran_ra_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_ra_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_ra_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
# Data tables 9-12: total financial txn (ft/fa), non-financial txn, HR abs
# changes.  Same pattern as tables 1-8.
## 9. data table - total financial txn (ft)
# NOTE(review): the original comment said "amount" for both 9 and 10; the
# "_ft" suffix suggests this one is the count -- confirm upstream.
filtered_tran_total_ft_datatable <- reactive({
  tbl <- tran_total_ft_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_total_ft_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_total_ft_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 10. data table - total financial txn amount
filtered_tran_total_fa_datatable <- reactive({
  tbl <- tran_total_fa_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$tran_total_fa_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_tran_total_fa_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 11. data table - non financial txn
filtered_nf_tran_total_datatable <- reactive({
  tbl <- nf_tran_total_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$nf_tran_total_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_nf_tran_total_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
## 12. data table - HR abs changes
filtered_hr_abs_changes_datatable <- reactive({
  tbl <- hr_abs_changes_table_data$data
  tbl <- tbl[order(tbl$Branch_Name, decreasing = FALSE), ]
  if (input$selectize2 != "ALL") {
    tbl <- dplyr::filter(tbl, Branch_Code == input$selectize2)
  }
  tbl
})
output$hr_abs_changes_datatable <- DT::renderDataTable({
  dt_opts <- list(
    lengthMenu = list(c(10, 20, -1), c('10', '20', 'All')),
    pageLength = 10,
    scrollX = TRUE,
    fixedColumns = list(leftColumns = 2),
    dom = 'Blfrtip',
    buttons = list(
      list(extend = 'collection',
           buttons = c('csv', 'excel'),
           text = 'Download')
    )
  )
  DT::datatable(data = filtered_hr_abs_changes_datatable(),
                extensions = c('FixedColumns', 'Buttons'),
                rownames = FALSE,
                options = dt_opts)
})
########################################################################
# Data Upload
########################################################################
# One readiness flag per upload widget.  Set to TRUE only after the file
# passes the sheet/column checks below; flipped back to FALSE on any
# failure.  Written via <<- from inside the observers, read by the
# enable/disable observer that gates the Confirm button.
file1_ready <- FALSE
file2_ready <- FALSE
file3_ready <- FALSE
file4_ready <- FALSE
file5_ready <- FALSE
file6_ready <- FALSE
file7_ready <- FALSE
# Validate file 1 as soon as it is chosen: read the three expected sheets,
# then confirm all expected columns (FILE1_SHEET*_COLS, defined elsewhere)
# are present.  Any read error or missing column shows a message and clears
# the flag.
observeEvent(input$file1, {
  output$file1_check <- renderText("")
  tryCatch({
    file1_sheet1 <- read_excel(input$file1$datapath,
                               sheet = "Deposit_Transactions",
                               range = cell_cols("A:F"),
                               col_types = c("text", "text", "text", "text", "numeric", "numeric"))
    file1_sheet2 <- read_excel(input$file1$datapath,
                               sheet = "Withdrawal_Transactions")
    file1_sheet3 <- read_excel(input$file1$datapath,
                               sheet = "Other_Financial",
                               col_types = c("text", "text", "text", "text", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
    file1_ready <<- TRUE
    if (!all(FILE1_SHEET1_COLS %in% colnames(file1_sheet1)) ||
        !all(FILE1_SHEET2_COLS %in% colnames(file1_sheet2)) ||
        !all(FILE1_SHEET3_COLS %in% colnames(file1_sheet3))) {
      output$file1_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file1_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)  # surface the read_excel error in the server log
    output$file1_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file1_ready <<- FALSE
  }
  )
})
# Validate file 2 (single sheet, fixed A:K layout).
observeEvent(input$file2, {
  output$file2_check <- renderText("")
  tryCatch({
    file2 <- read_excel(input$file2$datapath, sheet = 1,
                        range = cell_cols("A:K"),
                        col_types = c("text", "text", "text", "text", "numeric", "numeric", "numeric", "text", "numeric", "numeric", "numeric"))
    file2_ready <<- TRUE
    if (!all(FILE2_COLS %in% colnames(file2))) {
      output$file2_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file2_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)
    output$file2_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file2_ready <<- FALSE
  })
})
# Validate file 3 (single sheet, columns inferred).
observeEvent(input$file3, {
  output$file3_check <- renderText("")
  tryCatch({
    file3 <- read_excel(input$file3$datapath, sheet = 1)
    file3_ready <<- TRUE
    if (!all(FILE3_COLS %in% colnames(file3))) {
      output$file3_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file3_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)
    output$file3_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file3_ready <<- FALSE
  })
})
# Validate file 4 (single sheet).
observeEvent(input$file4, {
  output$file4_check <- renderText("")
  tryCatch({
    file4 <- read_excel(input$file4$datapath, sheet = 1)
    file4_ready <<- TRUE
    if (!all(FILE4_COLS %in% colnames(file4))) {
      output$file4_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file4_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)
    output$file4_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file4_ready <<- FALSE
  })
})
# Validate file 5 (three sheets by position).
observeEvent(input$file5, {
  output$file5_check <- renderText("")
  tryCatch({
    file5_sheet1 <- read_excel(input$file5$datapath, sheet = 1)
    file5_sheet2 <- read_excel(input$file5$datapath, sheet = 2)
    file5_sheet3 <- read_excel(input$file5$datapath, sheet = 3)
    file5_ready <<- TRUE
    if (!all(FILE5_SHEET1_COLS %in% colnames(file5_sheet1)) ||
        !all(FILE5_SHEET2_COLS %in% colnames(file5_sheet2)) ||
        !all(FILE5_SHEET3_COLS %in% colnames(file5_sheet3))) {
      output$file5_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file5_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)
    output$file5_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file5_ready <<- FALSE
  })
})
# Validate file 6 (single sheet).
observeEvent(input$file6, {
  output$file6_check <- renderText("")
  tryCatch({
    file6 <- read_excel(input$file6$datapath, sheet = 1)
    file6_ready <<- TRUE
    if (!all(FILE6_COLS %in% colnames(file6))) {
      output$file6_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file6_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)
    output$file6_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file6_ready <<- FALSE
  })
})
# Validate file 7 (six named report sheets).  Same contract as the other
# upload observers: set file7_ready, or show an error and clear the flag.
# (Removed an unused `result = 0` left over from earlier debugging.)
observeEvent(input$file7, {
  output$file7_check <- renderText("")
  tryCatch({
    file7_sheet1 <- read_excel(input$file7$datapath, sheet="Rep101_Open_Account")
    file7_sheet2 <- read_excel(input$file7$datapath, sheet="Rep102_Close_Account")
    file7_sheet3 <- read_excel(input$file7$datapath, sheet="Rep201_CHECK_ISSUE")
    file7_sheet4 <- read_excel(input$file7$datapath, sheet="Rep202_CHECK_PAYMENT_CANCEL")
    file7_sheet5 <- read_excel(input$file7$datapath, sheet="Rep203_CHECK_OUTWARD_CLEAR")
    file7_sheet6 <- read_excel(input$file7$datapath, sheet="Rep301_CMSCHECK_TRANSACTION")
    file7_ready <<- TRUE
    if (!all(FILE7_SHEET1_COLS %in% colnames(file7_sheet1)) ||
        !all(FILE7_SHEET2_COLS %in% colnames(file7_sheet2)) ||
        !all(FILE7_SHEET3_COLS %in% colnames(file7_sheet3)) ||
        !all(FILE7_SHEET4_COLS %in% colnames(file7_sheet4)) ||
        !all(FILE7_SHEET5_COLS %in% colnames(file7_sheet5)) ||
        !all(FILE7_SHEET6_COLS %in% colnames(file7_sheet6))) {
      output$file7_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
      file7_ready <<- FALSE
    }
  }, error = function(e) {
    print(e)  # surface the read_excel error in the server log
    output$file7_check <- renderText("File has wrong sheet(s) or column(s). Please upload the correct file.")
    file7_ready <<- FALSE
  })
})
# The Confirm button starts disabled and is enabled only when all seven
# uploads have passed validation.  Re-evaluated whenever any file input
# changes (the flags themselves are plain variables, not reactive).
shinyjs::disable("preprocess_confirm")
observeEvent({
  input$file1
  input$file2
  input$file3
  input$file4
  input$file5
  input$file6
  input$file7
},
{
  # Scalar flags: use short-circuiting && (the original used elementwise &).
  if (file1_ready && file2_ready && file3_ready && file4_ready &&
      file5_ready && file6_ready && file7_ready) {
    shinyjs::enable("preprocess_confirm")
  } else {
    shinyjs::disable("preprocess_confirm")
  }
}
)
# observe the action button
# Main upload pipeline.  Runs when the user confirms the seven validated
# files: merges the new quarter into the master data set, retrains the
# model, regenerates every heatmap/table data object and refreshes the
# control widgets.  Heavy use of <<- because these objects live in the
# enclosing server/global scope.
observeEvent(input$preprocess_confirm, {
  shinyjs::disable("preprocess_confirm")
  withProgress({
    setProgress(message = "Processing data ...", value = 0.2)
    # Get file paths
    file1_path <<- input$file1$datapath
    file2_path <<- input$file2$datapath
    file3_path <<- input$file3$datapath
    file4_path <<- input$file4$datapath
    file5_path <<- input$file5$datapath
    file6_path <<- input$file6$datapath
    file7_path <<- input$file7$datapath
    # Load and process new data
    data_new <- load_and_process_data(file1_path, file2_path, file3_path, file4_path, file5_path, file6_path, file7_path, branch_map)
    branch_all_new <- data_new$all_data
    # Drop 2013 data
    branch_all_new <- branch_all_new[branch_all_new$Year!="2013",]
    # Reject re-uploads: the newest quarter in the file must be later than
    # the latest quarter already loaded.
    audit_qtr_new <- as.character(sort(unique(branch_all_new$audit_qtr),decreasing = TRUE))
    latest_qtr <- max(audit_qtr_new)
    if (latest_qtr <= current_qtr) {
      # NOTE(review): this early return leaves the Confirm button disabled
      # until a new file is chosen -- confirm that is the intended UX.
      output$upload <- renderText("The dataset has been uploaded previously. Please upload the data for the latest quarter.")
      return()
    }
    # Drop the prediction results of the existing data before re-merging
    branch_all <<- branch_all[, !(names(branch_all) %in% c('model_pred', 'predict_prob'))]
    # Merge the new data (quarters not already present) into the master set
    branch_all_new <- branch_all_new[!branch_all_new$audit_qtr %in% audit_qtr,]
    branch_all_new <- as.data.frame(branch_all_new)
    branch_all <<- rbind(branch_all, branch_all_new)
    branch_all <<- branch_all[order(branch_all$Branch_Code, branch_all$Year, branch_all$Quarter),]
    # Select Non_Financial_Breakdown from the merged data
    Non_Financial_Breakdown <- branch_all[, c('Branch_Code', 'Year', 'Quarter', 'Acct_Open_Count_Q',
                                              'Acct_Closure_Count_Q', 'Cheque_Issuance_Count_Q',
                                              'Cheque_Payment_Cancel_Count_Q', 'Cheque_Outward_Clearing_Count_Q')]
    # Re-train, make predictions and append prediction results to the master data frame
    setProgress(message = "Training the model ...", value = 0.5)
    branch_all <<- train_and_predict(branch_all, features_selected_rds_file, branch_map, detection_seed, prediction_seed)
    write.csv(branch_all, branch_all_file, row.names = FALSE)
    # Update reactive values.
    # NOTE(review): re-assigning brand-new reactiveValues objects does not
    # invalidate reactives bound to the old objects -- confirm downstream
    # reactives pick up the replacements as intended.
    reactive_branch_all$df <<- branch_all
    Non_Financial_Breakdown_v3 <<- Non_Financial_Breakdown_heatmap_data_preprocess(Non_Financial_Breakdown)
    taiwan_branch_heatmap_data <<- get_heapmap_data(branch_all, Non_Financial_Breakdown_v3)
    acct_open_heatmap_data <<- get_acct_open_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    acct_closure_heatmap_data <<- get_acct_closure_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_dt_heatmap_data <<- get_tran_dt_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_da_heatmap_data <<- get_tran_da_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_wt_heatmap_data <<- get_tran_wt_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_wa_heatmap_data <<- get_tran_wa_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_rt_heatmap_data <<- get_tran_rt_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_ra_heatmap_data <<- get_tran_ra_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_total_ft_heatmap_data <<- get_tran_total_ft_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    tran_total_fa_heatmap_data <<- get_tran_total_fa_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    nf_tran_total_heatmap_data <<- get_nf_tran_total_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    hr_abs_changes_heatmap_data <<- get_hr_abs_changes_heatmap_data(taiwan_branch_heatmap_data, branch_map)
    reactive_taiwan_branch_heatmap_data <<- reactiveValues(df = taiwan_branch_heatmap_data)
    reactive_acct_open_heatmap_data <<- reactiveValues(data = acct_open_heatmap_data)
    reactive_acct_closure_heatmap_data <<- reactiveValues(data = acct_closure_heatmap_data)
    reactive_tran_dt_heatmap_data <<- reactiveValues(data = tran_dt_heatmap_data)
    reactive_tran_da_heatmap_data <<- reactiveValues(data = tran_da_heatmap_data)
    reactive_tran_wt_heatmap_data <<- reactiveValues(data = tran_wt_heatmap_data)
    # Fix: the withdrawal-amount reactive was missing from this refresh
    # list, so its heatmap kept showing stale data after an upload.
    reactive_tran_wa_heatmap_data <<- reactiveValues(data = tran_wa_heatmap_data)
    reactive_tran_rt_heatmap_data <<- reactiveValues(data = tran_rt_heatmap_data)
    reactive_tran_ra_heatmap_data <<- reactiveValues(data = tran_ra_heatmap_data)
    reactive_tran_total_ft_heatmap_data <<- reactiveValues(data = tran_total_ft_heatmap_data)
    reactive_tran_total_fa_heatmap_data <<- reactiveValues(data = tran_total_fa_heatmap_data)
    reactive_nf_tran_total_heatmap_data <<- reactiveValues(data = nf_tran_total_heatmap_data)
    reactive_hr_abs_changes_heatmap_data <<- reactiveValues(data = hr_abs_changes_heatmap_data)
    acct_open_table_data <<- reactiveValues(data = get_acct_open_table_data(branch_all, branch_map))
    acct_closure_table_data <<- reactiveValues(data = get_acct_closure_table_data(branch_all, branch_map))
    tran_dt_table_data <<- reactiveValues(data = get_tran_dt_table_data(branch_all, branch_map))
    tran_da_table_data <<- reactiveValues(data = get_tran_da_table_data(branch_all, branch_map))
    tran_wt_table_data <<- reactiveValues(data = get_tran_wt_table_data(branch_all, branch_map))
    tran_wa_table_data <<- reactiveValues(data = get_tran_wa_table_data(branch_all, branch_map))
    tran_rt_table_data <<- reactiveValues(data = get_tran_rt_table_data(branch_all, branch_map))
    tran_ra_table_data <<- reactiveValues(data = get_tran_ra_table_data(branch_all, branch_map))
    tran_total_ft_table_data <<- reactiveValues(data = get_tran_total_ft_table_data(branch_all, branch_map))
    tran_total_fa_table_data <<- reactiveValues(data = get_tran_total_fa_table_data(branch_all, branch_map))
    nf_tran_total_table_data <<- reactiveValues(data = get_nf_tran_total_table_data(branch_all, branch_map))
    hr_abs_changes_table_data <<- reactiveValues(data = get_hr_abs_changes_table_data(branch_all, branch_map))
    # Update menu selection
    branch_name <<- c("ALL",as.character(sort(unique(branch_all$Branch_Name))))
    branch_code <<- c("ALL",sort(as.numeric(unique(branch_all$Branch_Code))))
    audit_date <<- as.Date(sort(unique(branch_all$audit_date),decreasing = TRUE))
    audit_qtr <<- as.character(sort(unique(branch_all$audit_qtr),decreasing = TRUE))
    current_date <<- max(audit_date)
    current_qtr <<- max(audit_qtr)
    # Update the control panel.  Fix: use inputId= explicitly; the original
    # passed input=, which only worked through partial argument matching.
    updateSelectizeInput(session, inputId = 'selectize1', choices = branch_name, selected = "ALL")
    updateSelectizeInput(session, inputId = 'selectize2', choices = branch_code, selected = "ALL")
    updateSelectInput(session, inputId = 'quarter2', choices = audit_qtr[1:(length(audit_qtr)-4)],
                      selected = current_qtr)
    setProgress(message = "Completed !", value = 1)
    output$upload <- renderText("Completed!")
  })
})
}
shinyApp(ui, server,,options(shiny.port=8080))
|
a040b7ff6dae1cd87b41eeb3247fd99895ea911a
|
2cbe58f7cd1af3d7cccfda6adfdc24bb4b5dce82
|
/election1.R
|
f7a6f8659f96c774e9e2ea10d5b8aff0330a9a7f
|
[] |
no_license
|
gitcnk/Git-class-demo
|
80a731611576bba847fbb8c17cfd4f3da91d0a2b
|
aeac81250de3dfd117d8d531cc044bc5c15a70b8
|
refs/heads/master
| 2023-08-12T13:47:52.382695
| 2021-10-14T09:13:15
| 2021-10-14T09:13:15
| 291,781,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,669
|
r
|
election1.R
|
# Florida county-level election analysis: PCA on educational-attainment
# columns, then compare the first component (education index) with the
# 2012 winner and unemployment figures.
library(dplyr)
FL <- read.csv('https://raw.githubusercontent.com/gitcnk/Data/master/ElectionData/Florida_before2016.csv')
# Keep only the educational-attainment columns for the PCA.
FL_edu <- FL %>%
  select(no_hs_diploma,
         hs_diploma,
         associates_degree,
         bachelors_degree,
         above_bachelors_degree)
# Centre and scale the variables.  Note prcomp's argument is `scale.` (with
# a trailing dot); the original `scale = T` only worked through partial
# argument matching, and T/F are replaced with TRUE/FALSE.
FL_edu_pca <- prcomp(FL_edu, center = TRUE, scale. = TRUE)
summary(FL_edu_pca)
plot(FL_edu_pca$sdev)  # scree-style plot of component standard deviations
# First principal component as a single per-county education index.
PC1 <- predict(FL_edu_pca)[, 1]
FL$edPC1 <- PC1
library(ggplot2)
ggplot(data = FL) +
  geom_point(mapping = aes(x = who_won_2012,
                           y = -edPC1,
                           col = who_won_2012)) +
  coord_flip()
ggplot(data = FL) +
  geom_point(mapping = aes(x = who_won_2012,
                           y = education_index,
                           col = who_won_2012)) +
  coord_flip()
## Correlation with unemployment
# Strip thousands separators before converting to numeric.  (The original
# had a bare, unassigned gsub() call here -- a no-op preview -- which has
# been removed.)
FL$unemployed <- as.numeric(gsub(pattern = ',', replacement = '', x = FL$unemployed))
ggplot(data = FL) +
  geom_point(mapping = aes(x = unemployed,
                           y = education_index,
                           col = who_won_2012))
ggplot(data = FL) +
  geom_point(mapping = aes(x = unemployment_rate,
                           y = edPC1,
                           col = who_won_2012)) +
  scale_color_manual(values = c('blue', 'red'))
ggplot(data = FL) +
  geom_point(mapping = aes(x = unemployment_rate,
                           y = education_index,
                           col = who_won_2012)) +
  scale_color_manual(values = c('blue', 'red'))
|
72ebb44b16210cb4176f941ff1ea251d9949c620
|
a9e2d4666f1b55c5c70ffece61a49d1c2cf17478
|
/random.R
|
0cb4e6041d26d56bbc156eea69af8405b04eaaed
|
[] |
no_license
|
cforter/wine_experiment
|
7f56264050761374d57176db58408dce0ea05288
|
36c0f846caadf25ae671f7ce9e470853a388c75e
|
refs/heads/master
| 2019-01-02T04:55:15.902442
| 2015-05-01T17:52:16
| 2015-05-01T17:52:16
| 31,523,285
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 164
|
r
|
random.R
|
# Randomly assign two treatment (1) and two control (0) slots.
# NOTE(review): no set.seed(), so the assignment is not reproducible.
assignment <- c(rep(1,2), rep(0,2))
assignment.random <- sample(assignment, 4)
# NOTE(review): `names` has 3 entries but `assignment.random` has 4, so
# cbind() recycles `names` with a warning -- confirm whether a fourth name
# is missing or the assignment vector should be length 3.
names <- c("Tyler", "Tony", "Vedant")
# cbind() on mixed inputs produces a character matrix, not a data frame.
df <- cbind(names, assignment.random)
# View() only works in interactive sessions (e.g. RStudio).
View(df)
|
3ae65ff9272da955d2f1c38e65c2e2a95aee4068
|
3bb9a36aaed141664f5533cc8ce36c11c6df09b1
|
/src/munge.R
|
04070960f21f3b213202cebb897a1127ca7ac835
|
[] |
no_license
|
tereom/HMM
|
0dc4c05d1792b7bc060e094b8b4222fc9355492e
|
d038497ac9d5c175c59b3bb4db5bb19914783708
|
refs/heads/master
| 2016-09-05T14:15:50.505639
| 2014-11-20T16:05:21
| 2014-11-20T16:05:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,800
|
r
|
munge.R
|
# Munge MODIS PsnNet (net photosynthesis) rasters into tidy data frames.
# Two passes over the same pipeline: a manually drawn region, then a second
# rectangle in the North.  Several steps are interactive (drawExtent()).
library(lubridate)
library(stringr)
library(raster)
library(plyr)
library(dplyr)
library(tidyr)
library(Hmisc)
library(ggplot2)
library(fda)
# NOTE(review): the leading "\\P" is an unusual regex escape; the intended
# pattern was probably ".*PsnNet_1km\\.tif$" -- confirm against the files.
paths <- list.files(path = "../data/gpp", pattern = "\\PsnNet_1km.tif$",
full.names = TRUE)
## Isolate a region of the map
image_1 <- raster(paths[1])
plot(image_1)
ext <- drawExtent()  # interactive: click two corners on the plot
# save(ext, file = "../data/map_subset.Rdata")
load(file = "../data/map_subset.Rdata")
## layer to kml
cropped <- crop(image_1,ext)
plot(cropped)
projection(cropped)
rlatlong <- projectRaster(cropped, crs=CRS('+proj=longlat'))
KML(rlatlong, filename='browse_cuadrito_ultima.kml')
## Brick (each time step is a layer)
# NOTE(review): growing the brick with addLayer() inside the loop copies on
# every iteration; stack(paths) then crop() would be cheaper if it fits.
brick_npp <- brick()
for (i in 1:length(paths)){
image <-raster(paths[i])
image <- crop(image, ext)
brick_npp <- addLayer(brick_npp, image)
}
# missings to NA (MODIS fill values are > 32760)
brick_npp[brick_npp > 32760] <- NA
# save(brick_npp, file = "../data/brick_npp.RData")
# NOTE(review): path inconsistency -- saved to "../data/" but loaded from
# "data/"; confirm the intended working directory.
load("data/brick_npp.RData")
# convert to matrix
matrix_npp <- rasterToPoints(brick_npp)
# convert to data.frame: one row per pixel per time step, with the date
# parsed out of the file name (characters 9-18) plus calendar helpers.
df_npp <- matrix_npp %>%
data.frame() %>%
gather(file, npp, -x, -y) %>%
mutate(
date = as.Date(substr(file, 9, 18), format='%Y.%m.%d'),
year = year(date),
month = month(date),
week = week(date),
day = day(date),
week2 = mapvalues(week, from = 1:52, to = rep(1:26, each = 2)),
.id = paste(x, y, sep = "_"),
x_cat = cut2(x, g = 5),
y_cat = cut2(y, g = 5)
)
# save(df_npp, file = "data/df_npp.RData")
#####################################################################
## Select rectangle in the North (same pipeline, second region)
image_1 <- raster(paths[1])
plot(image_1)
ext <- drawExtent()
# save(ext, file = "../data/map_subset_N.Rdata")
load(file = "../data/map_subset_N.Rdata")
## layer to kml
cropped <- crop(image_1,ext)
plot(cropped)
projection(cropped)
rlatlong <- projectRaster(cropped, crs=CRS('+proj=longlat'))
KML(rlatlong, filename='../data/cuadro_N.kml')
## Brick (each time step is a layer)
brick_npp <- brick()
for (i in 1:length(paths)){
image <-raster(paths[i])
image <- crop(image, ext)
brick_npp <- addLayer(brick_npp, image)
}
# missings to NA
brick_npp[brick_npp > 32760] <- NA
# save(brick_npp, file = "../data/brick_npp_N.RData")
load("../data/brick_npp_N.RData")
# convert to matrix
matrix_npp <- rasterToPoints(brick_npp)
# convert to data.frame (identical transformation for the northern region)
df_npp <- matrix_npp %>%
data.frame() %>%
gather(file, npp, -x, -y) %>%
mutate(
date = as.Date(substr(file, 9, 18), format='%Y.%m.%d'),
year = year(date),
month = month(date),
week = week(date),
day = day(date),
week2 = mapvalues(week, from = 1:52, to = rep(1:26, each = 2)),
.id = paste(x, y, sep = "_"),
x_cat = cut2(x, g = 5),
y_cat = cut2(y, g = 5)
)
# save(df_npp, file = "../data/df_npp_N.RData")
|
a822b5232a7604374dd69a5768462f6ef2c03213
|
34eec9275ed1b4106e0aca5af87ce33e99a4f782
|
/Exploratory Data Analysis/Week4 Project/Plot5.R
|
4088f5f00177a5211bfe4a023a5069a4a0797e2e
|
[] |
no_license
|
dexterwang/DataScienceJohnHopkinsUni
|
eb6bb7fc4fdadd7c31b0b2415125b9228c3eea17
|
8182450515a49317ec4034bce7bbc61b8461431a
|
refs/heads/master
| 2020-12-24T20:24:30.050685
| 2016-05-25T17:29:42
| 2016-05-25T17:29:42
| 57,115,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,630
|
r
|
Plot5.R
|
# Plot 5: yearly PM2.5 emissions from motor-vehicle-related sources in
# Baltimore City (fips 24510), EPA National Emissions Inventory data.
# NOTE(review): machine-specific path; consider running from the project
# directory instead of setwd().
setwd("C:/D/R/Exploratory Data Analysis/week4 project")
# Install ggplot2 on demand, then attach it.  requireNamespace() replaces
# the require()-for-loading anti-pattern (require() attaches as a side
# effect and merely returns FALSE on failure).
if (!requireNamespace("ggplot2", quietly = TRUE)) {
  install.packages("ggplot2")
}
library(ggplot2)
# read data from source files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# First attempt: match "motor vehicle(s)" in SCC Short.Name (column 3).
# That matches 20 surface-coating categories and yields only two NONPOINT
# observations for Baltimore, so it is not an appropriate subset.
# (An exploratory unique() preview of those categories was removed.)
subSCC <- SCC[grep("\\<motor vehicle[s]*\\>", SCC[,3], ignore.case = TRUE) ,1]
subNEI <- NEI[NEI$SCC %in% subSCC & NEI$fips=="24510",]
# Instead, treat emissions with type "ON-ROAD" as motor-vehicle related.
subNEI <- NEI[NEI$type == "ON-ROAD"& NEI$fips=="24510",]
# calculate total emission by each year
Yearly_Total_Emission <- with(subNEI,tapply(Emissions,year,sum))
# construct a data frame to contain the yearly emission
df_YTE <- data.frame(Yearly_Total_Emission)
df_YTE$year <- row.names(Yearly_Total_Emission)
# plot the yearly emission trend with ggplot2; the emission decreased over
# the period of 1999 and 2008
g<- ggplot(df_YTE,aes(year,Yearly_Total_Emission))+geom_bar(stat="identity")
g+ labs(title="Total yearly PM2.5 emission from motor vehicle related sources in Baltimore City",x="Year",y="total emission (in tons)")
# save screen output as png
dev.copy(png,"plot5.png", width=480, height=480)
dev.off()
|
ddcfba770366bb048ec0f6c8d73c05500407c674
|
4af1baeb8bd7ca845beb983fcf7c662ab5df6d7e
|
/Finance/S&P Analysis - All Stock.R
|
d7a6b578535503d7e1f0031817b68632f04f7682
|
[] |
no_license
|
santiagovama/R
|
54a52cebae1d36ddbabb3080205fbf2fe8b7b956
|
c12cd9b9de4e7a8888386c20ce64a1a481327766
|
refs/heads/master
| 2023-08-17T06:27:32.434463
| 2021-10-02T15:01:05
| 2021-10-02T15:01:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,464
|
r
|
S&P Analysis - All Stock.R
|
# S&P 500 risk/reward analysis, following:
# http://www.mattdancho.com/investments/2016/10/23/SP500_Analysis.html
library(quantmod) # get stock prices; useful stock analysis functions
library(xts) # working with extensible time series
library(rvest) # web scraping
library(tidyverse) # ggplot2, purrr, dplyr, tidyr, readr, tibble
library(stringr) # working with strings
library(forcats) # working with factors
library(lubridate) # working with dates in tibbles / data frames
library(plotly) # Interactive plots
library(corrplot) # Visualize correlation plots
library(magrittr)
# Web-scrape SP500 stock list.
# NOTE(review): Wikipedia has since renamed these table headers (e.g.
# "Symbol" instead of "Ticker symbol"); verify before running.
sp_500 <- read_html("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies") %>%
html_node("table.wikitable") %>%
html_table() %>%
select(`Ticker symbol`, Security, `GICS Sector`, `GICS Sub Industry`) %>%
as_tibble()
# Format names: lower-case, syntactically valid column names
names(sp_500) <- sp_500 %>%
names() %>%
str_to_lower() %>%
make.names()
# Show results
sp_500
# Count distinct values per column
sp_500 %>%
lapply(function(x) x %>% unique() %>% length()) %>%
unlist() # show in condensed format
# Find company names listed more than once (multiple share classes)
sp_500 %>%
group_by(security) %>%
summarize(count = n()) %>%
filter(count > 1)
sp_500 %>%
filter(security == "Under Armour")
# Under Armour appears twice; keep a single share class
sp_500 <- sp_500 %>%
filter(ticker.symbol != "UA.C")
# Bar chart: number of SP500 constituents per GICS sector
sp_500 %>%
# Summarise data by frequency
group_by(gics.sector) %>%
summarise(count = n()) %>%
# Visualize
ggplot(aes(x = gics.sector %>% fct_reorder(count),
y = count
)) +
geom_bar(stat = "identity") +
geom_text(aes(label = count), size = 3, nudge_y = 4, nudge_x = .1) +
scale_y_continuous(limits = c(0,100)) +
ggtitle(label = "Sector Frequency Among SP500 Stocks") +
xlab(label = "GICS Sector") +
theme(plot.title = element_text(size = 16)) +
coord_flip()
# Download daily OHLCV prices for one ticker via quantmod::getSymbols().
#
# Args:
#   ticker: character scalar, e.g. "AAPL".
#   return_format: "tibble" (default) for a tibble with a Date column;
#     anything else returns the renamed xts object.
#   ...: forwarded to getSymbols() (e.g. from =, to =).
#
# Returns: a tibble (Date, Open, High, Low, Close, Volume, Adjusted) or
# the xts equivalent.
get_stock_prices <- function(ticker, return_format = "tibble", ...) {
  # Get stock prices
  stock_prices_xts <- getSymbols(Symbols = ticker, auto.assign = FALSE, ...)
  # Rename
  names(stock_prices_xts) <- c("Open", "High", "Low", "Close", "Volume", "Adjusted")
  # Return in xts format if tibble is not specified.
  # NOTE(review): recent tibble versions drop xts row names in as_tibble(),
  # which would leave the Date column empty -- verify against the
  # installed tibble release.
  if (return_format == "tibble") {
    stock_prices <- stock_prices_xts %>%
      as_tibble() %>%
      rownames_to_column(var = "Date") %>%
      mutate(Date = ymd(Date))
  } else {
    stock_prices <- stock_prices_xts
  }
  stock_prices
}
# Compute periodic log returns from a price series.
#
# Args:
#   x: an xts price object, or a tibble with a Date column first and an
#      Adjusted column (as produced by get_stock_prices()).
#   return_format: "tibble" (default) or anything else for raw xts.
#   period: passed to quantmod::periodReturn() (e.g. 'daily', 'monthly').
#   ...: forwarded to periodReturn().
#
# Returns: a one-column series named Log.Returns, as tibble or xts.
get_log_returns <- function(x, return_format = "tibble", period = 'daily', ...) {
  # Coerce a tibble input (Date in column 1) back to xts first.
  if (!is.xts(x)) {
    x <- xts(x[, -1], order.by = x$Date)
  }
  log_returns_xts <- periodReturn(x = x$Adjusted, type = 'log', period = period, ...)
  names(log_returns_xts) <- "Log.Returns"
  # Early exit for callers who want the raw xts object.
  if (return_format != "tibble") {
    return(log_returns_xts)
  }
  log_returns_xts %>%
    as_tibble() %>%
    rownames_to_column(var = "Date") %>%
    mutate(Date = ymd(Date))
}
# For every ticker, download ~10 years of prices and derive daily log
# returns; keep prices/returns as nested list-columns and add summary
# statistics (mean/sd of log returns, number of trading days).
# NOTE(review): this fires one remote request per ticker with no error
# handling -- a single failing ticker aborts the whole map(); consider
# wrapping get_stock_prices with purrr::possibly().
sp_500 <- sp_500 %>%
mutate(
stock.prices = map(ticker.symbol,
function(.x) get_stock_prices(.x,
return_format = "tibble",
from = "2007-01-01",
to = "2016-10-23")
),
log.returns = map(stock.prices,
function(.x) get_log_returns(.x, return_format = "tibble")),
mean.log.returns = map_dbl(log.returns, ~ mean(.$Log.Returns)),
sd.log.returns = map_dbl(log.returns, ~ sd(.$Log.Returns)),
n.trade.days = map_dbl(stock.prices, nrow)
)
library(plotly)
plot_ly(data = sp_500,
type = "scatter",
mode = "markers",
x = ~ sd.log.returns,
y = ~ mean.log.returns,
color = ~ n.trade.days,
colors = "Blues",
size = ~ n.trade.days,
text = ~ str_c("<em>", security, "</em><br>",
"Ticker: ", ticker.symbol, "<br>",
"Sector: ", gics.sector, "<br>",
"Sub Sector: ", gics.sub.industry, "<br>",
"No. of Trading Days: ", n.trade.days),
marker = list(opacity = 0.8,
symbol = 'circle',
sizemode = 'diameter',
sizeref = 4.0,
line = list(width = 2, color = '#FFFFFF'))
) %>%
layout(title = 'S&P500 Analysis: Stock Risk vs Reward',
xaxis = list(title = 'Risk/Variability (StDev Log Returns)',
gridcolor = 'rgb(255, 255, 255)',
zerolinewidth = 1,
ticklen = 5,
gridwidth = 2),
yaxis = list(title = 'Reward/Growth (Mean Log Returns)',
gridcolor = 'rgb(255, 255, 255)',
zerolinewidth = 1,
ticklen = 5,
gridwith = 2),
margin = list(l = 100,
t = 100,
b = 100),
font = list(color = '#FFFFFF'),
paper_bgcolor = 'rgb(0, 0, 0)',
plot_bgcolor = 'rgb(0, 0, 0)')
# From the plot we can see that a number of stocks have a unique
# combination of high mean and low standard deviation log returns.
# We can isolate them
sp_500 %>%
filter(mean.log.returns >= 0.001,
sd.log.returns < 0.0315) %>%
select(ticker.symbol, mean.log.returns:n.trade.days) %>%
arrange(mean.log.returns %>% desc())
# compute correlation
limit <- 30
sp_500_hp <- sp_500 %>%
filter(n.trade.days > 1000) %>%
filter(sd.log.returns < 0.0315) %>%
mutate(rank = mean.log.returns %>% desc() %>% min_rank()) %>%
filter(rank <= limit) %>%
arrange(rank) %>%
select(ticker.symbol, rank, mean.log.returns, sd.log.returns, log.returns)
sp_500_hp
sp_500_hp_unnest <- sp_500_hp %>%
select(ticker.symbol, log.returns) %>%
unnest()
sp_500_hp_unnest
sp_500_hp_spread <- sp_500_hp_unnest %>%
spread(key = ticker.symbol, value = Log.Returns) %>%
na.omit()
sp_500_hp_spread
sp_500_hp_cor <- sp_500_hp_spread %>%
select(-Date) %>%
cor()
sp_500_hp_cor[1:6, 1:6] # show first 6 columns and rows
sp_500_hp_cor %>%
corrplot(order = "hclust",
addrect = 11)
|
c1647fcb999d41715b4c10f8eb4184c73be62f73
|
7737cd699104d2a88668f1defa564ef63c52628e
|
/workout03/binomial/man/bin_kurtosis.Rd
|
513547f01f8c493d940588c802022bec27875530
|
[] |
no_license
|
stat133-sp19/hw-stat133-yousufhusain
|
a7a920554e5b93992c535152f8c4c5566121b6b9
|
d8ddc6f8b6c3dddc75a4e295f915ad3f92dfaf1e
|
refs/heads/master
| 2020-04-28T20:47:21.487400
| 2019-05-04T03:32:54
| 2019-05-04T03:32:54
| 175,556,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 384
|
rd
|
bin_kurtosis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/All Functions.R
\name{bin_kurtosis}
\alias{bin_kurtosis}
\title{bin_kurtosis}
\usage{
bin_kurtosis(trials, prob)
}
\arguments{
\item{trials}{number of trials (numeric)}
\item{prob}{probability value (numeric)}
}
\value{
kurtosis
}
\description{
determine the kurtosis
}
\examples{
bin_kurtosis(10, 0.3)
}
|
0d84f7fc0dbbae8bd83d7d60c0585a0ef8bda147
|
6197cec08c2f1d9d0e4d0f14c179e38d8705cbf1
|
/man/get_ancestry_matrix.Rd
|
6a5cf41b4dd043d58092ddf9d03bd91cf701382e
|
[] |
no_license
|
dgrtwo/GSEAMA
|
a63ab3db90027228ae3b83b0800570cfb14e20e6
|
54913f20b83444aa4f8292f0a14152a111ac24a7
|
refs/heads/master
| 2021-05-16T02:39:32.695142
| 2018-05-11T17:57:26
| 2018-05-11T17:57:26
| 23,238,459
| 12
| 9
| null | null | null | null |
UTF-8
|
R
| false
| true
| 746
|
rd
|
get_ancestry_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GOMembershipMatrix.R
\name{get_ancestry_matrix}
\alias{get_ancestry_matrix}
\title{Build an offspring matrix of GO terms}
\usage{
get_ancestry_matrix(terms, ontology = c("BP", "MF", "CC"),
type = "OFFSPRING", upward = TRUE, tbl = FALSE)
}
\arguments{
\item{terms}{IDs of GO terms that should be included in the ancestry matrix}
\item{ontology}{Ontologies to use}
\item{type}{Either OFFSPRING (default), CHILDREN, ANCESTOR, or PARENT}
}
\description{
A sparse binary Matrix object with one row and column for each pair
of GO terms provided, where each row represents an ancestor and each column
represents a descendant, with 1 marking ancestor/descendant pairs.
}
|
f2fe8030bb34d65958f0d2d7b5dc2796962cdad6
|
3bd97ff19bc6cb8db2f20df43cf6f1bc9a3aba82
|
/scratch.R
|
a95f2575e6a019abd9afa6219ca8d82c0621f4da
|
[] |
no_license
|
bpafoshizle/RepData_PeerAssessment1
|
e4204b116302664dc3295a2ae3313af4242952e0
|
55cced4d29aaedeb60f55a63063d9f358903029f
|
refs/heads/master
| 2021-01-17T12:12:47.944910
| 2015-02-09T05:00:11
| 2015-02-09T05:00:11
| 30,498,091
| 0
| 0
| null | 2015-02-08T16:58:46
| 2015-02-08T16:58:46
| null |
UTF-8
|
R
| false
| false
| 894
|
r
|
scratch.R
|
readZipCSV <- function(zipFilePath, ...) {
  # Read a .csv stored inside a zip archive, assuming the csv shares the
  # archive's base name (e.g. "activity.zip" contains "activity.csv").
  # NOTE(review): extra arguments in `...` are accepted but NOT forwarded
  # to read.csv — kept as-is to preserve the original behaviour.
  stem <- gsub(pattern = "(.*)\\..*$",
               replacement = "\\1",
               basename(zipFilePath))
  csv_name <- paste0(stem, ".csv")
  read.csv(unz(zipFilePath, csv_name))
}
# Load the activity data set bundled in activity.zip.
# Fixed: the original passed `row.names = null` — `null` is not an R object
# (R's is NULL); the argument was silently swallowed by readZipCSV's unused
# `...` and would error if ever evaluated, so it is dropped entirely.
activity <- readZipCSV("./activity.zip")
library(dplyr)
# Total steps per day.
activityByDay <- activity %>%
  group_by(date) %>%
  summarize(
    sumSteps = sum(steps)
  ) %>%
  select(date, sumSteps)
# Mean steps per interval across all days.
activityByInterval <- activity %>%
  group_by(interval) %>%
  summarize(meanSteps = mean(steps, na.rm = TRUE))
# Impute missing step counts with the rounded per-interval mean.
activityWithMeans <- left_join(activity, activityByInterval, by = "interval")
idxNa <- which(is.na(activityWithMeans$steps))
activityWithMeans$steps[idxNa] <- round(activityWithMeans$meanSteps[idxNa])
# Recode weekend day names (result intentionally not stored, as in the original).
gsub("Sunday|Saturday", "Weekend", weekdays(as.Date(activityWithMeans$date)))
|
dfe3abcfc69338e2f0120826661beb6b2db6d476
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/man/drs_describe_jobs.Rd
|
cd11002d80723dce48890edcf71cc8139c2e3084
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,009
|
rd
|
drs_describe_jobs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drs_operations.R
\name{drs_describe_jobs}
\alias{drs_describe_jobs}
\title{Returns a list of Jobs}
\usage{
drs_describe_jobs(filters = NULL, maxResults = NULL, nextToken = NULL)
}
\arguments{
\item{filters}{A set of filters by which to return Jobs.}
\item{maxResults}{Maximum number of Jobs to retrieve.}
\item{nextToken}{The token of the next Job to retrieve.}
}
\description{
Returns a list of Jobs. Use the JobsID and fromDate and toDate filters to limit which jobs are returned. The response is sorted by creationDataTime - latest date first. Jobs are created by the StartRecovery, TerminateRecoveryInstances and StartFailbackLaunch APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to \emph{Support} and only used in response to relevant support tickets.
See \url{https://www.paws-r-sdk.com/docs/drs_describe_jobs/} for full documentation.
}
\keyword{internal}
|
76aba84135d247ae3de8c99ca4c54e98f869f0fb
|
7c30f05e1c8e2bf6e5ce2c2a44257d3478fd1427
|
/twitter sentiment.r
|
d331fc8f4142b6c6591c79633b0b57c6de177845
|
[] |
no_license
|
Rishisai/rishi2mshanker
|
8dca9557a757018e27d92fc2c1c483e49bb1de48
|
b27aceb6f78faefd4b54686ebe86ebf84f868a7e
|
refs/heads/master
| 2021-01-22T18:33:01.373610
| 2018-05-06T17:34:13
| 2018-05-06T17:34:13
| 102,409,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,169
|
r
|
twitter sentiment.r
|
# Sentiment scoring of tweets in the twitter_sanders_apple2 data set,
# evaluated against the labelled classes with a confusion matrix.
data <- twitter_sanders_apple2
data$class <- NULL
str(data)
is.na(data)
# Clean the tweet text: entities, retweet markers, handles, punctuation,
# digits, URLs, repeated whitespace, leading/trailing whitespace, case.
data$text <- gsub("&", "", data$text)
data$text <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", data$text)
data$text <- gsub("@\\w+", "", data$text)
data$text <- gsub("[[:punct:]]", "", data$text)
data$text <- gsub("[[:digit:]]", "", data$text)
data$text <- gsub("http\\w+", "", data$text)
data$text <- gsub("[ \t]{2,}", "", data$text)
data$text <- gsub("^\\s+|\\s+$", "", data$text)
data$text <- tolower(data$text)
install.packages("rJava")
options(java.parameters = "- Xmx1024m")
install.packages("RSentiment")
library(RSentiment)
# Score every tweet. Fixed: the row count was hard-coded as 479 in both the
# allocation and the loop bound; derive it from the data instead.
n_tweets <- nrow(data)
result <- data.frame(matrix(nrow = n_tweets, ncol = 2))
for (i in seq_len(n_tweets)) {
  score <- calculate_score(data$text[i])
  result[i, 1] <- i
  result[i, 2] <- score
}
result1 <- result
sort(unique(result$X2))
result1$X1 <- NULL
# Map numeric scores onto labelled ordered levels ...
result1$X2 <- ordered(result1$X2,
                      levels = c(-4, -3, -2, -1, 0, 1, 2, 3, 4, 5),
                      labels = c("Neg4","Neg3","Neg2","Neg1", "Neg0","Pos1","Pos2","Pos3","Pos4","Pos5"))
# ... then strip the trailing digit, collapsing labels to "Neg"/"Pos".
result1$X2 <- gsub('.{1}$', '', result1$X2)
library(caret)
confusionMatrix(result1$X2, twitter_sanders_apple2$class)
|
33c1e044294eeb13e88ff9f04581812822fdfe4e
|
9e06252e613edcefaa4d7c569a3f18ab4ede85e4
|
/R/Officials.getByOfficeTypeState.R
|
d9af2595c04257ae1ded6d504cc3c249f2219038
|
[] |
no_license
|
umatter/pvsR
|
b9be083c1224a96fdbc817b2c2749b9763284e7d
|
9ab57a5a67c0bbf9e0342ea37e14ea496d180df4
|
refs/heads/master
| 2021-01-19T08:41:25.275771
| 2021-01-05T06:38:13
| 2021-01-05T06:38:13
| 87,662,576
| 1
| 3
| null | 2021-01-05T06:33:50
| 2017-04-08T20:34:59
|
R
|
UTF-8
|
R
| false
| false
| 3,535
|
r
|
Officials.getByOfficeTypeState.R
|
##' Get a list of officials according to office type and state
##'
##' This function is a wrapper for the Officials.getByOfficeTypeState() method of the PVS API Officials class which grabs a list of officials according to the office type and state they represent. The function sends a request with this method to the PVS API for all state and office type IDs given as a function input, extracts the XML values from the returned XML file(s) and returns them arranged in one data frame.
##' @usage Officials.getByOfficeTypeState(stateId="NA", officeTypeId)
##' @param stateId (optional) a character string or list of character strings with the state ID(s) (default: "NA", for national) (see references for details)
##' @param officeTypeId a character string or list of character strings with the office type ID(s) (see references for details)
##' @return A data frame with a row for each official and columns with the following variables describing the official:\cr candidateList.candidate*.candidateId,\cr candidateList.candidate*.firstName,\cr candidateList.candidate*.nickName,\cr candidateList.candidate*.middleName,\cr candidateList.candidate*.lastName,\cr candidateList.candidate*.suffix,\cr candidateList.candidate*.title,\cr candidateList.candidate*.electionParties,\cr candidateList.candidate*.officeParties,\cr candidatelist.candidate*.officeStatus,\cr candidateList.candidate*.officeDistrictId,\cr candidateList.candidate*.officeDistrictName,\cr candidateList.candidate*.officeTypeId,\cr candidateList.candidate*.officeId,\cr candidateList.candidate*.officeName,\cr candidateList.candidate*.officeStateId.
##' @references http://api.votesmart.org/docs/Officials.html\cr
##' Use State.getStateIDs() to get a list of state IDs.\cr
##' See http://api.votesmart.org/docs/semi-static.html or use Office.getTypes or Office.getOfficesByLevel to get a list of office types ID(s). \cr
##' See also: Matter U, Stutzer A (2015) pvsR: An Open Source Interface to Big Data on the American Political Sphere. PLoS ONE 10(7): e0130501. doi: 10.1371/journal.pone.0130501
##' @author Ulrich Matter <ulrich.matter-at-unibas.ch>
##' @examples
##' # First, make sure your personal PVS API key is saved as an option
##' # (options("pvs.key" = "yourkey")) or in the pvs.key variable:
##' # Note that some officeTypeIds are only available on the state level or national level
##' # (e.g. "L" for State Legislature only if stateId is specified!)
##' \dontrun{pvs.key <- "yourkey"}
##' # get a list of officials by state and office type
##' \dontrun{CAlegislators <- Officials.getByOfficeTypeState(officeTypeId="L", stateId="CA")}
##' \dontrun{head(CAlegislators)}
##' \dontrun{suprcourt <- Officials.getByOfficeTypeState(officeTypeId="J")}
##' \dontrun{head(suprcourt)}
##' @export
Officials.getByOfficeTypeState <- function(stateId = "NA", officeTypeId) {
  # Fetch officials for every combination of stateId x officeTypeId from
  # the PVS API and return them stacked in a single data frame.
  # Query the API once for a single (state, office type) pair and tag the
  # returned rows with the ids that produced them.
  fetch_one <- function(.stateId, .officeTypeId) {
    request <- "Officials.getByOfficeTypeState?"
    inputs <- paste("&stateId=", .stateId, "&officeTypeId=", .officeTypeId, sep = "")
    output <- pvsRequest4(request, inputs)
    output$stateId <- .stateId
    output$officeTypeId <- .officeTypeId
    output
  }
  # One request per combination: outer loop over states, inner over types.
  output.list <- lapply(stateId, function(y) {
    lapply(officeTypeId, function(s) {
      fetch_one(.stateId = y, .officeTypeId = s)
    })
  })
  # Flatten the nested list, then row-bind the per-request data frames.
  bind_rows(redlist(output.list))
}
|
7395a51fbcea35f97759675bb0be46bff03346c2
|
7ee660ae88243581367005cc35f2e5be95d5b284
|
/inst/unitTests/test_combine.R
|
24b110f8d688d92a53fbc3185540c3975eef8164
|
[] |
no_license
|
dbnunes23/bsseq
|
1de16429c41ff2257c18dab3657f144e18b20d35
|
98a0ecfaa56d25e77012aabdd45ec6ab19979b48
|
refs/heads/master
| 2021-01-25T08:01:15.903678
| 2017-04-11T12:44:05
| 2017-04-11T12:44:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,924
|
r
|
test_combine.R
|
checkBSseqAssaysIdentical <- function(x, y) {
    # Assert that every assay (M, Cov, coef, se.coef) of two BSseq objects
    # holds identical data once realised into ordinary arrays.
    stopifnot(is(x, "BSseq") && is(y, "BSseq"))
    same_assay <- function(an) {
        a <- getBSseq(x, an)
        b <- getBSseq(y, an)
        if (is.null(a)) {
            # Assay absent from x: require it absent (NULL) in y too.
            identical(a, b)
        } else {
            # Realise (possibly HDF5-backed) assays and compare element-wise.
            identical(as.array(a), as.array(b))
        }
    }
    results <- vapply(c("M", "Cov", "coef", "se.coef"), same_assay, logical(1L))
    checkTrue(all(results))
}
checkBSseqIdentical <- function(x, y) {
    # Two BSseq objects are "identical" when their loci, smoothing
    # transform, smoothing parameters, and all assays agree.
    same_ranges <- identical(rowRanges(x), rowRanges(y))
    same_trans  <- identical(getBSseq(x, "trans"), getBSseq(y, "trans"))
    same_params <- identical(getBSseq(x, "parameters"),
                             getBSseq(y, "parameters"))
    checkTrue(same_ranges && same_trans && same_params &&
                  checkBSseqAssaysIdentical(x, y))
}
test_.subassignRowsDelayedMatrix <- function() {
    # .subassignRowsDelayedMatrix() must produce the same result whatever
    # backend (in-memory NULL or "HDF5Array") and row strategy is used.
    # Fixed: the original also constructed an unused matrix `y` / index
    # `y_i`, wasting an HDF5 realisation; both have been removed.
    nrow <- 1000L
    ncol <- 10L
    x <- writeHDF5Array(matrix(seq_len(nrow * ncol), ncol = ncol))
    # Target rows for x inside a matrix twice as tall (the odd rows).
    x_i <- seq(1L, 2L * nrow, 2L)
    z1 <- bsseq:::.subassignRowsDelayedMatrix(x = x,
                                              i = x_i,
                                              nrow = 2L * nrow,
                                              fill = NA_integer_,
                                              BACKEND = NULL)
    z2 <- bsseq:::.subassignRowsDelayedMatrix(x = x,
                                              i = x_i,
                                              nrow = 2L * nrow,
                                              fill = NA_integer_,
                                              BACKEND = "HDF5Array",
                                              by_row = FALSE)
    z3 <- bsseq:::.subassignRowsDelayedMatrix(x = x,
                                              i = x_i,
                                              nrow = 2L * nrow,
                                              fill = NA_integer_,
                                              BACKEND = "HDF5Array",
                                              by_row = TRUE)
    # All three configurations must realise to the same array.
    checkIdentical(as.array(z1), as.array(z2))
    checkIdentical(as.array(z1), as.array(z3))
}
test_.combineListOfDelayedMatrixObjects <- function() {
    # .combineListOfDelayedMatrixObjects() must interleave the rows of two
    # DelayedMatrix objects into a larger fill-padded matrix, identically
    # for in-memory and HDF5Array backends.
    nrow <- 10
    ncol <- 4
    # Two 2-column inputs with disjoint column names (a,b) and (c,d).
    x <- matrix(seq_len(nrow),
                ncol = ncol / 2,
                dimnames = list(NULL, letters[1:2]))
    y <- matrix(100L + seq_len(nrow),
                ncol = ncol / 2,
                dimnames = list(NULL, letters[3:4]))
    # x's rows land on odd rows of the output, y's on even rows.
    x_i <- seq(1, nrow, ncol / 2)
    y_i <- seq(2, nrow, ncol / 2)
    fill <- NA_integer_
    # The expected output, built by direct subassignment into a fill matrix.
    z <- matrix(fill,
                nrow = nrow,
                ncol = ncol,
                dimnames = list(NULL, letters[seq_len(ncol)]))
    # NOTE: as.array(x) is a no-op if x is a matrix and realises a
    # DelayedMatrix in memory
    z[x_i, seq(1, ncol(x))] <- x
    z[y_i, seq(ncol(x) + 1, ncol(x) + ncol(y))] <- y
    # Test with in-memory DelayedMatrix objects
    X <- bsseq:::.DelayedMatrix(x)
    Y <- bsseq:::.DelayedMatrix(y)
    Z <- bsseq:::.combineListOfDelayedMatrixObjects(
        X = list(X, Y),
        I = list(x_i, y_i),
        nrow = nrow,
        ncol = ncol,
        dimnames = list(NULL, c(colnames(X), colnames(Y))),
        fill = fill,
        BACKEND = NULL)
    checkIdentical(z, as.array(Z))
    checkTrue(!bsseq:::.isHDF5ArrayBacked(Z))
    # Test with HDF5Array-backed DelayedMatrix objects
    hdf5_X <- realize(X, BACKEND = "HDF5Array")
    hdf5_Y <- realize(Y, BACKEND = "HDF5Array")
    hdf5_Z <- bsseq:::.combineListOfDelayedMatrixObjects(
        X = list(hdf5_X, hdf5_Y),
        I = list(x_i, y_i),
        nrow = nrow,
        ncol = ncol,
        dimnames = list(NULL, c(colnames(hdf5_X), colnames(hdf5_Y))),
        fill = fill,
        BACKEND = "HDF5Array")
    checkIdentical(z, as.array(hdf5_Z))
    checkTrue(bsseq:::.isHDF5ArrayBacked(hdf5_Z))
}
test_combine <- function() {
    # combine() must merge two single-sample BSseq objects over their loci
    # and behave identically for in-memory, HDF5-backed, and mixed inputs.
    data(BS.chr22)
    bsseq <- BS.chr22[1:1000, ]
    bsseq_fit <- BSmooth(bsseq, verbose = FALSE)
    # HDF5-backed copies of the same objects.
    BSSEQ <- saveHDF5SummarizedExperiment(x = bsseq, dir = tempfile())
    BSSEQ_FIT <- saveHDF5SummarizedExperiment(x = bsseq_fit, dir = tempfile())
    # Overlapping loci windows, one per sample column.
    ai <- 1:100
    bi <- 51:150
    a <- bsseq[ai, 1]
    b <- bsseq[bi, 2]
    ab <- combine(a, b)
    A <- BSSEQ[ai, 1]
    B <- BSSEQ[bi, 2]
    AB <- combine(A, B)
    # In-memory and HDF5-backed results must match.
    checkBSseqIdentical(ab, AB)
    # Mixed-backend pairs must give the same result.
    aB <- combine(a, B)
    checkBSseqIdentical(ab, aB)
    Ab <- combine(A, b)
    checkBSseqIdentical(ab, Ab)
    # Re-combining the columns of a smoothed object reproduces it.
    z <- combine(bsseq_fit[, 1], bsseq_fit[, 2])
    checkBSseqIdentical(z, bsseq_fit)
    Z <- combine(BSSEQ_FIT[, 1], BSSEQ_FIT[, 2])
    checkBSseqIdentical(Z, BSSEQ_FIT)
}
test_combineList <- function() {
    # combineList() must agree with pairwise combine(), accept both a
    # single list and `...` arguments, and mix backends freely.
    data(BS.chr22)
    bsseq <- BS.chr22[1:1000, ]
    bsseq_fit <- BSmooth(bsseq, verbose = FALSE)
    # HDF5-backed copies of the same objects.
    BSSEQ <- saveHDF5SummarizedExperiment(x = bsseq, dir = tempfile())
    BSSEQ_FIT <- saveHDF5SummarizedExperiment(x = bsseq_fit, dir = tempfile())
    # Three loci windows; a and c reuse sample 1, so c is renamed below to
    # keep sample names unique.
    ai <- 1:100
    bi <- 51:150
    ci <- 201:300
    a <- bsseq[ai, 1]
    b <- bsseq[bi, 2]
    c <- bsseq[ci, 1]
    colnames(c) <- "r3"
    abc <- combineList(list(a, b, c))
    A <- BSSEQ[ai, 1]
    B <- BSSEQ[bi, 2]
    C <- BSSEQ[ci, 1]
    colnames(C) <- "r3"
    ABC <- combineList(list(A, B, C))
    # In-memory and HDF5-backed results must match.
    checkBSseqIdentical(abc, ABC)
    # list(...) form and ... form must agree.
    checkBSseqIdentical(combineList(list(a, b, c)),
                        combineList(a, b, c))
    # Mixed in-memory / HDF5 inputs must agree.
    checkBSseqIdentical(combineList(list(a, B, c)),
                        combineList(A, b, C))
    # combineList() of two objects matches combine().
    checkBSseqIdentical(combine(a, b),
                        combineList(a, b))
    checkBSseqIdentical(combine(A, B),
                        combineList(A, B))
    # Re-combining the columns of a smoothed object reproduces it.
    z <- combineList(bsseq_fit[, 1], bsseq_fit[, 2])
    checkBSseqIdentical(z, bsseq_fit)
    Z <- combineList(BSSEQ_FIT[, 1], BSSEQ_FIT[, 2])
    checkBSseqIdentical(Z, BSSEQ_FIT)
}
|
8ce5ab4badc7a394572c69f4696cd3c4728f5cef
|
9300d3ad75869869d214d6bdfd390b02f5f3c137
|
/exercise_3/Exercise3.r
|
6ea234fb2099df14e32c1e6b6bd21d23941cb1f6
|
[] |
no_license
|
KouXou/R_Exercises
|
11cb1d006253dcc1927cd856e97c6bd2ae879fa0
|
5840aa9e297d3209b5b0c0f9dd239a9a712d1ec4
|
refs/heads/master
| 2022-04-11T01:19:02.968963
| 2020-03-23T18:36:29
| 2020-03-23T18:36:29
| 230,941,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,590
|
r
|
Exercise3.r
|
library(car)
library(leaps)
# Function to read Data
read_data <- function(file_path) {
  # Read a whitespace-delimited table whose first column holds row names;
  # "." is the decimal separator and the first line is a header.
  read.table(file_path, header = TRUE, dec = ".", row.names = 1)
}
# Function to plot Data
plot_data <- function(in_data) {
  # Draw a pairwise scatterplot matrix of the data, then return its
  # summary (auto-printed when the function is called at top level).
  pairs(in_data)
  summary(in_data)
}
# Exercise 3: simple and best-subset regression on the ozone data set.
ozone_data <- read_data('./ozone.txt')
head(ozone_data)
plot_data(ozone_data)
scatterplotMatrix(formula = ~maxO3+T9+T12+T15,
                  data = ozone_data[1:4], main="Scatter Plot Matrix",
                  regLine=list(method=lm, lty=1, lwd=2, col='red'))
# Extract the response and the three temperature predictors.
maxO3 <- ozone_data[,1]
T9 <- ozone_data[,2]
T12 <- ozone_data[,3]
T15 <- ozone_data[,4]
# One simple regression per temperature reading.
lm1 <- lm(maxO3~T9, data=ozone_data)
lm2 <- lm(maxO3~T12, data=ozone_data)
lm3 <- lm(maxO3~T15, data=ozone_data)
lm1
lm2
lm3
summary(lm2)
print(paste('Συντελεστής Προσδιορισμού : ', summary(lm2)$r.squared ))
# Fixed: the adjusted R^2 was hard-coded as the stale literal "0.6116";
# compute it from the fitted model instead.
print(paste('Προσαρμοσμένος Συντελεστής Προσδιορισμού : ', summary(lm2)$adj.r.squared))
plot(maxO3 ~ T12)
abline(lm2)
# Fixed: `res<--rstudent(lm2)` parsed as `res <- -rstudent(lm2)` and
# silently NEGATED the studentized residuals before plotting.
res <- rstudent(lm2)
plot(res, pch=15, cex=.5, ylab="Residuals")
abline(h=c(-2,0,2), lty=c(2,1,2))
# Best-subset selection over all candidate predictors.
leaps <- regsubsets(maxO3 ~ T9 + T12 + T15 + Ne9 + Ne12 + Ne15 + Wx9 + Wx12 + Wx15 + maxO3y
                    , data=ozone_data, nbest=1)
plot(leaps, scale="adjr2")
# Prediction for one new observation (lm2 only consumes the T12 column).
new_data.maxO3 <- 70
new_data.T12 <- 19
new_data.Ne9 <- 8
new_data.Wx9 <- 2.05
newx <- matrix(c(new_data.T12, new_data.Ne9, new_data.Wx9, new_data.maxO3), nrow = 1)
# Fixed: column name 'Ne3' was a typo for 'Ne9' (the value is new_data.Ne9).
colnames(newx) <- c('T12', 'Ne9', 'Wx9', 'maxO3n')
newdata <- data.frame(newx)
pr <- predict(lm2, newdata, interval = 'pred')
print(paste('Προβλεπόμενη τιμή: ', pr[1]))
|
b9827dba0570d17aa118f5d779c081733a51bf30
|
c2d9ded06bb67d510a0797f8bf22b24532f80f0d
|
/03_Humidity_Correction.R
|
8e68812dcdde9c5da771bfebae49782e10244557
|
[] |
no_license
|
Neys-Code/Masterarbeit
|
2093a34c68f6d7b2043aa9d60725ff9bf8ac114a
|
7fa32a3f0a73381c74d3e66c3f5dbc5a664ffb1b
|
refs/heads/main
| 2022-12-24T23:51:35.473151
| 2020-10-11T21:03:15
| 2020-10-11T21:03:15
| 303,211,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,044
|
r
|
03_Humidity_Correction.R
|
########################################################
#
#Humidity correction
#
########################################################
#run script 2 first !!!!!!
liste3<-c()
# best parameter for every sensor and correction formula
# create variables
parameter.PM10<-c()
corr.PM10<-c()
parameter.PM25<-c()
corr.PM25<-c()
#iterate though every sensor getting the best set of parameters
# long runtime !
for (i in 1:5)
{
b<-get(liste[i]) # read data
b<-na.omit(b)
PM=b$PM10
PM2=b$PM5
Hum=b$Hum
h3<-minify(PM,Hum,200, meth="pearson",fun="haenel") #run minify function
h4<-minify(PM2,Hum,200, meth="pearson",fun="haenel")
parameter.PM10[i]<-h3[2] # write parameter
corr.PM10[i]<-h3[1] # write corresponding R² for minimal correlation after correction with certain parameter
parameter.PM25[i]<-h4[2]
corr.PM25[i]<-h4[1]
}
#print values
parameter.PM10
corr.PM10
parameter.PM25
corr.PM25
#saved parameters:
# summer period
# hänel parameters pearson r
parameter.PM10.haenel<-c( 0.16, 0.01, 0.13, 0.10, 0.08, 0.10, 0.05, 0.01, 0.10, 0.05, 0.08, 0.05, 0.01, 0.07, 0.11, 0.33 ,0.07, 0.04)
corr.PM10.haenel<-c(3.016909e-04, 5.464012e-03, 2.954449e-04, 5.016221e-03, 2.705385e-04 ,4.871921e-03, 6.477679e-03, 4.479281e-03, 6.758295e-04, 5.805504e-04 ,5.467699e-05, 3.738570e-04, 1.243516e-03 ,1.850964e-03,6.222919e-03, 3.852336e-04 ,6.399280e-04 ,0.00433258)
parameter.PM25.haenel<-c(0.16 ,0.02 ,0.15 ,0.07 ,0.15 ,0.11 ,0.05 ,0.08 ,0.01 ,0.11, 0.09, 0.01, 0.01, 0.06, 0.16 ,0.73 ,0.11 ,0.02)
corr.PM25.haenel<-c(0.0006027008, 0.0107193796 ,0.0074230416 ,0.0031312635, 0.0017701254 ,0.0026194102, 0.0060226544, 0.0056408189, 0.0064777976 ,0.0014677468, 0.0074307741 ,0.0032105885, 0.0016125007, 0.0023624328,0.0023525978 ,0.0003923794, 0.0041374884, 0.00487286)
# fall
#Hänel pearson
parameter.PM10.haenel<-c(0.09, 0.03, 0.08, 0.05, 0.08, 0.08, 0.17, 0.08, 0.01, 0.01, 0.09, 0.01, 0.01, 0.37, 0.14, 0.13, 0.03)
corr.PM10.haenel<-c(0.011874971 ,0.001176366 ,0.012782197, 0.002973503 ,0.003198040, 0.001206695, 0.004409697, 0.006643064 ,0.000909106 ,0.032864726 ,0.005654078, 0.040340846 ,0.184692998, 0.001490821, 0.006113390, 0.005903745,0.005049788)
parameter.PM25.haenel<-c(0.11, 0.05, 0.12, 0.10, 0.14, 0.11, 0.28, 0.10, 0.07 ,0.02, 0.10 ,0.01, 0.01, 0.48, 0.16, 0.10 ,0.10)
corr.PM25.haenel<-c(0.010041799, 0.008299816 ,0.011599963, 0.001410846 ,0.005704815, 0.002258389 ,0.002207879 ,0.003622712, 0.001878455, 0.003513041,0.008423391, 0.024696667, 0.157034945, 0.003227737, 0.003986456 ,0.012807499,0.000841304)
#######################
#Plot growthfunction
########################
# growth factor for every Sensor
gf.S<-c()
gf.H<-c()
for (i in 1:19)
{
c.haenel<-get(liste[i])
c.soneja<-c.haenel
#growth haenel functiom
c.haenel$PM10<-gf.haenel(c.haenel$PM10,c.haenel$Hum,parameter.PM10.haenel[i])
c.haenel$PM5<-gf.haenel(c.haenel$PM5,c.haenel$Hum,parameter.PM25.haenel[i])
#growth factor soneja function
#c.soneja$PM10<-gf.soneja(c.soneja$PM10,c.soneja$Hum,parameter.PM10.soneja[i])
#c.soneja$PM5<-gf.soneja(c.soneja$PM5,c.soneja$Hum,parameter.PM25.soneja[i])
#write data
assign(paste0("gf.haenel",liste[i]),c.haenel)
gf.H[i]<-paste0("gf.haenel",liste[i])
#assign(paste0("gf.soneja",liste[i]),c.soneja)
#gf.S[i]<-paste0("gf.soneja",liste[i])
}
#set colors or plot
farben<-c("gold", "darkorange", "red","darkred","hotpink","plum3","darkmagenta","cornflowerblue","blue","darkblue","turquoise","turquoise4","green","darkgreen","tan2","navajowhite4","grey","black")
#Plotloop growthfunction PM10 Hänel
windows()
plot(`gf.haenelSensor1-7132425`$Hum,`gf.haenelSensor1-7132425`$PM10, type="n", col = "magenta", ylab="Wachstumsfaktor",cex.axis=1.5,cex.lab=1.5, xlab="Luftfeuchtigkeit %", lwd = 1, main="Wachstumsfunktion für PM 10 (Funktion nach Hänel)")
par(new=T)
for (i in 1:19)
{ Sensor<-get(gf.H[i])
plot(Sensor$Hum,Sensor$PM10, xlab="",ylab="",type="p",col = farben[i], axes=FALSE)
par(new=T)}
#Plotloop growthfunction PM 2,5 Hänel
windows()
plot(`gf.haenelSensor1-7132425`$Hum,`gf.haenelSensor1-7132425`$PM5, type="n", col = "magenta", ylab="Wachstumsfaktor",cex.axis=1.5,cex.lab=1.5, xlab="Luftfeuchtigkeit %", lwd = 1, main="Wachstumsfunktion für PM 2,5 (Funktion nach Hänel)")
par(new=T)
for (i in 1:19)
{ Sensor<-get(gf.H[i])
plot(Sensor$Hum,Sensor$PM5, xlab="",ylab="",type="p",col = farben[i], axes=FALSE)
par(new=T)}
############################################
#
#Correct and write Data
#
###########################################
# corret PM10 and PM 25 with best parameters for correction function
liste4<-c()
for (i in 1:19)
{
c.haenel<-get(liste[i])
#c.soneja<-c.haenel
#correct with haenel functiom
c.haenel$PM10<-haenel(c.haenel$PM10,c.haenel$Hum,parameter.PM10.haenel[i])
c.haenel$PM5<-haenel(c.haenel$PM5,c.haenel$Hum,parameter.PM25.haenel[i])
#write data
assign(paste0("Haenel",liste[i]),c.haenel)
liste4[i]<-paste0("Haenel",liste[i])
# write.csv(c.haenel,file=(paste0("E:/Dropbox/Dropbox/Masterarbeit","/Sommerhaenel",substr (f[i],1,10),".csv")))
#write.csv(c.haenel,file=(paste0("E:/Dropbox/Dropbox/Masterarbeit","/Herbsthaenel",substr (f[i],1,10),".csv")))
}
########################
#Classification for corrected values
########################
########## hourly values
#switch between seasons
liste7<-c()
liste7h<-c()
for (i in 1:length(liste))
{
s<-get(liste4[i])
result<-Stundenwerte(s)
assign(paste0("c.Herbst",liste[i]), result)
#assign(paste0("c.Sommer",liste[i]), result)
#liste7<-rbind(liste7,paste0("c.Sommer",liste[i]))
liste7h<-rbind(liste7h,paste0("c.Herbst",liste[i]))
#write.csv(result,file=paste0(substr(liste7[i],1,16),"Sommer.csv"))
write.csv(result,file=paste0(substr(liste7h[i],1,16),"Herbst.csv"))
}
# merge in one Dataset
dummy2<-data.frame(`c.HerbstSensor10-7131453`$Date,`c.HerbstSensor10-7131453`$PM10)#fall
#dummy2<-data.frame(`c.SommerSensor6-7134028`$Date,`c.SommerSensor6-7134028`$PM10)#sommer
names(dummy2)<-c("Date","dummy")
#merging Data
s<-dummy2
c.PM10<-c.PM5<-data.frame(s$Date,s$dummy)
names(c.PM10)<-c("Date","dummy")
names(c.PM5)<-c("Date","dummy")
liste7<-na.omit(liste7)
liste7h<-na.omit(liste7h)
for (i in 1:length(liste7h))
{ #s<-get(liste7[i])
s<-get(liste7h[i])
c.PM10<-merge(x=c.PM10 , y= s[1:2], by= 'Date', all.x= T)
c.PM5<-merge(x=c.PM5 , y=s[,c(1,3)] , by= 'Date', all.x= T)
names(c.PM10)<-c("Date","dummy",liste[1:i])
names(c.PM5)<-c("Date","dummy",liste[1:i])
}
c.PM10<-c.PM10[-c(2)]
c.PM5<-c.PM5[-c(2)]
#write.csv(c.PM10,file="PM10SommerR.csv")
#write.csv(c.PM5,file="PM25SommerR.csv")
write.csv(c.PM10,file="PM10HerbstR.csv")
write.csv(c.PM5,file="PM25HerbstR.csv")
# calculate row means as above
c.RMPM10<-rowMeans(c.PM10[-c(1)], na.rm=TRUE)
c.RMPM5<-rowMeans(c.PM5[-c(1)], na.rm=TRUE)
#calculate column means
c.CMPM10<-colMeans(c.PM10[-c(1)], na.rm=TRUE)
c.CMPM5<-colMeans(c.PM5[-c(1)], na.rm=TRUE)
### difference between corrected PM and uncorrected PM
#datacloud for basestatistics
#summer
w.Hum <-c(Hum[,2],Hum[,3],Hum[,4],Hum[,5],Hum[,6],Hum[,7],Hum[,8],Hum[,9],Hum[,10],Hum[,11],Hum[,12],Hum[,13],Hum[,14],Hum[,15],Hum[,16],Hum[,17],Hum[,18])
w.PM10 <-c(PM10[,2],PM10[,3],PM10[,4],PM10[,5],PM10[,6],PM10[,7],PM10[,8],PM10[,9],PM10[,10],PM10[,11],PM10[,12],PM10[,13],PM10[,14],PM10[,15],PM10[,16],PM10[,17],PM10[,18])
w.PM5 <-c(PM5[,2],PM5[,3],PM5[,4],PM5[,5],PM5[,6],PM5[,7],PM5[,8],PM5[,9],PM5[,10],PM5[,11],PM5[,12],PM5[,13],PM5[,14],PM5[,15],PM5[,16],PM5[,17],PM5[,18])
w.c.PM10 <-c(c.PM10[,2],c.PM10[,3],c.PM10[,4],c.PM10[,5],c.PM10[,6],c.PM10[,7],c.PM10[,8],c.PM10[,9],c.PM10[,10],c.PM10[,11],c.PM10[,12],c.PM10[,13],c.PM10[,14],c.PM10[,15],c.PM10[,16],c.PM10[,17],c.PM10[,18])
w.c.PM5 <-c(c.PM5[,2],c.PM5[,3],c.PM5[,4],c.PM5[,5],c.PM5[,6],c.PM5[,7],c.PM5[,8],c.PM5[,9],c.PM5[,10],c.PM5[,11],c.PM5[,12],c.PM5[,13],c.PM5[,14],c.PM5[,15],c.PM5[,16],c.PM5[,17],c.PM5[,18])
w.Temp<-c(Temp[,2],Temp[,3],Temp[,4],Temp[,5],Temp[,6],Temp[,7],Temp[,8],Temp[,9],Temp[,10],Temp[,11],Temp[,12],Temp[,13],Temp[,14],Temp[,15],Temp[,16],Temp[,17],Temp[,18])
#fall
w.Hum <-c(Hum[,2],Hum[,3],Hum[,4],Hum[,5],Hum[,6],Hum[,7],Hum[,8],Hum[,9],Hum[,10],Hum[,11],Hum[,12],Hum[,13],Hum[,14],Hum[,15])
w.PM10 <-c(PM10[,2],PM10[,3],PM10[,4],PM10[,5],PM10[,6],PM10[,7],PM10[,8],PM10[,9],PM10[,10],PM10[,11],PM10[,12],PM10[,13],PM10[,14],PM10[,15])
w.PM5 <-c(PM5[,2],PM5[,3],PM5[,4],PM5[,5],PM5[,6],PM5[,7],PM5[,8],PM5[,9],PM5[,10],PM5[,11],PM5[,12],PM5[,13],PM5[,14],PM5[,15])
w.c.PM10 <-c(c.PM10[,2],c.PM10[,3],c.PM10[,4],c.PM10[,5],c.PM10[,6],c.PM10[,7],c.PM10[,8],c.PM10[,9],c.PM10[,10],c.PM10[,11],c.PM10[,12],c.PM10[,13],c.PM10[,14],c.PM10[,15])
w.c.PM5 <-c(c.PM5[,2],c.PM5[,3],c.PM5[,4],c.PM5[,5],c.PM5[,6],c.PM5[,7],c.PM5[,8],c.PM5[,9],c.PM5[,10],c.PM5[,11],c.PM5[,12],c.PM5[,13],c.PM5[,14],c.PM5[,15])
w.Temp<-c(Temp[,2],Temp[,3],Temp[,4],Temp[,5],Temp[,6],Temp[,7],Temp[,8],Temp[,9],Temp[,10],Temp[,11],Temp[,12],Temp[,13],Temp[,14],Temp[,15])
Statistik(w.PM10)
Statistik(w.PM5)
Statistik(w.c.PM10)
Statistik(w.c.PM5)
diff.PM10<-w.c.PM10-w.PM10
diff.PM5<-w.c.PM5-w.PM5
Statistik(diff.PM10)
Statistik(diff.PM5)
####################################
#significance humidity correction
# Significance of the humidity correction: for every sensor, scan the
# Haenel parameter `a` over 0.01..0.50 and record the minimal absolute
# PM10/humidity correlation together with its p-value.
# NOTE(review): these two assignments are immediately overwritten inside
# the loop below; kept for compatibility with the original script.
PM <- `Sensor16- 713582`$PM10
Hum <- `Sensor16- 713582`$Hum
res <- list()
for (i in 1:17)
{
  b <- get(liste[i]) # read sensor data by name
  b <- na.omit(b)
  PM <- b$PM10
  PM2 <- b$PM5
  Hum <- b$Hum
  iter <- 50
  corr <- c()
  # Fixed: the inner loop indexed `param`, which was never created (the
  # original initialised `pval` instead), raising
  # "object 'param' not found" at run time.
  param <- c()
  a <- 0.01
  for (j in 1:iter)
  {
    NPM <- mapply(haenel, PM, Hum, a)
    corr[j] <- cor.test(NPM, Hum, method = "pearson")[["estimate"]] # correlation coefficient
    param[j] <- cor.test(NPM, Hum, method = "pearson")[["p.value"]] # p-value
    a <- a + 0.01
    NPM <- c()
  }
  g <- data.frame(abs(corr), param)
  mini <- min(g$abs.corr., na.rm = TRUE)
  par <- g$param[g$abs.corr. == mini]
  # Fixed: `res[i] <- c(mini, par)` on a list kept only the first element
  # (with a warning); `[[` stores the full length-2 vector per sensor.
  res[[i]] <- c(mini, par)
}
# Quality of implementation of the humidity correction
######################################
#Compare to Data provided by Streibl
######################################
#set path
pfad<-("E:/Dropbox/Dropbox/Masterarbeit/Silberberg")#desktop1
setwd(pfad)# set woring directory
f<-list.files(path = pfad, pattern = NULL, all.files = FALSE,full.names = FALSE, recursive = FALSE,ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# create all matrices to be filled within loop
n=c()
i=1
liste=c()
#for loop to read all data and perform several stats
for (i in 1:2 )
{
y=j=k=1 # reset counter Variables
Date=Time=Hum=PM5=PM10=Temp=c() # reset help Variables
# e list to be iterated
e<-list.files(path = paste0(pfad,"/",f[i]), pattern = NULL, all.files = FALSE,full.names = FALSE, recursive = FALSE,ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
for (k in 1:length(e))
{ y=1
n<-e[k]
# list to be iterated
l<-list.files(path = paste0(pfad,"/",f[i],"/",e[k]), pattern = NULL, all.files = FALSE,full.names = FALSE, recursive = FALSE,ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#reset "inner" help variables
Temp2<-c()
Date2=Time4=Hum2=PM52=PM102=c()
#inner Loop
for (y in 1:length(l))
{j=1
x<-read_delim(paste0(pfad,"/",f[i],"/",e[k],"/",l[y]),";", escape_double = FALSE, trim_ws = TRUE, na = c("", "NA"),quoted_na = TRUE)
#correct different variable names
if (is.na(x$BMP_temperature[1])==FALSE)
{x$Temp<-x$BMP_temperature
x$Humidity<-x$BMP_pressure}
x<-Runden(x)
Temp2<-c(Temp2,x$Temp)
PM102=c(PM102,x$SDS_P1)
PM52=c(PM52,x$SDS_P2)
Hum2=c(Hum2,x$Humidity)
}
Temp<-c(Temp,Temp2)
PM10=c(PM10,PM102)
PM5=c(PM5,PM52)
Hum=c(Hum,Hum2)
}
PM10<-round(PM10,digits=2)
PM5<-round(PM5,digits=2)
Temp<-round(Temp,digits=2)
Hum<-round(Hum,digits=2)
Sensor<-data.frame(PM10,PM5,Temp,Hum)
#write values to dataframe
assign(paste0("Sensor",substr (f[i],1,10)),Sensor)
liste<-c(liste,(paste0("Sensor",substr (f[i],1,10))))
}
# Humidity-correction parameter search for sensor 746774: minify() (project
# helper, defined elsewhere) scans 2000 candidate parameters for each growth
# model (Haenel, Soneja, Koehler) under three criteria (log-transformed
# residuals, Spearman, Pearson).
# NOTE(review): PM (PM10) is extracted but only PM2 (PM2.5) is fitted below —
# confirm the PM10 fits were intentionally skipped for this sensor.
PM<-as.numeric(Sensor746774$PM10)
PM2<-as.numeric(Sensor746774$PM5)
Hum<-as.numeric(Sensor746774$Hum)
# Haenel growth model
# criterion: log-transformed residuals
h8<-minify(PM2,Hum,2000, meth="log",fun="haenel")
# criterion: Spearman correlation
h10<-minify(PM2,Hum,2000, meth="spearman",fun="haenel")
# criterion: Pearson correlation
h12<-minify(PM2,Hum,2000, meth="pearson",fun="haenel")
# Soneja growth model
# criterion: log-transformed residuals
h2<-minify(PM2,Hum,2000, meth="log",fun="soneja")
# criterion: Spearman correlation
h4<-minify(PM2,Hum,2000, meth="spearman",fun="soneja")
# criterion: Pearson correlation
h6<-minify(PM2,Hum,2000, meth="pearson",fun="soneja")
# Koehler growth model
# criterion: log-transformed residuals
h14<-minify(PM2,Hum,2000, meth="log",fun="koehler")
# criterion: Spearman correlation
h16<-minify(PM2,Hum,2000, meth="spearman",fun="koehler")
# criterion: Pearson correlation
h18<-minify(PM2,Hum,2000, meth="pearson",fun="koehler")
#############################################################
# Find the best-fitting correction method using sensor 16: same
# 3-model x 3-criterion grid as above, here for BOTH PM10 (h odd)
# and PM2.5 (h even). Overwrites the h* results from sensor 746774.
PM<-`Sensor16- 713582`$PM10
PM2<-`Sensor16- 713582`$PM5
Hum<-`Sensor16- 713582`$Hum
# Soneja growth model
# criterion: log-transformed residuals
h1<-minify(PM,Hum,2000, meth="log",fun="soneja")
h2<-minify(PM2,Hum,2000, meth="log",fun="soneja")
# criterion: Spearman correlation
h3<-minify(PM,Hum,2000, meth="spearman",fun="soneja")
h4<-minify(PM2,Hum,2000, meth="spearman",fun="soneja")
# criterion: Pearson correlation
h5<-minify(PM,Hum,2000, meth="pearson",fun="soneja")
h6<-minify(PM2,Hum,2000, meth="pearson",fun="soneja")
# Haenel growth model
# criterion: log-transformed residuals
h7<-minify(PM,Hum,2000, meth="log",fun="haenel")
h8<-minify(PM2,Hum,2000, meth="log",fun="haenel")
# criterion: Spearman correlation
h9<-minify(PM,Hum,2000, meth="spearman",fun="haenel")
h10<-minify(PM2,Hum,2000, meth="spearman",fun="haenel")
# criterion: Pearson correlation
h11<-minify(PM,Hum,2000, meth="pearson",fun="haenel")
h12<-minify(PM2,Hum,2000, meth="pearson",fun="haenel")
# Koehler growth model
# criterion: log-transformed residuals
h13<-minify(PM,Hum,2000, meth="log",fun="koehler")
h14<-minify(PM2,Hum,2000, meth="log",fun="koehler")
# criterion: Spearman correlation
h15<-minify(PM,Hum,2000, meth="spearman",fun="koehler")
h16<-minify(PM2,Hum,2000, meth="spearman",fun="koehler")
# criterion: Pearson correlation
h17<-minify(PM,Hum,2000, meth="pearson",fun="koehler")
h18<-minify(PM2,Hum,2000, meth="pearson",fun="koehler")
#######################################
# Compare sensor 16 with official (ZIMEN) monitoring data
#######################################
# Load the ZIMEN reference measurements.
# NOTE(review): the file is PM_Herbst.csv (autumn) but the object is named
# PM_Sommer (summer) — confirm the intended season/file.
PM_Sommer <- read.csv("E:/Dropbox/Dropbox/Masterarbeit/PM_Herbst.csv", header=TRUE,stringsAsFactors=FALSE)
View(PM_Sommer)
# Rebuild a "YYYY/MM/DD HH:MM:00" timestamp from the German
# "DD.MM.YYYY HH:MM" Zeitpunkt string via fixed substr() offsets.
PM_Sommer$Date<-paste0(substr(PM_Sommer$Zeitpunkt,7,10),"/",substr(PM_Sommer$Zeitpunkt ,4,5),"/",substr(PM_Sommer$Zeitpunkt,1,2)," ",substr(PM_Sommer$Zeitpunkt,12,17),":00")
PM_Sommer$Trier.Ostallee.PM10<-as.numeric(PM_Sommer$Trier.Ostallee.PM10)
PM_Sommer$Trier.Pfalzel.PM2.5<-as.numeric(PM_Sommer$Trier.Pfalzel.PM2.5)
# Aggregate sensor 16 readings to hourly means.
a<-`Sensor16- 713582`
# Truncate the minute part so every reading in the same hour shares a key.
# NOTE(review): a Date column is not among the columns built by the reader
# loop above (PM10, PM5, Temp, Hum) — confirm `a$Date` exists at this point.
a$Date2<-paste0(substr(a$Date,1,14),"00:00")
# aggregate() with a formula keeps the result column named "a$PM10" etc.;
# the names are overwritten on b below.
PM10 <- aggregate(a$PM10 ~ Date2, a, mean)
PM5 <- aggregate(a$PM5 ~ Date2, a, mean)
Temp <- aggregate(a$Temp ~ Date2, a, mean)
Hum <- aggregate(a$Hum ~ Date2, a, mean)
# Split the hourly key into day and time-of-day components.
tag<- substr(PM10[[1]],1,10)
stunde<-substr(PM10[[1]],12,22)
# Assemble one hourly data frame from the aggregated pieces.
b<- data.frame(c(PM10[1]),c(tag),c(stunde),c(PM10[2]),c(PM5[2]),rbind(Temp[2]),rbind(Hum[2]))
names(b)<-c("Date","tag","Stunde","PM10","PM5","Temp","Hum")
# Merge the hourly sensor data with external humidity and the ZIMEN PM
# series; window of interest: 01.09.2019 - 24.11.2019.
ds1<-data.frame(b$Date,b$Hum)
names(ds1)<-c("Date","Hum_SDS")
# NOTE(review): `dwd` is not created in this chunk — presumably DWD weather
# data loaded elsewhere; confirm its time column matches the Date format.
ds2<-data.frame(dwd$time,dwd$AVG_RH350)
names(ds2)<-c("Date","Hum_DWD")
ds3<-merge(x= ds1, y= ds2, by= 'Date', all.x= T)
ds3$pm10<-b$PM10
ds3$pm5<-b$PM5
ds3=na.omit(ds3)
# ZIMEN stations: Trier Ostallee (PM10) and Trier Pfalzel (PM2.5).
ds4<-data.frame(PM_Sommer$Date,PM_Sommer$Trier.Ostallee.PM10,PM_Sommer$Trier.Pfalzel.PM2.5)
names(ds4)<-c("Date","Ostalee","Pfalzel")
ds5<-merge(x= ds3, y= ds4, by= 'Date', all.x= T)
# Scatter of sensor PM10 against the Ostallee reference station.
plot(ds5$pm10~ds5$Ostalee)
# Hourly difference: sensor minus reference station.
diff<-ds5$pm10-ds5$Ostalee
diff=na.omit(diff)
Statistik(diff)  # external summary-statistics helper (defined elsewhere)
# Apply the humidity-growth corrections to sensor 16 PM10 using the sensor
# humidity (Hum_SDS). NOTE(review): the third arguments 0.46 / 0.17 are
# presumably the fitted model parameters from the minify() runs — confirm.
s16HP<-mapply(haenel,ds3$pm10,ds3$Hum_SDS,0.46)
s16SP<-mapply(soneja,ds3$pm10,ds3$Hum_SDS,0.17)
s16KP<-mapply(koehler,ds3$pm10,ds3$Hum_SDS,0.17)
# Overlay plot over time: raw sensor 16 PM10, the three corrected series and
# the Ostallee reference, all on a shared 0-50 µg/m³ axis.
windows()  # opens a Windows-only graphics device
plot(as.numeric(ds5$Date),ds5$pm10,col="red",type="l",ylim =c(0,50),xlab="Zeit in Stunden",cex.lab=1.5,cex.axis=1.5, ylab="PM 10 µg/m³", main= "Vergleich Sensor 16 und Messstation Ostallee")
par(new=T)  # draw the next series on top of the existing axes
plot(as.numeric(ds5$Date),ds5$Ostalee,ylim =c(0,50),type="l", axes=FALSE, col="grey", xlab="",ylab="")
par(new=T)
# Haenel-corrected series (note: x comes from ds3, others from ds5 —
# NOTE(review): confirm both share the same row order/length after merging).
plot(as.numeric(ds3$Date),s16HP, type="l",ylim=c(0,50),col="green", xlab="",ylab="", axes=FALSE)
par(new=T)
# Soneja-corrected series.
plot(as.numeric(ds3$Date),s16SP, type="l",ylim=c(0,50),col="darkgreen", xlab="",ylab="", axes=FALSE)
par(new=T)
# Koehler-corrected series.
plot(as.numeric(ds3$Date),s16KP, type="l",ylim=c(0,50),col="blue", xlab="",ylab="", axes=FALSE)
legend(17,50,legend=c("Messstation Ostallee","Sensor 16 unkorrigiert", "Sensor 16 Korrektur nach Hänel", "Sensor 16 Korrektur nach Soneja", "Sensor 16 Korrektur nach Köhler"),col=c("grey","red","green","darkgreen","blue"), bg="white", lty=1:1,lwd=3, cex=0.8,text.font=4)
|
7a56167283914c77d1832c1f1055216d0a4fcc4e
|
97a26457d27e5976d7f7a716b795999ba9b03de5
|
/lab2a.R
|
1f6e10fa9cd715674d32cc1c178aa7a433fee207
|
[] |
no_license
|
agdosne/uuadvstatcomp
|
015681766f8ab258f31dc4a38a26c9bf2ddffa69
|
eb3a87fa2422e346413215d8e2b4e546819c805f
|
refs/heads/master
| 2021-01-16T23:22:54.313582
| 2015-12-04T09:45:48
| 2015-12-04T09:45:48
| 43,811,883
| 0
| 0
| null | 2015-10-07T11:47:00
| 2015-10-07T11:47:00
| null |
UTF-8
|
R
| false
| false
| 4,745
|
r
|
lab2a.R
|
#####################################################
### Advanced Statistical Computing Course
### Lab 2
### Winter 2015
### Report by Anne-Gaelle Dosne
#####################################################
### Optimization
# Univariate test function for the optimization exercises below:
# 3*(x - 3)^2 + 3*(x - 15)^2 plus a sin(100x) ripple that creates many
# local minima. Vectorized over x; returns a numeric vector of same length.
myfun <- function(x) {
  # fix: use the documented `^` exponentiation operator instead of the
  # deprecated `**` spelling.
  (x - 3)^2 + 2 * (x - 3)^2 + 3 * (x - 15)^2 + sin(100 * x)
}
myfun(3) # test the function near the x = 3 quadratic term
opt1 <- optimize(f=myfun, interval=c(0,100)) # optimize it over the interval from 0 to 100 (local minimization): min: 9.03
myfun(opt1$minimum) # returns function value at minimum (~215)
# Narrower intervals trap optimize() in different local minima created by
# the sin(100*x) ripple:
optimize(f=myfun, interval=c(0,15)) # min: 9.28
optimize(f=myfun, interval=c(9,12)) # min: 9.53
optimize(f=myfun, interval=c(10,11)) # min: 10.16
optimize(f=myfun, interval=c(-1000,1000)) # min: 8.71
# The optimum changes a little every time but is close to 9 whenever 9 is in the interval.
# The value of the minimum varies by up to 0.5.
# There is no guarantee that this optimum is the global optimum, but it is the best over -1000,1000.
### Integrate a function
# Integrand for the numeric integrations below: f(x) = x * sin(x)
# (vectorized, as required by integrate()).
multsin <- function(x) {
  sin(x) * x
}
# Serial baseline: integrate x*sin(x) over a large range with many
# subdivisions; author-reported result: 1356376.
integrate(multsin,lower=-7E5,upper=7E5,subdivisions=1E7) # result: 1356376
system.time(integrate(multsin,lower=-7E5,upper=7E5,subdivisions=1E7)) # integrate it over large range and small steps
# takes ~12 seconds (reference timing for the parallel version below)
library(parallel) # make it parallel
# Integrate x*sin(x) over the closed interval lim = c(lower, upper) and
# return the numeric value of the integral.
# `subdivisions` is now a parameter (default keeps the original 1E7) so
# callers can trade accuracy for speed — backward compatible.
p_multsin <- function(lim, subdivisions = 1E7) {
  y <- integrate(function(x) {x*sin(x)},
                 lower = lim[1],
                 upper = lim[2],
                 subdivisions = subdivisions)
  return(y$value)
}
p_multsin(c(-7E5,7E5)) # smoke test: matches the serial integrate() result above
# Integrate x*sin(x) over [-7e5, 7e5] in parallel: split the range into
# `nodes` equal sub-intervals, integrate each on its own PSOCK worker via
# p_multsin(), and return the sum of the partial integrals.
p_integrate <- function(nodes) {
  cl <- makePSOCKcluster(nodes)
  # fix: the cluster was never shut down, leaking `nodes` worker processes
  # per call; on.exit() guarantees cleanup even if parLapply() errors.
  on.exit(stopCluster(cl), add = TRUE)
  intervals <- seq(from = -7E5, to = 7E5, length.out = nodes + 1) # nodes+1 break points
  min <- intervals[-length(intervals)] # lower bound of each sub-interval
  max <- intervals[-1]                 # upper bound of each sub-interval
  # Map(c, min, max) pairs the bounds into c(lower, upper) arguments.
  out <- parLapply(cl = cl, X = Map(c, min, max), fun = p_multsin)
  return(sum(unlist(out)))
}
p_integrate(2) # result: 1356376, same as before so function OK
p_integrate(4) # same
system.time({p_integrate(2)}) # takes 4 sec
system.time({p_integrate(4)}) # takes 2.9 sec --> best
system.time({p_integrate(8)}) # takes 3 sec
# Speed up 4-fold using parallel computing on 4 nodes (from 12 to 3 seconds)
# (timings include cluster start-up cost, so more nodes is not always faster)
### Functional operators: memoisation
# fix: library() stops with a clear error if memoise is not installed;
# require() only warns and returns FALSE, deferring the failure to first use.
library(memoise)
# Naive doubly-recursive Fibonacci (1, 1, 2, 3, 5, ...); deliberately
# unmemoised and slow, so the cached variants below have a baseline.
fib <- function(n) {
  if (n >= 2) {
    fib(n - 1) + fib(n - 2)
  } else {
    1
  }
}
# Memoised variant: because the recursion calls fib2 itself, every
# intermediate value is cached — each fib2(n) is computed only once.
fib2 <- memoise(function(n) {
  if (n < 2) return(1)
  fib2(n - 2) + fib2(n - 1)
})
# Memoising the existing fib caches only the OUTER call; the internal
# recursion still invokes the unmemoised fib, so no speed-up is expected.
fib3 <- memoise(fib)
system.time(fib(28)) # takes 1.6 sec (naive recursion)
system.time(fib2(28)) # takes 0.01 sec --> fastest: recursion hits the cache
system.time(fib3(28)) # takes 1.6 sec: only the outer call is memoised
# forget(fib3) # would clear fib3's cache
### Domain specific languages: ggplot
library(ggplot2)
# Lab example.
# NOTE(review): qplot() and its method= pass-through are deprecated in
# current ggplot2 releases; this section assumes an older ggplot2 version.
str(mpg) # dataframe, 234 obs and 11 variables
qplot(displ, hwy , data=mpg) # relation between engine displacement and highway miles per gallon
qplot(displ, hwy , data=mpg, color =drv) # color legend: drive
qplot(displ, hwy, data =mpg, geom=c("point","smooth")) # add global statistic: smooth
qplot(displ, hwy, data =mpg, geom=c("point","smooth"),method="lm") # add global statistic: linear model
qplot(displ, hwy, data =mpg, geom=c("point","smooth"),method="lm",color=drv) # add local statistic by drive type
gr <- qplot(displ, hwy, data =mpg, geom=c("point","smooth"),method="lm",color=drv)
gr + theme(panel.background = element_rect(fill = "pink")) # change background color
# Exercise with the diamonds data set.
str(diamonds) # dataframe, 53940 obs and 10 variables
qplot(carat, price , data=diamonds) # relation between carat and price (expected to increase): looks exponential
qplot(carat, price , data=diamonds, color=color) # color legend: color --> increase steeper in alphabetical color order
qplot(carat, price , data=diamonds, geom=c("point","smooth"),color=color) # add local statistic by color type: difficult to see
qplot(carat, price , data=diamonds, geom=c("smooth"),color=color) # remove points to see potential differences between smooths
qplot(carat, price , data=diamonds, geom=c("point","smooth")) # add global statistic
gr <- qplot(carat, price , data=diamonds, color=color)
gr + scale_color_discrete(name='Diamond color') # change title of color legend
qplot(color, price/carat , data=diamonds, geom="boxplot") # draw boxplot of price/carat for the different colors
# END
###########################################################################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.