blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12d218cba2a977e3aeb3c7b5f3d9b898fc4e78b2 | 14f7644cd202ddfad8fc4e542092d92184b96623 | /shinnyApp/03 Map/module.R | 6c19568cbace9340edd1c21804cc6c600c45c888 | [] | no_license | DanielMCM/H2020Shiny | 44712ab2c5e279fc6567388565d0dea26412b077 | 6b406ffa9c9ee0fd59ed4cb46526365791cec743 | refs/heads/master | 2020-09-01T17:55:14.002319 | 2019-11-01T16:32:10 | 2019-11-01T16:32:10 | 219,020,831 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,374 | r | module.R | ###################################################
######### Map Module ###########
###################################################
Map_menuItem <- function(id) {
  # Sidebar menu entry for the map module.  The tabName (id + "Mp") must
  # match the tabName of the tabItem built in Map_ui() so that clicking
  # this menu item activates the map tab.
  # NOTE(review): removed the unused local `ns <- NS(id)` -- the namespace
  # function was created but never referenced in this function.
  menuItem("Map", tabName = str_c(id, "Mp"), icon = icon("globe"))
}
# UI (tabs)
r_colors <- rgb(t(col2rgb(colors()) / 255))
names(r_colors) <- colors()
Map_ui <- function(id) {
  # UI for the map tab: a leaflet map plus a draggable, semi-transparent
  # control panel (country multi-select and start-year range slider) and a
  # list of questions the map can answer.
  # `id` is the Shiny module id; the tabName (id + "Mp") must match
  # Map_menuItem().  `countries` is assumed to be a globally available
  # vector of choices -- TODO confirm where it is defined.
  # NOTE(review): fixed a grammar typo in the first question bullet
  # ("being develop" -> "being developed"); all other strings unchanged.
  ns <- NS(id)
  list(tabItem(tabName = str_c(id, "Mp"), tags$style(HTML("#big-heading{color: #1e88e5; margin-top: 0%;}
                                                           #controls {
                                                           /* Appearance */
                                                           background-color: #eceff1;
                                                           padding: 0 20px 20px 20px;
                                                           cursor: move;
                                                           /* Fade out while not hovering */
                                                           opacity: 0.65;
                                                           zoom: 0.9;
                                                           transition: opacity 500ms 1s;
                                                           margin-top:23px;
                                                           }
                                                           #controls:hover {
                                                           /* Fade in while hovering */
                                                           opacity: 0.95;
                                                           transition-delay: 0;
                                                           }
                                                           ")),
    fluidPage(
      # App title ----
      h3(id = "big-heading", "Number of projects per country"),
      p("It is recommended to use Internet explorer in order to use the date slider"),
      # Main map output; id is namespaced so the server's output$mymap binds.
      leafletOutput(ns("mymap"), width = "102%", height = "500px"),
      # Floating filter panel (country selector + year range).
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
                    draggable = TRUE, top = 120, left = "auto", right = 20, bottom = "auto",
                    width = 250, height = "auto",
                    p(),
                    # h2("ZIP explorer"),
                    selectizeInput(ns("country"), "Visualize by Country", choices = countries,
                                   options = list(placeholder = 'select a country', maxItems = 5, 'plugins' = list('remove_button')), multiple = TRUE),
                    sliderInput(ns("range"),
                                "Projects started in years ",
                                value = c(2014, 2020),
                                sep = "",
                                min = 2014,
                                max = 2020)
      ),
      br(),
      h4("Questions that can be answered using the interactive Map: "),
      tags$div(tags$ul(
        tags$li(tags$span("How many projects are being developed in each country?")),
        tags$li(tags$span("How are the projects distributed along a specific country?")),
        tags$li(tags$span("Which are the countries with more projects?")),
        tags$li(tags$span("How many projects started in a year in each country?")),
        tags$li(tags$span("How many projects started in certain period in each country?"))
      )
      )
    )
  ))
}
# Server
# Server half of the map module.  Filters the global `values$ProjectsByCountry`
# table by the UI's year range / country selection and renders a clustered
# leaflet map.  Called via callModule(Map_server, id).
Map_server <- function(input, output, session) {
  # Load reactives
  # NOTE(review): `points` reacts to input$recalc, but no such input exists
  # in Map_ui() and `points` is never used below -- looks like leftover
  # template code from the leaflet example; confirm before removing.
  points <- eventReactive(input$recalc, {
    cbind(rnorm(40) * 2 + 13, rnorm(40) + 48)
  }, ignoreNULL = FALSE)
  # Debug leftover: prints the reactive object (not its value) at module init.
  print(points)
  # Reactive subset of projects matching the slider's year range and,
  # if set, the selected countries.
  sliderValues <- reactive({
    projects <- values$ProjectsByCountry[!is.na(values$ProjectsByCountry$startDate),]
    projects$startYear <- as.numeric(projects$startYear)
    projects$endDate <- as.numeric(projects$endDate)
    start <- as.numeric(input$range[1])
    end <- as.numeric(input$range[2])
    a <- projects[(projects$startYear >= start & projects$startYear <= end),]
    cat(file = stderr(), "TYPE ->", typeof(input$country), "<-\n")
    if (!is.null(input$country)) {
      # NOTE(review): two likely bugs here: (1) this filters `projects`,
      # not `a`, so selecting a country DISCARDS the year-range filter;
      # (2) input$country can hold up to 5 values (maxItems = 5) but `==`
      # recycles element-wise -- `%in%` is almost certainly intended.
      a <- projects[projects$countryName == input$country,]
    }
    cat(file = stderr(), "drawing a map with", start, " range ", end, "\n")
    num <- nrow(a)
    cat(file = stderr(), "AFTER", num, "\n")
    return(a)
  })
  # Call questions' servers
  # Render the clustered marker map; zoom in closer when a country is chosen.
  output$mymap <- renderLeaflet({
    projects <- sliderValues() # Add this
    zoomSize <- 2
    if (!is.null(input$country)) {
      zoomSize <- 4
    }
    # Initial view is centered on Geneva (6.143158, 46.204391).
    leaflet(data = projects) %>% addTiles() %>% setView(6.143158, 46.204391, zoom = zoomSize) %>% addMarkers(
      lng = ~uni_lng, lat = ~uni_lat,
      clusterOptions = markerClusterOptions(),
      popup = ~paste("<b style=\"color:#1e88e5\">", projects$coordinator, "</b>",
                     "<hr style=\"color:#b0bec5; margin-top: 0em; margin-bottom: 0em;\">",
                     "Country: ", projects$countryName
      )
    )
  })
} |
15d9abf2e4f9c743967996fccd303ee02333878d | cb1edbd312fe5583702e8567e1aa6e32e103d300 | /man/roundBranches.Rd | 73dd9305dabe26dd5539094281f43dfdeecf0a42 | [] | no_license | cran/phytools | e8cb2ddac5592a9c27a0036df4599649a393717a | 910fa95b3f5f1619c85ac420bd07286a3fe8cfcf | refs/heads/master | 2023-07-22T15:18:46.363446 | 2023-07-14T20:00:02 | 2023-07-14T21:30:43 | 17,698,535 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,021 | rd | roundBranches.Rd | \name{roundBranches}
\alias{roundBranches}
\title{Rounds the branch lengths of a tree}
\usage{
roundBranches(tree, digits)
}
\arguments{
\item{tree}{an object of class \code{"phylo"}, \code{"multiPhylo"}, \code{"simmap"}, or \code{"multiSimmap"}.}
\item{digits}{number of digits for rounding. Passed to \code{\link{round}}.}
}
\description{
Rounds the branch lengths of a phylogenetic tree.
}
\details{
This function rounds the branch lengths of a tree or trees to a precision indicated by \code{digits}, and reconciles any mappings for objects of class \code{"simmap"} or \code{"multiSimmap"}.
}
\value{
An object of class \code{"phylo"}, \code{"multiPhylo"}, \code{"simmap"}, or \code{"multiSimmap"}, with rounded edge lengths.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\keyword{phylogenetics}
\keyword{utilities}
|
c1cd2dc6cf6ae79200c864939873b14fcccbe5e9 | 79a54bdd930b0ff24bee12107a0f2d4ea0141a12 | /man/check_columns.Rd | f1caff6733d3fe9c9250551c3cf1644999df6e7f | [] | no_license | abcwcm/Klebanoff21LT2 | 957ef1d19263f35653de9347fac24ac2b8da166b | 6bb124f4d5d97fece322d422e533a3e898c43ce8 | refs/heads/master | 2023-04-13T23:20:27.592502 | 2022-03-03T11:26:43 | 2022-03-03T11:26:43 | 465,661,359 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,024 | rd | check_columns.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{check_columns}
\alias{check_columns}
\title{Check for the presence of named columns}
\usage{
check_columns(which_names, input, input_name, function_name)
}
\arguments{
\item{which_names}{Column names to check, e.g. c("peaks", "genes")}
\item{input}{The names() of this will be checked.}
\item{input_name}{Name of the input, e.g. "start_params" just to make it
identifiable if an error message is returned.}
\item{function_name}{Name of function for which this test is carried out.
Again, just to make the returning error message a bit more readable. This
helps with debugging if you're wrapping many functions within each other.}
}
\value{
Returns an error and stop signal if entries of \code{which_names}
are missing in the \code{input}.
}
\description{
Check for the presence of named columns
}
\examples{
\dontrun{
check_columns( c("cells", "sample", "condition"),
long_df, "long_df", "plot_profile")
}
}
|
3afe642c5eaf7b37532052b1ceb6fe6af9e57520 | 9a3708f68f2ef43467f88428cb26f7c4f92a795f | /code/model/dummy_advmodel.R | 6cffa24c3d8272443a472d98acc3e55fb0ddc96a | [] | no_license | sravi9/captainu-csp572 | ad79f7f199dd85441824bc81dd347550ca93f232 | 0324a871d78c1845047bf37b5f05e7551b2cb695 | refs/heads/master | 2021-01-01T20:02:21.292424 | 2017-08-24T00:57:58 | 2017-08-24T00:57:58 | 98,747,353 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,378 | r | dummy_advmodel.R | library(leaps)
library(MASS)
library('DMwR')
library(randomForest)
library(MASS)
library(caret)
library(ROCR)
library(e1071)
library(dummies)
library(glmnet)
analysisdata<-read.csv('C:/Users/sshss-pc/Desktop/project/processed/analysisdata.csv', header = TRUE)
analysisdata<-analysisdata[,-1]
january<-analysisdata[which(analysisdata[,"Timetag"]=="2016/1/1"),]
march<-analysisdata[which(analysisdata[,"Timetag"]=="2016/3/1"),]
october<-analysisdata[which(analysisdata[,"Timetag"]=="2015/10/1"),]
november<-analysisdata[which(analysisdata[,"Timetag"]=="2015/11/1"),]
december<-analysisdata[which(analysisdata[,"Timetag"]=="2015/12/1"),]
january<-january[,c(-1,-2,-4,-5,-6,-24,-25,-70,-71,-73,-74,-75,-76)]
#january<-january[,c(-1,-2,-4,-5,-6,-25,-68)]
names(january)
january[,61]<-as.factor(january[,61])
january[,62]<-as.factor(january[,62])
january[,63]<-as.factor(january[,63])
january[,64]<-as.factor(january[,64])
january[,65]<-as.factor(january[,65])
january[,66]<-as.factor(january[,66])
str(january)
nrow(january)
prechurn<-which(january[,61]=="1")
length(prechurn)
dummyjanuary<-dummy.data.frame(data = january, names = c("gender","act","Current","Underpromotion","Ralations"))
dummyjanuary[,1:60]<-scale(dummyjanuary[,1:60])
dummyjanuary[is.na(dummyjanuary)]<-0
selejan<-dummyjanuary[,c(68,5,72,10,4,52,1,74,67,44,70,24,61)]
names(selejan)
targetVar<-"one_month_churn"
xVars<-names(selejan[,-13])
set.seed(600)
inTrain <- createDataPartition(y = selejan[,targetVar], list = FALSE, p = .8)
train_jan <- selejan[inTrain,]
test_jan <- selejan[-inTrain,]
stopifnot(nrow(train_jan) + nrow(test_jan) == nrow(selejan))
testchurn<-which(test_jan[,13]=="1")
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE){
  # Build a model formula "target ~ x1 + x2 + ..." from character vectors.
  #
  # targetVar        : name of the response variable (length-1 character).
  # xVars            : character vector of predictor names (non-empty).
  # includeIntercept : if FALSE, append "- 1" so the model is fit without
  #                    an intercept term.
  # Returns an object of class "formula".
  stopifnot(is.character(targetVar), length(targetVar) == 1, length(xVars) > 0)
  rhs <- paste(xVars, collapse = " + ")
  if (!includeIntercept) {
    # The original appended the intercept-suppression term via
    # paste(..., -1), which only produced "... -1" by accident of numeric
    # coercion; build it explicitly instead.
    rhs <- paste(rhs, "- 1")
  }
  as.formula(paste(targetVar, "~", rhs))
}
# Fit several classifiers on the January split with 10-fold CV and
# up-sampling of the minority (churn) class, reporting confusion matrices.
modelForm <- createModelFormula(targetVar, xVars)
ctrl <- trainControl(method = "cv",
                     number = 10,
                     sampling = "up")
# Random forest; probabilities thresholded at 0.5.
balanced_rf <- caret::train(modelForm,
                            data = train_jan,
                            method = "rf",
                            trControl = ctrl)
Prediction_jan <- predict(balanced_rf, test_jan, type = "prob")
jan_pred <- ifelse(Prediction_jan[,2]> 0.5,1,0)
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = jan_pred)
varImp(balanced_rf)
# Class-weighted linear SVM.
balanced_svmweight <- caret::train(modelForm,
                                   data = train_jan,
                                   method = "svmLinearWeights",
                                   trControl = ctrl)
Prediction_jan <- predict(balanced_svmweight, test_jan)
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_svmweight)
# Plain linear SVM (e1071 backend).
balanced_svmlk <- caret::train(modelForm,
                               data = train_jan,
                               method = "svmLinear2",
                               trControl = ctrl)
Prediction_jan <- predict(balanced_svmlk, test_jan)
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
# Gradient boosting.
balanced_boosting <- caret::train(modelForm,
                                  data = train_jan,
                                  method = "gbm",
                                  trControl = ctrl)
Prediction_jan <- predict(balanced_boosting, test_jan)
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_boosting)
# MLP trained with stochastic gradient descent.
balanced_descent <- caret::train(modelForm,
                                 data = train_jan,
                                 method = "mlpSGD",
                                 trControl = ctrl)
# NOTE(review): copy-paste bug -- this predicts with `balanced_boosting`,
# not the freshly trained `balanced_descent`, so the confusion matrix below
# repeats the boosting result.
Prediction_jan <- predict(balanced_boosting, test_jan)
Actual_jan <- test_jan$one_month_churn
confusionMatrix(reference = Actual_jan, data = Prediction_jan)
varImp(balanced_descent)
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
# --- November snapshot: same preprocessing as January, then forward stepwise
# feature selection on a logistic model and an 80/20 split with class weights.
november<-november[,c(-1,-2,-4,-5,-6,-24,-25,-70,-71,-73,-74,-75,-76)]
names(november)
# Columns 61-66 are categorical flags (61 = one_month_churn target).
november[,61]<-as.factor(november[,61])
november[,62]<-as.factor(november[,62])
november[,63]<-as.factor(november[,63])
november[,64]<-as.factor(november[,64])
november[,65]<-as.factor(november[,65])
november[,66]<-as.factor(november[,66])
str(november)
nrow(november)
prechurn<-which(november[,61]=="1")
length(prechurn)
# One-hot encode, scale numeric features, impute NA -> 0.
dummynovember<-dummy.data.frame(data = november, names = c("gender","act","Current","Underpromotion","Ralations"))
dummynovember[,1:60]<-scale(dummynovember[,1:60])
dummynovember[is.na(dummynovember)]<-0
# Forward stepwise selection (AIC) between the null and full logistic models.
nullModel <- glm(formula = one_month_churn ~ 1,family=binomial,data=dummynovember)
fullModel <- glm(formula = one_month_churn ~ .,family=binomial,data=dummynovember)
nove_Selection <- step(nullModel, scope=list(lower=nullModel, upper=fullModel), direction="forward")
summary(nove_Selection)
nove_variable <- rownames(summary(nove_Selection)$coefficients)[-1]
match(nove_variable,names(dummynovember))
# Keep the selected columns; col 16 of the result is the target.
selenove<-dummynovember[,c(68,1,43,4,58,35,55,67,24,12,72,74,57,46,29,61)]
targetVar<-"one_month_churn"
xVars<-names(selenove[,-16])
set.seed(600)
inTrain <- createDataPartition(y = selenove[,targetVar], list = FALSE, p = .8)
train_nove <- selenove[inTrain,]
test_nove <- selenove[-inTrain,]
stopifnot(nrow(train_nove) + nrow(test_nove) == nrow(selenove))
# Case weights: churners get weight 15, everyone else 1.
# NOTE(review): the length 11012 is hard-coded to the current training-set
# size -- use nrow(train_nove) instead; this silently breaks if the split
# or the data changes.
trainweight<-as.vector(rep(NA,11012))
trainchurn<-which(train_nove[,16]=="1")
trainweight[trainchurn]<-15
trainweight[-trainchurn]<-1
trainweight
testchurn<-which(test_nove[,16]=="1")
createModelFormula <- function(targetVar, xVars, includeIntercept = TRUE){
  # Assemble "target ~ x1+ x2" (optionally followed by "-1" to drop the
  # intercept) from character vectors and convert the string to a formula.
  # Duplicate of the helper defined earlier in this script.
  rhs <- paste(xVars, collapse = '+ ')
  pieces <- if (includeIntercept) {
    c(targetVar, "~", rhs)
  } else {
    c(targetVar, "~", rhs, "-1")
  }
  as.formula(paste(pieces, collapse = " "))
}
# Fit a weighted logistic regression on the November split and evaluate it
# (confusion matrix, PR curve, ROC curve, AUC).
modelForm <- createModelFormula(targetVar, xVars)
nove_model <- glm(modelForm,family=binomial(link='logit'),weights = trainweight, data=train_nove)
nove_fitted <- predict(nove_model
                       ,newdata = test_nove[,xVars]
                       # Specifying response means we want the probabilities
                       ,type='response')
# Threshold probabilities at 0.5 to get hard class labels.
nove_pred <- ifelse(nove_fitted > 0.5,1,0)
confusion <- confusionMatrix(data = nove_pred
                             , reference = test_nove[,targetVar]
                             , dnn = c("Predicted Churn", 'Actual Churn')
)
confusion
# Precision-recall and ROC diagnostics (DMwR / ROCR).
PRcurve(preds = nove_pred, trues = test_nove$one_month_churn)
pr <- prediction(nove_pred, test_nove$one_month_churn)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
plot(prf)
# Area under the ROC curve.
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
|
3097af915a9952be478d7c4687edbae2bc44e59f | 22d316e4dda53044ae7a6389c699f06baf673adf | /GenerateFlightsPADS.R | 5d11f22152eedd72a738af519ba9a61ef4977cb4 | [] | no_license | sujaykhandekar/R_check | e454b396c8b354ce89086a3dde8e6f1a29179b13 | 00fa4571b93a0ffe63f0d685d2163d815aedb0ca | refs/heads/master | 2023-07-02T01:10:46.521824 | 2021-08-11T03:16:39 | 2021-08-11T03:16:39 | 390,935,115 | 0 | 1 | null | 2021-08-05T17:25:37 | 2021-07-30T04:55:02 | R | UTF-8 | R | false | false | 31,551 | r | GenerateFlightsPADS.R | # Generate airlines / flight PADS
# Author: Jitender Aswani, Co-Founder @datadolph.in
# Date: 3/15/2013
# Copyright (c) 2011, under the Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0) License
# For more information see: https://creativecommons.org/licenses/by-nc/3.0/
# All rights reserved.
source("CreatePADS.R")
#
# startup
#
startup <- function() {
  # Initialize the PADS pipeline for the US flights dataset: set up the
  # system (via CreatePADS.R), publish the data folder path, dataset name,
  # and the shared `series` metadata list into the global environment
  # (downstream functions read these as globals), then load lookup tables.
  #initialize system
  initializeSystem()
  assign("flights.folder.path", "./pads/raw-data/flights/stats/", envir=.GlobalEnv)
  assign("dataset", "US-Flights", envir=.GlobalEnv)
  #prepare pad meta data
  series <- list()
  series["source"] <- "Bureau of Transportation Statistics"
  series["category"] <- "Transportation"
  series["subcategory"] <- "Flights Records USA"
  series["category_id"]<- 23
  series["subcategory_id"]<- 210
  series["tags"] <- tolower(paste(series$category, series$subcategory, series$source, "Flights Airlines, Airports, USA", sep=","))
  series.desc <- "US Flights Data. The data are collected from BTS, USA and includes only passenger flights as reports to BTS by airlines."
  assign("series", series, envir=.GlobalEnv)
  assign("series.desc", series.desc, envir=.GlobalEnv)
  #load data
  loadData()
}
#
# cleanup IPL
#
cleanup <- function(){
  # Tear down whatever initializeSystem()/startup() set up.
  # NOTE(review): `cleaupSystem` (sic) is defined in CreatePADS.R -- verify
  # the misspelling matches the definition there before "fixing" it.
  cleaupSystem()
}
#
# loadData
#
loadData <- function(){
  # Load shared lookup tables (stats file names, carrier names, US airport
  # names, month labels) and publish them as globals for the summary
  # functions below.  Uses data.table keyed joins; note that `:=` modifies
  # tables by reference.
  # Six pre-aggregated stats files named 1.csv .. 6.csv.
  n <- 1:6
  filenames <- paste(flights.folder.path, n, ".csv", sep="")
  assign("filenames", filenames, envir=.GlobalEnv)
  #load carriers data
  carriers <- data.table(read.csv(paste(flights.folder.path, "carriers.csv", sep=""),stringsAsFactors=F))
  setnames(carriers,colnames(carriers),tolower(colnames(carriers)))
  # NOTE(review): `description` is dropped here, yet several functions below
  # still do `[,description:=NULL]` on join results -- confirm those are
  # redundant no-ops (data.table warns on removing a missing column).
  carriers <- carriers[, description:=NULL]
  carriers$airlines <- removeMetaChars(carriers$airlines)
  setkey(carriers, uniquecarrier)
  assign("carriers", carriers, envir=.GlobalEnv)
  #load airports data: keep only US airports, keep just iata + airport name
  airport.list <- data.table(read.csv(paste(flights.folder.path, "airports.csv", sep=""),stringsAsFactors=F))
  airport.list <- airport.list[country=="USA"][, city:=NULL][, state:=NULL][, country:=NULL][, lat:=NULL][, long:=NULL]
  airport.list$airport <- removeMetaChars(airport.list$airport)
  setkey(airport.list, iata)
  assign("airport.list", airport.list, envir=.GlobalEnv)
  #lmonths: month-number -> month-name lookup used to label monthly series
  lmonths <- data.table(val=1:12,
                        month=c("January","February","March", "April","May","June",
                                "July","August","September", "October","November","December"))
  setkey(lmonths, val)
  assign("lmonths", lmonths, envir=.GlobalEnv)
}
#
# overall summary stats
#
overallSummaryStats <- function(flights.data, for.period){
  # Emit the headline PADS for the whole dataset: key totals, airline and
  # airport counts, flights per year, and year-over-year flight growth.
  # `flights.data` is the yearly stats data.table; `for.period` is a label
  # like "(1988-2012)" appended to each pad title.  `series`/`series.desc`
  # are globals set in startup(); assignments here modify a local copy of
  # `series`, which is what padify() receives.
  #for all years
  series.data <- flights.data[, list(flights=sum(flights),
                                     flights_delayed=sum(flights_departed_late),
                                     flights_cancelled=sum(flights_cancelled),
                                     flights_diverted=sum(flights_diverted),
                                     total_delay_in_mins=sum(total_dep_delay_in_mins))]
  series["title"] <- paste("Key Statistics - US Flights Data ", for.period, sep="")
  series["desc"] <- series.desc
  # Transpose the one-row summary into (measure, aggregate_value) pairs.
  series.data <- as.data.frame(t(series.data))
  series.data$measure <- rownames(series.data)
  series.data <- series.data[,c(2,1)]
  colnames(series.data) <- c("measure", "aggregate_value")
  padify(series, series.data[order(-series.data$aggregate_value),])
  ## airlines by year
  series.data <- flights.data[, list(year, airlines)]
  series["title"] <- paste("Total Number of Passenger Airlines", for.period, sep=" ")
  padify(series, series.data)
  ## airports by year
  series.data <- flights.data[, list(year, airports=dep_airports)]
  series["title"] <- paste("Total Number of Passenger Airports", for.period, sep=" ")
  padify(series, series.data)
  ## Flights by year
  series.data <- flights.data[, list(year, flights)]
  series["title"] <- paste("Total Number of Passenger Flights", for.period, sep=" ")
  padify(series, series.data)
  # get growth rates
  # NOTE(review): padding with the string "NA" forces the column to
  # character, and as.integer() later coerces it back with a warning;
  # NA_integer_ would be cleaner.  The first two rows are then dropped.
  series.data$growth <- c("NA", diff(series.data$flights))
  series.data <- series.data[-c(1:2),][,flights:=NULL]
  series.data$growth <- as.integer(series.data$growth)
  series["title"] <- paste("Growth (Decline) In Passenger Flights", for.period, sep=" ")
  padify(series, series.data)
}
#
# summary stats by year or by month
#
summaryStatsByPeriod <- function(flights.data, freq="year", for.period=NULL, monthly.compare=F) {
  # Emit one pad per metric (flights, delays, cancellations, diversions,
  # delay minutes) aggregated over `freq` ("year" or "month").
  # - flights.data : stats rows for one year (when called per-year with
  #   freq="month") or one month across years (monthly.compare=TRUE).
  # - for.period   : optional label appended to titles; defaults to the
  #   first row's year.
  # - monthly.compare : prefix the label with the month name when comparing
  #   the same month across years.
  # When freq=="month" each series is joined to `lmonths` so rows carry
  # month names instead of numbers.  Reads globals: series, series.desc,
  # lmonths.  Returns NULL (called for its padify() side effects).
  #convert to a data.table (ddply hands us a plain data.frame)
  flights.data <- data.table(flights.data)
  if(is.null(for.period))
    for.period <- paste("(", flights.data$year[1],")", sep="")
  if(monthly.compare)
    for.period <- paste("For", lmonths[flights.data$month[1]]$month,for.period, sep=" ")
  cat("\n", freq, " - ", for.period)
  series["desc"] <- series.desc
  ## Flights
  series.data <- flights.data[, list(period=get(freq), flights)]
  series["title"] <- paste( "Total Number of Passenger Flights", for.period, sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  ## Flights departed late
  series.data <- flights.data[, list(period=get(freq), late_flights=flights_departed_late)]
  series["title"] <- paste( "Total Number of Passenger Flights Departed Late", for.period, sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  ## Flights vs late departed flights
  series.data <- flights.data[, list(period=get(freq), flights, late_flights=flights_departed_late)]
  series["title"] <- paste( "Total Number of Passenger Flights and Flights Delayed", for.period, sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  ## % of flights departed late by year
  series.data <- flights.data[, list(period=get(freq), percent_late_flights=round(flights_departed_late/flights, 2)*100)]
  series["title"] <- paste( "Percent of Passenger Flights Departed Late", for.period, sep=" ")
  series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  #reset description after the percent pads
  series["desc"] <- series.desc
  ## Flights Canceled
  series.data <- flights.data[, list(period=get(freq), canceled_flights=flights_cancelled)]
  series["title"] <- paste( "Total Number of Canceled Passenger Flights", for.period, sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  ## % of flights Canceled
  series.data <- flights.data[, list(period=get(freq), percent_canceled_flights=round(flights_cancelled/flights, 2)*100)]
  series["title"] <- paste( "Percent of Canceled Passenger Flights", for.period, sep=" ")
  series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  #reset
  series["desc"] <- series.desc
  ## Flights diverted by year
  series.data <- flights.data[, list(period=get(freq), diverted_flights=flights_diverted)]
  series["title"] <- paste( "Total Number of Diverted Passenger Flights ", for.period, sep="")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  # ## % of flights Canceled by year
  # series.data <- flights.data[, list(period=get(freq), percent_diverted_flights=round(flights_diverted/flights, 2)*100)]
  # series["title"] <- paste( "Percent of Diverted Passenger Flights ", for.period, sep="")
  # series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
  # if(freq=="month"){
  #   setkey(series.data, period)
  #   series.data <- series.data[lmonths][,period:=NULL]
  #   series$title <- paste("Monthly", series$title)
  # }
  # padify(series, series.data)
  # #reset
  # series["desc"] <- series.desc
  ## total delay
  series.data <- flights.data[, list(period=get(freq), total_dep_delay_in_mins)]
  series["title"] <- paste( "Total Departure Delay (in mins) ", for.period, sep="")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  ## average dep delay
  series.data <- flights.data[, list(period=get(freq), avg_dep_delay_in_mins)]
  series["title"] <- paste( "Average Departure Delay (in mins)", for.period, sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  ## median dep delay
  series.data <- flights.data[, list(period=get(freq), median_dep_delay_in_mins)]
  series["title"] <- paste( "Median Departure Delay (in mins)", for.period, sep=" ")
  if(freq=="month"){
    setkey(series.data, period)
    series.data <- series.data[lmonths][,period:=NULL]
    series$title <- paste("Monthly", series$title)
  }
  padify(series, series.data)
  # Explicit NULL so ddply callers do not accumulate pad return values.
  return(NULL)
}
#
#build over all summary stats without breaking it by airlines or by airports
#
buildSummaryStats <- function(){
  # Driver for the dataset-level pads: file 1 holds yearly totals, file 2
  # holds month-by-year totals.  Reads the global `filenames` set by
  # loadData().  The try() wrappers keep one bad year/month group from
  # aborting the whole run (errors are printed, silent=F).
  flights <- data.table(read.csv(filenames[1], stringsAsFactors=F))
  period.min <- min(flights$year)
  period.max <- max(flights$year)
  years <- paste("(", period.min, "-", period.max, ")", sep="")
  # get overall summary stats
  overallSummaryStats(flights, years)
  #get summary stats
  summaryStatsByPeriod(flights, "year", years)
  ## read the second file
  flights <- data.table(read.csv(filenames[2], stringsAsFactors=F))
  setkeyv(flights, c("month", "year"))
  #load flights stats file by month for every year
  try(ddply(flights, .(year), summaryStatsByPeriod, "month"), silent=F)
  #compare across same months for every year
  try(ddply(flights, .(month), summaryStatsByPeriod, "year", years, T), silent=F)
}
#
#summaryStatsfor AllAirlines
#
summaryStatsAllAirlines <- function(flights.data, for.period=NULL){
  # Emit airline-ranking pads (top/worst 15 by flights, delays, average
  # delay, cancellations, diversions) for one period.  Carrier codes are
  # joined to readable names via the keyed global `carriers` table.
  # NOTE(review): loadData() already removed `description` from carriers,
  # so the repeated [,description:=NULL] calls below should be no-ops --
  # confirm (data.table warns when deleting a missing column).
  # Returns NULL (called for its padify() side effects).
  if(is.null(for.period))
    for.period <- flights.data$year[1]
  series["desc"] <- series.desc
  print(for.period)
  ## Flights by airlines by year
  f.d <- flights.data[, list(flights=sum(flights)), by=uniquecarrier][order(-flights)]
  series["title"] <- paste("Top 15 Airlines by Flights", for.period, sep=" ")
  setkey(f.d, uniquecarrier)
  series.data <- carriers[f.d][,uniquecarrier:=NULL][,description:=NULL][order(-flights)][1:15]
  padify(series, series.data)
  ## Delayed Flights by airlines by year
  d.d <- flights.data[, list(late_flights=sum(flights_departed_late)), by=uniquecarrier][order(-late_flights)]
  series["title"] <- paste("Worst 15 Airlines by Number of Late Flights", for.period, sep=" ")
  setkey(d.d, uniquecarrier)
  series.data <- carriers[d.d][,uniquecarrier:=NULL][,description:=NULL][order(-late_flights)][1:15]
  padify(series, series.data)
  # merge the two - flights and delayed flights (note: top 10 here, not 15)
  series.data <- f.d[d.d]
  series["title"] <- paste("Top 15 Airlines by Number of Flights and Late Flights", for.period, sep=" ")
  setkey(series.data, uniquecarrier)
  series.data <- carriers[series.data][,uniquecarrier:=NULL][,description:=NULL][order(-flights)][1:10]
  padify(series, series.data)
  ## Flights by total delay
  t.d <- flights.data[, list(dep_delay=sum(total_dep_delay_in_mins)), by=uniquecarrier][order(-dep_delay)]
  series["title"] <- paste("Top 15 Worst Airlines by Total Departure Delay", for.period, sep=" ")
  setkey(t.d, uniquecarrier)
  series.data <- carriers[t.d][,uniquecarrier:=NULL][,description:=NULL][order(-dep_delay)][1:15]
  padify(series, series.data)
  # merge the two - average dep. delay = total delay / late flights
  series.data <- d.d[t.d]
  series.data <- series.data[, list(uniquecarrier, avg_delay_in_mins=round(dep_delay/late_flights, 2))]
  series["title"] <- paste("Top 15 Worst Airlines by Average Departure Delay", for.period, sep=" ")
  setkey(series.data, uniquecarrier)
  series.data <- carriers[series.data][,uniquecarrier:=NULL][,description:=NULL][order(-avg_delay_in_mins)][1:15]
  padify(series, series.data)
  # for large airlines: restrict to the 15 carriers with most late flights,
  # then rank those by average delay
  series.data <- d.d[t.d]
  series.data <- series.data[, list(uniquecarrier, late_flights, avg_delay_in_mins=round(dep_delay/late_flights, 2))]
  series["title"] <- paste("Top 15 Worst Large Airlines by Average Departure Delay", for.period, sep=" ")
  setkey(series.data, uniquecarrier)
  series.data <- carriers[series.data][,uniquecarrier:=NULL][,description:=NULL][order(-late_flights)][,late_flights:=NULL][1:15][order(-avg_delay_in_mins)]
  padify(series, series.data)
  ## Canceled Flights by airlines by year
  c.d <- flights.data[, list(canceled_flights=sum(flights_cancelled)), by=uniquecarrier][order(-canceled_flights)]
  series["title"] <- paste("Worst 15 Airlines by Number of Canceled Flights", for.period, sep=" ")
  setkey(c.d, uniquecarrier)
  series.data <- carriers[c.d][,uniquecarrier:=NULL][,description:=NULL][order(-canceled_flights)][1:15]
  padify(series, series.data)
  # merge the two - flights and canceled flights -> cancellation rate
  series.data <- f.d[c.d]
  series.data <- series.data[, list(uniquecarrier,percent_canceled_flights=round(canceled_flights/flights, 4))]
  series["title"] <- paste("Worst 15 Airlines by Percent of Canceled Flights", for.period, sep=" ")
  setkey(series.data, uniquecarrier)
  series.data.w <- carriers[series.data][,uniquecarrier:=NULL][,description:=NULL][order(-percent_canceled_flights)][1:15]
  padify(series, series.data.w)
  #best (ascending cancellation rate)
  series["title"] <- paste("Best 15 Airlines by Percent of Canceled Flights", for.period, sep=" ")
  series.data.b <- carriers[series.data][,uniquecarrier:=NULL][,description:=NULL][order(percent_canceled_flights)][1:15]
  padify(series, series.data.b)
  ## diverted Flights by airlines by year
  di.d <- flights.data[, list(flights_diverted=sum(flights_diverted)), by=uniquecarrier][order(-flights_diverted)]
  series["title"] <- paste("Worst 15 Airlines by Number of Diverted Flights", for.period, sep=" ")
  setkey(di.d, uniquecarrier)
  series.data <- carriers[di.d][,uniquecarrier:=NULL][,description:=NULL][order(-flights_diverted)][1:15]
  padify(series, series.data)
  return(NULL)
}
#Summary Stats by airlines
summaryStatsByAirlines <- function(flights.data, freq="year", for.period=NULL) {
series["desc"] <- series.desc
flights.data <- data.table(flights.data)
#get airline name
airline <- carriers[flights.data$uniquecarrier[1]]$airlines
if(is.null(for.period))
for.period <- paste("(", flights.data$year[1],")", sep="")
cat("\n", airline, " - ", freq, " - ", for.period)
## airports by year
series.data <- flights.data[, list(period=get(freq), airports=dep_airports)]
series["title"] <- paste("Number of Passenger Airports for", airline, ",", for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## Flights by year
series.data <- flights.data[, list(period=get(freq), flights)]
series["title"] <- paste("Number of Passenger Flights by", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## Flights & airports by year
series.data <- flights.data[, list(period=get(freq), flights, airports=dep_airports)]
series["title"] <- paste("Number of Passenger Flights and Airports for", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## Flights departed late by year
series.data <- flights.data[, list(period=get(freq), flights_departed_late)]
series["title"] <- paste("Total Number of Passenger Flights Departed Late", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## Flights vs late departed flights by year
series.data <- flights.data[, list(period=get(freq), flights, flights_departed_late)]
series["title"] <- paste("Total Number of Passenger Flights and Flights Departed Late", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## % of flights departed late by year
series.data <- flights.data[, list(period=get(freq), percent_delayed_flights=round(flights_departed_late/flights, 2)*100)]
series["title"] <- paste("Percent of Passenger Flights Departed Late", airline, for.period, sep=" ")
series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
#reset
series["desc"] <- series.desc
## Flights Canceled late by year
series.data <- flights.data[, list(period=get(freq), flights_cancelled)]
series["title"] <- paste("Number of Canceled Passenger Flights", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## % of flights Canceled late by year
series.data <- flights.data[, list(period=get(freq), percent_canceled_flights=round(flights_cancelled/flights, 2)*100)]
series["title"] <- paste("Percent of Canceled Passenger Flights", airline, for.period, sep=" ")
series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
#reset
series["desc"] <- series.desc
## Flights diverted by year
series.data <- flights.data[, list(period=get(freq), flights_diverted)]
series["title"] <- paste("Number of Diverted Passenger Flights", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## % of flights diverted by year
series.data <- flights.data[, list(period=get(freq), percent_diverted_flights=round(flights_diverted/flights, 2)*100)]
series["title"] <- paste("Percent of Diverted Passenger Flights", airline, for.period, sep=" ")
series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
#reset
series["desc"] <- series.desc
## total delay
series.data <- flights.data[, list(period=get(freq), total_dep_delay_in_mins)]
series["title"] <- paste("Total Departure Delay (in mins)", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## average dep delay
series.data <- flights.data[, list(period=get(freq), avg_dep_delay_in_mins)]
series["title"] <- paste("Average Departure Delay (in mins)", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
## median dep delay
series.data <- flights.data[, list(period=get(freq), median_dep_delay_in_mins)]
series["title"] <- paste("Median Departure Delay (in mins)", airline, for.period, sep=" ")
if(freq=="month"){
setkey(series.data, period)
series.data <- series.data[lmonths][,period:=NULL]
series$title <- paste("Monthly", series$title)
}
padify(series, series.data)
return(NULL)
}
#
# Summary Stats for airlines
#
#
# Build the summary-stat "pads" for airlines: an overall summary for the
# whole period, one per year, per-carrier yearly series, and per
# carrier/year monthly series.  Reads the pre-aggregated airline stat
# files and dispatches to the summaryStats* workers; each stage is
# wrapped in try() so one failing stage does not abort the rest.
#
buildSummaryStatsAirlines <- function() {
  key.cols <- c("uniquecarrier", "year")

  # Yearly per-carrier stats file.
  yearly <- data.table(read.csv(filenames[5], stringsAsFactors = FALSE))
  setkeyv(yearly, key.cols)

  # Label covering the whole period, e.g. "(1987-2008)".
  span <- paste0("(", min(yearly$year), "-", max(yearly$year), ")")

  # Overall airline summary for the entire period (errors silently ignored).
  try(summaryStatsAllAirlines(yearly, span), silent = TRUE)
  # Overall summary for each individual year.
  try(ddply(yearly, .(year), summaryStatsAllAirlines), silent = TRUE)
  # Per-airline yearly series.
  try(ddply(yearly, .(uniquecarrier), summaryStatsByAirlines, "year", span), silent = FALSE)

  # Monthly per-carrier stats file: per-airline monthly series per year.
  monthly <- data.table(read.csv(filenames[6], stringsAsFactors = FALSE))
  setkeyv(monthly, key.cols)
  try(ddply(monthly, .(uniquecarrier, year), summaryStatsByAirlines, "month"), silent = FALSE)
}
#
# Summary Stats for all airports
#
#
# Summary stats across ALL airports for one period: emits "Top/Worst 15"
# ranking pads (flights, delays, cancellations) via padify(), using the
# script-level globals `series` / `series.desc` as the pad template.
#
# Args:
#   flights.data - per-airport, per-period data.table of flight stats.
#   for.period   - label appended to every title; defaults to the first
#                  year present in the data.
# Returns NULL (called for its padify() side effects).
summaryStatsAllAirports <- function(flights.data, for.period=NULL){
  series["desc"] <- series.desc
  if(is.null(for.period))
    for.period <- flights.data$year[1]
  print(for.period)
  ## Total flights per airport, descending
  f.d <- flights.data[, list(flights=sum(flights)), by=airport][order(-flights)]
  series["title"] <- paste("Top 15 Airports by Flights", for.period, sep=" ")
  padify(series, f.d[1:15])
  ## Late departures per airport, descending
  d.d <- flights.data[, list(late_flights=sum(flights_departed_late)), by=airport][order(-late_flights)]
  series["title"] <- paste("Worst 15 Airports by Number of Delayed Flights", for.period, sep=" ")
  padify(series, d.d[1:15])
  # Join flights with late flights (keyed on airport) for a combined pad
  setkey( f.d, "airport")
  setkey( d.d, "airport")
  series.data <- f.d[d.d][order(-flights)][1:15]
  series["title"] <- paste("Top 15 Airports by Number of Flights and Late Flights", for.period, sep=" ")
  padify(series, series.data)
  ## Total departure delay (in minutes) per airport
  t.d <- flights.data[, list(dep_delay=sum(total_dep_delay_in_mins)), by=airport][order(-dep_delay)]
  series["title"] <- paste("Top 15 Worst Airports by Total Departure Delay", for.period, sep=" ")
  padify(series, t.d[1:15])
  # Average delay = total delay / number of late flights
  setkey( t.d, "airport")
  series.data <- d.d[t.d]
  series.data <- series.data[, list(airport, avg_delay_in_mins=round(dep_delay/late_flights, 2))][order(-avg_delay_in_mins)][1:15]
  series["title"] <- paste("Top 15 Worst Airports by Average Departure Delay", for.period, sep=" ")
  padify(series, series.data)
  # Same metric restricted to busy airports: rank by late-flight volume
  # first, then drop the helper column (`:=NULL` removes it by reference)
  series.data <- d.d[t.d]
  series.data <- series.data[, list(airport, late_flights, avg_delay_in_mins=round(dep_delay/late_flights, 2))][order(-late_flights)][1:15][,late_flights:=NULL]
  series["title"] <- paste("Top 15 Worst Large Airports by Average Departure Delay", for.period, sep=" ")
  padify(series, series.data)
  ## Cancelled flights per airport
  c.d <- flights.data[, list(canceled_flights=sum(flights_cancelled)), by=airport][order(-canceled_flights)]
  series["title"] <- paste("Worst 15 Airports by Number of Canceled Flights", for.period, sep=" ")
  padify(series, c.d[1:15])
  # Cancellation rate = cancelled / total flights
  setkey( c.d, "airport")
  series.data <- f.d[c.d]
  series.data <- series.data[, list(airport,percent_canceled_flights=round(canceled_flights/flights, 4))]
  series["title"] <- paste("Worst 15 Airports by Percent of Canceled Flights", for.period, sep=" ")
  padify(series, series.data[order(-percent_canceled_flights)][1:15])
  # Cancellation rate for the busiest airports (ranked by flight volume)
  series.data <- f.d[c.d]
  series.data <- series.data[, list(airport, flights, percent_canceled_flights=round(canceled_flights/flights, 4))]
  series["title"] <- paste("Worst 15 Large Airports by Percent of Canceled Flights", for.period, sep=" ")
  padify(series, series.data[order(-flights)][,flights:=NULL][1:15])
  return(NULL)
}
#Summary Stats for individual airports across years and by months for every year
#
# Summary stats for a single airport: emits one "pad" per metric, either
# as a yearly series (freq = "year") or a monthly series for one year
# (freq = "month").  Relies on the script-level globals `series`,
# `series.desc`, `lmonths`, `airport.list` and the padify() writer.
#
# Args:
#   flights.data - per-airport stats (coerced to data.table); must carry
#                  the metric columns referenced below plus `airport`
#                  and `year`.
#   freq         - "year" or "month"; names the period column to use.
#   for.period   - label appended to every title; defaults to the first
#                  year in the data, wrapped in parentheses.
# Returns NULL (called for its padify() side effects).
summaryStatsByAirport <- function(flights.data, freq="year", for.period=NULL) {
  flights.data <- data.table(flights.data)
  series["desc"] <- series.desc
  if (is.null(for.period))
    for.period <- paste("(", flights.data$year[1], ")", sep="")
  # Full airport name, e.g. "John F Kennedy Intl (JFK)".
  ap <- paste(airport.list[flights.data$airport[1]]$airport, " (", flights.data$airport[1], ")", sep="")
  cat("\n", ap, " - ", freq, " - ", for.period)

  # Shared emitter: every metric below repeated the same four steps, so
  # they are factored out here.  For monthly series the period codes are
  # joined to `lmonths` (month labels) and the title gets a "Monthly"
  # prefix.  `percent` switches the description to the percent-unit
  # variant; because the enclosing `series` is never mutated, each call
  # starts from the clean template and no explicit reset is needed.
  emit.pad <- function(series.data, title, percent=FALSE) {
    series["title"] <- title
    if (percent)
      series["desc"] <- paste(series.desc, "Unit: in percent(%).", sep=" ")
    if (freq == "month") {
      setkey(series.data, period)
      series.data <- series.data[lmonths][, period:=NULL]
      series$title <- paste("Monthly", series$title)
    }
    padify(series, series.data)
  }

  ## Flights
  emit.pad(flights.data[, list(period=get(freq), flights)],
           paste("Number of Passenger Flights by,", ap, for.period, sep=" "))
  ## Flights departed late
  emit.pad(flights.data[, list(period=get(freq), flights_departed_late)],
           paste("Total Number of Passenger Flights Departed Late,", ap, for.period, sep=" "))
  ## Flights vs late departed flights
  emit.pad(flights.data[, list(period=get(freq), flights, flights_departed_late)],
           paste("Total Number of Passenger Flights and Flights Departed Late,", ap, for.period, sep=" "))
  ## % of flights departed late
  emit.pad(flights.data[, list(period=get(freq), percent_delayed_flights=round(flights_departed_late/flights, 2)*100)],
           paste("Percent of Passenger Flights Departed Late,", ap, for.period, sep=" "),
           percent=TRUE)
  ## Canceled flights
  emit.pad(flights.data[, list(period=get(freq), flights_cancelled)],
           paste("Number of Canceled Passenger Flights,", ap, for.period, sep=" "))
  ## % of canceled flights
  emit.pad(flights.data[, list(period=get(freq), percent_canceled_flights=round(flights_cancelled/flights, 2)*100)],
           paste("Percent of Canceled Passenger Flights,", ap, for.period, sep=" "),
           percent=TRUE)
  ## Total departure delay
  emit.pad(flights.data[, list(period=get(freq), total_dep_delay_in_mins)],
           paste("Total Departure Delay (in mins),", ap, for.period, sep=" "))
  ## Average departure delay
  emit.pad(flights.data[, list(period=get(freq), avg_dep_delay_in_mins)],
           paste("Average Departure Delay (in mins),", ap, for.period, sep=" "))
  ## Median departure delay
  emit.pad(flights.data[, list(period=get(freq), median_dep_delay_in_mins)],
           paste("Median Departure Delay (in mins),", ap, for.period, sep=" "))
  return(NULL)
}
#
# Summary Stats
#
#
# Build the summary-stat "pads" for airports, mirroring the airline
# builder: overall stats, per-year stats, per-airport yearly series and
# per-airport monthly series.  Each stage is wrapped in a non-silent
# try() so a failure is reported but later stages still run.
#
buildSummaryStatsAirports <- function() {
  key.cols <- c("airport", "year")

  # Yearly per-airport stats file.
  yearly <- data.table(read.csv(filenames[3], stringsAsFactors = FALSE))
  setkeyv(yearly, key.cols)

  # Label for the whole period, e.g. "(1987-2008)".
  span <- paste0("(", min(yearly$year), "-", max(yearly$year), ")")

  try(summaryStatsAllAirports(yearly, span), silent = FALSE)            # whole period
  try(ddply(yearly, .(year), summaryStatsAllAirports), silent = FALSE)  # each year
  try(ddply(yearly, .(airport), summaryStatsByAirport, "year", span), silent = FALSE)

  # Monthly per-airport stats file: one monthly series per airport/year.
  monthly <- data.table(read.csv(filenames[4], stringsAsFactors = FALSE))
  setkeyv(monthly, key.cols)
  try(ddply(monthly, .(airport, year), summaryStatsByAirport, "month"), silent = FALSE)
}
# Top-level driver: initializes the environment, generates the summary
# pads, then tears down and refreshes the category pad counts.
# The airline/airport builders are currently disabled (commented out);
# the counts noted below are presumably the number of pads each stage
# produces -- TODO confirm.
runAirlines <- function(){
  startup()
  # 355 pads
  buildSummaryStats()
  # 4300 pads
  #buildSummaryStatsAirlines()
  # 51,156 pads
  #buildSummaryStatsAirports()
  cleanup()
  updateCatPadCount()
}
# http://www.linkedin.com/shareArticle?mini=true&source=datadolph.in&url=http://datadolph.in/dg/2013/05/number-of-passenger-flights-by-john-f-kennedy-intl-jfk-1987-2008/&title=Not%20very%20often%20you%20see%20that%20# of flights get reduced year over year - 2001 (911 attacks) and great recession in 2008 cut # of flights #bigdata&summary=Not very often you see that # of flights get reduced year over year - 2001 (911 attacks) and great recession in 2008 cut # of flights #bigdata
# http://www.linkedin.com/shareArticle?mini=true&source=datadolph.in&url=http://datadolph.in/dg/2013/05/number-of-passenger-flights-by-john-f-kennedy-intl-jfk-1987-2008/&title=Not%20very%20often%20you%20see%20that%20%23%20of%20flights%20get%20reduced%20year%20over%20year%20-%202001%20(911%20attacks)%20and%20great%20recession%20in%202008%20cut%20%23%20of%20flights%20%23bigdata&summary=Not%20very%20often%20you%20see%20that%20%23%20of%20flights%20get%20reduced%20year%20over%20year%20-%202001%20(911%20attacks)%20and%20great%20recession%20in%202008%20cut%20%23%20of%20flights%20%23bigdata
|
a2d58ffa4f2ab3bdc63cbb73df0d2a519e4e70dc | fe8e5ccc9567ed3d76e5e6c1f51ce14e2b068caa | /R/lets_shpfilter.R | 404b451a9002ff4fb46031b76c6e09e2f8c98f0e | [] | no_license | SaraVarela/letsR-1 | 40e525b72511c59028843e8b502adfa843c85727 | 04fb5be5d8471b2ccaa0518f6c27b4c724895776 | refs/heads/master | 2020-12-26T21:47:43.942753 | 2014-09-16T09:09:31 | 2014-09-16T09:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,387 | r | lets_shpfilter.R | #' Shapefiles filtering
#'
#' @author Bruno Vilela
#'
#' @description Filter species shapefiles by origin, presence and seasonal type (following IUCN types: \url{http://www.iucnredlist.org/technical-documents/spatial-data}, see metadata)).
#'
#' @usage lets.shFilter(shapes, presence=NULL, origin=NULL, seasonal=NULL)
#'
#' @param shapes Object of class SpatialPolygonsDataFrame (see the function readShapePoly to open these files).
#' @param presence A vector with the code numbers for the presence type to be maintained.
#' @param origin A vector with the code numbers for the origin type to be maintained.
#' @param seasonal A vector with the code numbers for the seasonal type to be maintained.
#'
#' @return The result is the shapefile(s) filtered according to the selected types. If the filters remove all polygons, the result will be NULL.
#'
#'
#' @details Presence codes:
#' (1) Extant,
#' (2) Probably Extant,
#' (3) Possibly Extant,
#' (4) Possibly Extinct,
#' (5) Extinct (post 1500) &
#' (6) Presence Uncertain.
#'
#' Origin codes:
#' (1) Native,
#' (2) Reintroduced,
#' (3) Introduced,
#' (4) Vagrant &
#' (5) Origin Uncertain.
#'
#' Seasonal codes:
#' (1) Resident,
#' (2) Breeding Season,
#' (3) Non-breeding Season,
#' (4) Passage &
#' (5) Seasonal Occurrence Uncertain.
#'
#' More info in the shapefiles' metadata.
#'
#' @seealso \code{\link{plot.PresenceAbsence}}
#' @seealso \code{\link{lets.presab}}
#' @seealso \code{\link{lets.presab.birds}}
#'
#'
#' @export
lets.shFilter <- function(shapes, presence = NULL, origin = NULL, seasonal = NULL) {
  # Fast path: no filter requested, return the shapefile untouched.
  if (is.null(presence) && is.null(origin) && is.null(seasonal)) {
    return(shapes)
  }

  # Normalize the IUCN attribute names to lower case; wrapped in try() so
  # objects without a renameable names attribute are left as-is.
  try(names(shapes)[names(shapes) == "ORIGIN"] <- "origin", silent = TRUE)
  try(names(shapes)[names(shapes) == "PRESENCE"] <- "presence", silent = TRUE)
  try(names(shapes)[names(shapes) == "SEASONAL"] <- "seasonal", silent = TRUE)

  # Keep only the rows whose `column` value is one of `codes`.
  # NULL `codes` means "no filter for this attribute"; NULL `shp` (all
  # polygons already removed by a previous filter) is passed through; if
  # nothing matches, return NULL -- exactly the semantics of the three
  # copy-pasted per-attribute blocks this helper replaces.
  keep_codes <- function(shp, column, codes) {
    if (is.null(shp) || is.null(codes)) {
      return(shp)
    }
    pos <- which(shp[[column]] %in% codes)
    if (length(pos) > 0) shp[pos, ] else NULL
  }

  shapes <- keep_codes(shapes, "presence", presence)
  shapes <- keep_codes(shapes, "origin", origin)
  shapes <- keep_codes(shapes, "seasonal", seasonal)
  return(shapes)
}
|
bf7c71d53dba2ef195e0b96c2cb29f6645aab247 | cfea5974c9ea24817bb7ec2b3915c0bd43186247 | /extract_EOBS/global.R | 1074ad0b3fd4b9ab99d34fc15474d7b96c1d0e73 | [] | no_license | tommy-klein/webXTREME | 1ea4e07bbd6dc41afa5d057ff08e3effbe90e7ce | 4f1cfcbbf417be06b0e63c5057f9b8ccda49567a | refs/heads/master | 2021-01-10T02:42:08.477426 | 2016-10-23T21:05:19 | 2016-10-23T21:05:19 | 51,244,426 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,816 | r | global.R | library(RCurl)
library(XML)
library(dplyr)
library(stringr)
library(httr)
library(lubridate)
#### METADATA
# Catalog of all E-OBS 0.25-degree datasets, one HTML line per element.
catalog.datasets <- getURL("http://opendap.knmi.nl/knmi/thredds/catalog/e-obs_0.25regular/catalog.html") %>%
  strsplit("\n") %>% unlist()
# Metadata page for the collection (time coverage, resolution).
catalog.metadata <- getURL("http://opendap.knmi.nl/knmi/thredds/catalog/e-obs_0.25regular/catalog.html?dataset=e-obs_0.25regular") %>%
  strsplit("\n") %>% unlist()
# Time coverage parsed out of the "Start"/"End" metadata lines.
# NOTE(review): the fixed str_sub offsets (25 / 23) assume the exact page
# layout of the KNMI THREDDS server -- verify if the server changes.
start.date <- grep("Start", catalog.metadata, value = T) %>% str_sub(start = 25) %>% as.Date()
end.date <- grep("End", catalog.metadata, value = T) %>% str_sub(start = 23) %>% as.Date()
# Every day in the covered period.
dates.all <- as.Date(start.date : end.date, origin = "1970-01-01")
# Grid resolution in degrees, scraped from the "Dataset" line
# (characters 14-17, e.g. "0.25") -- layout-dependent, see note above.
res <- grep("Dataset", catalog.metadata, value = T) %>%
  str_sub(start = 14, end = 17) %>% as.numeric()
# Display label -> E-OBS variable code, used by the UI and the readers.
parameters <- c("Maximum air temperature (ºC)" = "tx",
                "Minimum air temperature (ºC)" = "tn",
                "Precipitation (mm)" = "rr")
# Build the OPeNDAP ASCII query prefix for one E-OBS variable.
#
# Scans the THREDDS catalog lines for the dataset matching `param` at the
# script-level resolution `res`, discards the standard-error variant, and
# returns "http://.../dodsC/<dataset>.nc.ascii?" ready to have a variable
# selection appended.
#
# Args:
#   string - character vector of catalog HTML lines.
#   param  - E-OBS variable code ("tx", "tn", "rr", "elev", ...).
parse.dataset.name <- function(string, param) {
  wanted <- paste0(param, "_", res, "deg_reg")
  hits <- grep(wanted, string, value = TRUE)
  main.line <- hits[!grepl(pattern = "stderr", x = hits)]
  # Text after "dataset=", truncated at the ".nc" extension.
  after.key <- unlist(strsplit(main.line, "dataset="))[2]
  dataset <- unlist(strsplit(after.key, ".nc"))[1]
  paste0("http://opendap.knmi.nl/knmi/thredds/dodsC/", dataset, ".nc.ascii?")
}
# Fetch one variable's daily series for a single grid cell via OPeNDAP.
#
# Args:
#   lat, lon             - 0-based grid indices into the E-OBS lat/lon axes.
#   time.start, time.end - 0-based time-axis indices (inclusive).
#   string               - catalog lines, forwarded to parse.dataset.name().
#   param                - variable code: "rr" (precip), "tn"/"tx" (temps).
# Returns a data.frame with columns "time" and <param>, the latter decoded
# from the packed integer values with the variable's scale factor.
get.data <- function(lat, lon, time.start, time.end, string, param) {
  # Packed-value scale factor (E-OBS serves scaled integers).
  scale.factor <- switch(param,
                         "rr" = 0.1,
                         "tn" = 0.01,
                         "tx" = 0.01)

  # Assemble the OPeNDAP index-subset query: <var>[t0:t1][lat][lon].
  time <- sprintf("[%i:%i]", time.start, time.end)
  lat.str <- sprintf("[%i]", lat)
  lon.str <- sprintf("[%i]", lon)
  api <- paste0(parse.dataset.name(string = string, param = param), param)
  query <- paste0(api, time, lat.str, lon.str)

  # Drop the first 12 lines of the ASCII response (preamble, per the
  # original offset), then keep one "timestamp, value" line per requested
  # day and split each into its two fields.
  body <- content(GET(query))
  all.lines <- str_split(body, "\n")[[1]]
  data.lines <- head(all.lines[-1:-12], (time.end - time.start) + 1)
  fields <- unlist(str_split(data.lines, ", "))
  dat <- as.data.frame(matrix(fields, ncol = 2, byrow = TRUE),
                       stringsAsFactors = FALSE)

  colnames(dat) <- c("time", param)
  # Decode the packed values to physical units.
  dat[, param] <- as.numeric(dat[, param]) * scale.factor
  dat
}
#### Grid setup (runs once at app start)
# Elevation dataset query prefix; used here only to read the coordinate axes.
api.elev <- parse.dataset.name(string = catalog.datasets, param = "elev")
lat.query <- api.elev %>% paste0("latitude")
lon.query <- api.elev %>% paste0("longitude")
# Coordinate axes: line 6 of the ASCII response holds the comma-separated
# axis values.  NOTE(review): the fixed line offset assumes the OPeNDAP
# ASCII response layout -- verify if the server changes.
lat.all <- GET(lat.query) %>% content() %>% str_split("\n") %>%
  .[[1]] %>% .[6] %>% str_split(", ") %>% unlist %>% as.numeric()
lon.all <- GET(lon.query) %>% content() %>% str_split("\n") %>%
  .[[1]] %>% .[6] %>% str_split(", ") %>% unlist %>% as.numeric()
# One row per grid cell, with the cell's bounding box (center +/- res/2).
grid <- expand.grid(lat = lat.all, lon = lon.all) %>%
  mutate(lng1 = lon - res / 2,
         lat1 = lat - res / 2,
         lng2 = lon + res / 2,
         lat2 = lat + res / 2)
43522f363bdc4f2dbfec4e181ecb4f84a6b4ac2e | db12b990924703cd74748d8585cd9c11fafa6746 | /h2o-r/tests/testdir_jira/runit_NOPASS_INTERNAL_hex_2026_baysian_priors.R | 0871d12483c7bd574cd2c3c9d97ac7299624ac0c | [
"Apache-2.0"
] | permissive | h2oai/h2o-3 | 919019a8f297eec676011a9cfd2cc2d97891ce14 | d817ab90c8c47f6787604a0b9639b66234158228 | refs/heads/master | 2023-08-17T18:50:17.732191 | 2023-08-17T16:44:42 | 2023-08-17T16:44:42 | 17,371,412 | 6,872 | 2,345 | Apache-2.0 | 2023-09-14T18:05:40 | 2014-03-03T16:08:07 | Jupyter Notebook | UTF-8 | R | false | false | 3,668 | r | runit_NOPASS_INTERNAL_hex_2026_baysian_priors.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
test <- function(){
## Helper functions
# Function to standardize data
standardizeVec <- function(v) {(v - mean(v))/sd(v)}
standardizeH2OFrame <- function(X) {
X2 <- X
for(i in seq(1,ncol(X)-1))
X2[,i] <- standardizeVec(X2[,i])
X2
}
# Functions to calculate logistic gradient
logistic_gradient <- function(x,y,beta) {
y <- -1 + 2*y
eta <- x %*% beta
d <- 1 + exp(-y*eta)
grad <- -y * (1-1.0/d)
t(grad) %*% x
}
# no L1 here, alpha is 0
h2o_logistic_gradient <- function(x,y,beta,beta_given,rho,lambda) {
grad <- logistic_gradient(x,y,beta)/nrow(x) + (beta - beta_given)*rho + lambda*beta
grad
}
## Import data
h2oData <- h2o.importFile("/mnt/0xcustomer-datasets/c27/data.csv")
betaConstraints <- h2o.importFile("/mnt/0xcustomer-datasets/c27/constraints_indices.csv")
betaConstraints <- betaConstraints[1:(nrow(betaConstraints)-1),] # remove intercept
betaConstraints <- as.data.frame(betaConstraints)
## Set Parameters
indVars <- as.character(betaConstraints$names[1:nrow(betaConstraints)])
depVars <- "C3"
lambda <- 0
alpha <- 0
family_type <- "binomial"
## Take subset of data
Log.info("Subset dataset to only predictor and response variables...")
h2oData <- h2oData[,c(indVars, depVars)]
summary(h2oData)
## Run full H2O GLM with Bayesian priors vs no priors
Log.info("Run a logistic regression with no regularization and alpha = 0 and beta constraints with priors. ")
glm_bayesianp <- h2o.glm(x = indVars, y = depVars, training_frame = h2oData, family = family_type, lambda = lambda,
alpha = alpha, beta_constraints = betaConstraints)
Log.info("Run a logistic regression with no regularization and alpha = 0 and beta constraints without priors. ")
glm_nopriors <- h2o.glm(x = indVars, y = depVars, training_frame = h2oData, family = family_type, lambda = lambda,
alpha = alpha, beta_constraints = betaConstraints[c("names","lower_bounds","upper_bounds")])
## Standardize Data Set
Log.info("Standardize Data in R: ")
data.df <- as.data.frame(h2oData)
data.standardize <- standardizeH2OFrame(data.df)
## check standardization is done correctly
checkEqualsNumeric(apply(data.standardize[,1:22], 2, mean), rep(0, 22), 1E-10)
checkEqualsNumeric(apply(data.standardize[,1:22], 2, sd), rep(1, 22), 1E-10)
## Seperate to x and y matrices
y <- as.matrix(data.standardize[,depVars])
x <- cbind(as.matrix(data.standardize[,indVars]),1)
Log.info("Calculate the gradient: ")
beta1 <- glm_bayesianp@model$coefficients_table[,3]
beta2 <- glm_nopriors@model$coefficients_table[,3]
## Standardize beta given
beta_given.df <- as.data.frame(betaConstraints$beta_given)
col_sd <- apply(data.df[,1:22], 2, sd)
beta_given <- beta_given.df[,1]*col_sd
lambda <- glm_bayesianp@allparameters$lambda
rho <- c(rep(1,22),0)
beta <- c(beta_given,0)
gradient1 <- h2o_logistic_gradient(x,y,beta = beta1, beta_given = beta, rho= rho, lambda)
gradient2 <- h2o_logistic_gradient(x,y,beta = beta2, beta_given = beta, rho= 0, lambda)
Log.info("Check gradient of beta constraints with priors or beta given...")
print(gradient1)
if(!all(gradient1 < 1E-8)) stop(paste0("Gradient from model output > ", 1E-8))
Log.info("Check gradient of beta constraints without priors or beta given...")
print(gradient2)
if(!all(gradient2 < 1E-4)) stop(paste0("Gradient from model output > ", 1E-4))
}
doTest("GLM Test: Bayesian Priors with Standardization = FALSE: ", test)
|
b0881533f211edac61af4977c4966718f9e72ab6 | 53db63194c085eb30daa499c494db92f1b132e48 | /R/compute_metrics.R | 7cdf3bc1f0da8f1132dafe2d98b4a4bd4c624011 | [] | no_license | COMBINE-lab/selective-alignment-experiment | 8b65e3f0e74be7d97f00d5e63b7b2f6b23a5be25 | a35ccd775440e80d1991acfdb152a9b40dc36c0a | refs/heads/master | 2021-01-15T16:47:52.935743 | 2018-05-20T14:04:09 | 2018-05-20T14:04:09 | 99,725,450 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,543 | r | compute_metrics.R | library(tidyverse)
source('readers.R')
source('utils.R')
source('plots.R')
source('metrics.R')
library(jsonlite)
library(stargazer)
methods = c('hera', 'Bowtie2', 'STAR', 'selaln', 'selaln-or', 'kallisto')
mreaders <- list(hera=read_hera, Bowtie2=read_salmon, STAR=read_salmon, selaln=read_salmon, `selaln-or`=read_salmon, kallisto=read_kallisto)
jsonFile <- '../../results/sim30/data.json'
d <- jsonlite::read_json(jsonFile)
path <- normalizePath(dirname(jsonFile))
dset <- d['experiment']
dflist <- list()
print(d['truth'])
t <- read_rsem_truth(file.path(path, d['truth']))
#t <- read_polyester_truth(file.path(d[dset][[1]]['truth']))
for (m in methods) {
print(sprintf("reading in results for %s", m))
dflist[[m]] <- mreaders[[m]](file.path(path, d[m]))
}
print("merging data frames")
merged <- merge_tables(dflist, t)
print(head(merged))
mnames <- rep('', length(methods))
spears <- rep(0.0, length(methods))
mards <- rep(0.0, length(methods))
for (i in seq_along(methods)) {
m <- methods[[i]]
k <- sprintf("NumReads.%s",m)
s <- cor(merged$NumReads, merged[[k]], method='spearman')
mrd <- mard(merged, "NumReads", k, cutoff=0.0)
meanae <- mae(merged, "NumReads", k, cutoff=0.0)
print(sprintf("truth vs. %s", m))
print("============")
print(sprintf(" spearman %f", s))
print(sprintf(" mard %f", mrd))
print(sprintf(" MAE %f", meanae))
cat("\n")
mnames[[i]] <- m
spears[[i]] <- s
mards[[i]] <- mrd
}
res <- data.frame(method=mnames, spearman=spears, mard=mards)
print(stargazer(res, summary=FALSE))
|
85dd270285741ecbf4d023a583f7bc8bffe0d655 | 24bb8d525f1c87676188766e12a5c89cf166477b | /tests/testthat/test-hello.R | 12415c5aab8afd60dc13eaed29c7148b92ad4726 | [] | no_license | markusmortensen/testPkg | 344f0f33b5c681460977af90f2a812c6e322ef6d | 7a417408764d52dc6c390b30acbf7c82d9367df0 | refs/heads/master | 2020-07-12T00:04:27.737241 | 2019-08-27T10:40:59 | 2019-08-27T10:40:59 | 204,671,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | test-hello.R | context("test-hello")
test_that("multiplication works", {
expect_equal(times2(2), 4)
})
|
18db1951b80f355c3a21e773a548eaa8706b6489 | eb337ade6a7dc65b2ecc3d295a674cb7222e4779 | /Exploration_4.R | 2a1de252a495ba6f19e50e7ee0a8856c7ca332de | [
"MIT"
] | permissive | mathmanda/AirbnbExplorations | 6cd410c76615a2e0aaa855b8736929eab8c2b50d | b811fe138b7087c517a6b94f24d499d750b6e384 | refs/heads/master | 2021-01-25T04:26:43.200027 | 2017-06-15T20:22:48 | 2017-06-15T20:22:48 | 93,440,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,828 | r | Exploration_4.R | library(ggplot2) # graphics
library(dplyr) # data wrangling
library(cowplot) #plot grids
library(knitr) # fancy tables
library(GGally) # scatterplot matrices
library(rpart) #regression and classification trees
library(rpart.plot) #plots of classification trees
library(car) # marginal model plots
seattle<-read.csv('Airbnb_Seattle.csv')
currency_to_numeric <- function(x){
# Convert currencies to numeric i.e. strip dollar signs and commas
x <- gsub('\\$','', as.character(x))
x <- gsub('\\,','', as.character(x))
x <- as.numeric(x)
return(x)
}
seattle<-seattle %>% mutate(price = currency_to_numeric(price))
showcode<-F
#Let's eliminate all variables except neighbourhood and cancellation policies,
# and filter out the irrelevant neighbourhoods.
seattle1<-seattle %>%
select(cancellation_policy,neighbourhood_cleansed)%>%
filter( (neighbourhood_cleansed == 'Alki' | neighbourhood_cleansed == 'Belltown'))
#Create a visualization showing the different distributions of cancellation policies
ggplot(seattle1, aes(x = neighbourhood_cleansed, fill = cancellation_policy)) +
geom_bar(position = 'fill') + coord_flip() + ylab('Proportion')+ xlab('Neighborhood')+
ggtitle('Cancellation Policy in Two Neighborhoods')
#Descriptive statistics:
mytable1<-seattle1 %>% filter(cancellation_policy=='strict')%>%
group_by(neighbourhood_cleansed) %>% tally()
mytable2<-seattle1 %>% group_by(neighbourhood_cleansed) %>% tally()
x1<-mytable1[[1,2]]
x2<-mytable1[[2,2]]
n1<-mytable2[[1,2]]
n2<-mytable2[[2,2]]
p1hat= x1/n1
p2hat= x2/n2
#Difference of proportions tests:
#Parametric (Z-test)
phatdiff = p1hat - p2hat
ppooled = (x1+x2)/(n1+n2)
stderror = sqrt(ppooled*(1-ppooled)*(1/n1+1/n2))
tstat = phatdiff/stderror
pvalue<- 2*pnorm(tstat)
#Nonparametric (Chi-Squared-test)
prop.test(x = c(x1,x2), n = c(n1,n2))
|
75746f57d121ea7f8936881af8a2001c515a874d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/likelihoodExplore/examples/likmultinom.Rd.R | 58e17935dbdc99ab405b926d38b598aa87c2403a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 228 | r | likmultinom.Rd.R | library(likelihoodExplore)
### Name: likmultinom
### Title: Multinomial Log Likelihood Function
### Aliases: likmultinom
### ** Examples
likmultinom(x = rmultinom(n = 2, size = 3, prob = .4),
size = 3, prob = .4)
|
8575a68e6990bb2872cf75e60e378f9e63303e6c | c4ee384cbb2a7071832a87c27e19bdb9f1662d66 | /scripts/nate_eda_lab.R | e59c2ebfbcfd4455d76ea3a7d6445ff591e5df4f | [] | no_license | naterowan00/cloth_filter | eb836ec1b0e22a910bd7ecadf26d8627dfe94843 | e57eee69027d15172118dac80c50f597454cd4a1 | refs/heads/master | 2022-10-07T12:10:38.467989 | 2020-06-10T05:46:23 | 2020-06-10T05:46:23 | 269,202,652 | 2 | 1 | null | 2020-06-10T05:46:24 | 2020-06-03T21:55:41 | Rich Text Format | UTF-8 | R | false | false | 4,132 | r | nate_eda_lab.R | ###### Exploratory Data Analysis of Lab Data #######
# Load in libraries
library(tidyverse); theme_set(theme_minimal())
library(lubridate)
library(broom)
library(glue)
library(patchwork)
# Read in the data
lab_data <- readRDS("datasets/clean_lab_data.rda")
# Create a tibble of just data from the filter
filter_data <- lab_data %>%
filter(treatment == "filter")
# Remove crazy outliers
no_outliers <- filter_data %>%
filter(
bod < 1000 | is.na(bod),
tss < 1000
)
# Create a tibble of just data from the clarifier
clarifier_data <- lab_data %>%
filter(treatment == "clarifier")
# effluent vs influent plots ----------------------------------------------
# tss
tss_plot <- filter_data %>%
ggplot(aes(x = datetime, y = tss)) +
geom_point(aes(color = flow), size = 2, show.legend = FALSE) +
scale_x_datetime(
limits = c(as_datetime("2019-01-01"), as_datetime("2020-01-01"))
)
# vss
filter_data %>%
ggplot(aes(datetime, vss)) +
geom_point(aes(color = flow), size = 2)
# cod
filter_data %>%
ggplot(aes(datetime, cod)) +
geom_point(aes(color = flow), size = 2)
# bod
filter_data %>%
ggplot(aes(datetime, bod)) +
geom_point(aes(color = flow), size = 2)
# cod vs bod
filter_data %>%
filter(flow == "influent") %>%
ggplot(aes(x = bod, y = cod)) +
geom_point()
# distribution tables and plots ------------------------------------------------------
# Append a constant `na` column (value 0) to a summary table; used below
# with map_if()/has_n_col() so every five-number summary ends up with the
# same six columns before they are row-bound.
# NOTE(review): relies on dplyr::mutate from the attached tidyverse.
add_na_col <- function(x){
  mutate(x, na = 0)
}
# Predicate: does table `x` have exactly `n` columns (default 6)?
# Used below to spot summary() outputs that are missing the NA column.
has_n_col <- function(x, n = 6){
  ncol(x) == n
}
parameter_names <- filter_data %>%
select_if(is.numeric) %>%
colnames()
# Five Number Summary of influent filter data
influent_summary <- filter_data %>%
filter(flow == "influent") %>%
select_if(is.numeric) %>%
map(~tidy(summary(.x))) %>%
map_if(., has_n_col, add_na_col) %>%
do.call(rbind, .) %>%
add_column(parameter = parameter_names, .before = "minimum")
# Five Number Summary of effluent filter data
effluent_summary <- filter_data %>%
filter(flow == "effluent") %>%
select_if(is.numeric) %>%
map(~tidy(summary(.x))) %>%
map_if(., has_n_col, add_na_col) %>%
do.call(rbind, .) %>%
add_column(parameter = parameter_names, .before = "minimum")
# tss boxplot, sorted by flow
no_outliers %>%
ggplot(aes(x = flow, y = tss)) +
geom_boxplot()
# vss boxplot, sorted by flow
no_outliers %>%
ggplot(aes(x = flow, y = vss)) +
geom_boxplot()
# bod boxplot, sorted by flow
no_outliers %>%
ggplot(aes(x = flow, y = bod)) +
geom_boxplot()
# cod boxplot, sorted by flow
no_outliers %>%
ggplot(aes(x = flow, y = cod)) +
geom_boxplot()
# relationships between parameters ----------------------------------------
### influent water quality vs effluent water quality. I have removed crazy outliers
### all plots indicate a weak, positive relationship. Weaker than I expected, and with
### a more shallow slope than I expected
# tss
no_outliers %>%
select(datetime:sample_type, tss) %>%
pivot_wider(names_from = treatment:flow, values_from = tss) %>%
filter(filter_effluent < 400) %>%
ggplot(aes(x = filter_influent, y = filter_effluent)) +
geom_point() +
geom_smooth(method = "lm", se = F)
### linear model for TSS. y = 18.797 + .105x, R-squared = .2003
no_outliers %>%
select(datetime:sample_type, tss) %>%
pivot_wider(names_from = treatment:flow, values_from = tss) %>%
lm(filter_effluent ~ filter_influent, .) %>%
summary()
# vss
no_outliers %>%
select(datetime:sample_type, vss) %>%
pivot_wider(names_from = treatment:flow, values_from = vss) %>%
filter(filter_effluent < filter_influent) %>%
ggplot(aes(x = filter_influent, y = filter_effluent)) +
geom_point()
# bod
no_outliers %>%
select(datetime:sample_type, bod) %>%
pivot_wider(names_from = treatment:flow, values_from = bod) %>%
ggplot(aes(x = filter_influent, y = filter_effluent)) +
geom_point()
# cod
no_outliers %>%
select(datetime:sample_type, cod) %>%
pivot_wider(names_from = treatment:flow, values_from = cod) %>%
filter(filter_effluent < 800) %>%
ggplot(aes(x = filter_influent, y = filter_effluent)) +
geom_point()
|
7232a32c6a9fae4196db6ff917dd1ac58cda8efd | 8c4244e9c045eb31f02308316bdb8ea4d27ba1c7 | /plot1.R | e699234f390e392ec256fd3dd1f5e41bbb91d87b | [] | no_license | highsmith/ExData_Plotting1 | a663b56a4fa39099e0f78805a34a47ff773e8b8f | 14cec9ee09ad643aeb58d67bc3d7677a647db56e | refs/heads/master | 2021-01-21T20:22:51.273909 | 2014-05-11T21:26:26 | 2014-05-11T21:26:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 541 | r | plot1.R | ##read file
powerD <- read.table("./data/household_power_consumption.txt", header=T,sep=";",stringsAsFactors=F,na.string="?")
##subset data to required dates
subpowerD <- powerD[powerD$Date %in% c("1/2/2007","2/2/2007"),]
##open png device
png("./plots/plot1.png", w=480, h=480)
##write plot
hist(subpowerD$Global_active_power
, xlab="Global Active Power (kilowatts)"
, main="Global Active Power"
, col="red")
##close png device
dev.off()
|
36ceca9426178200fb8e6bc620c7d6deacbd6740 | 7187292199b342afcc71f7e21ae087624c4ef401 | /plot3.R | 7f4adcfaa58f045db5b7fd158b5a1dbad7a71d36 | [] | no_license | ofelia90/ExData_Plotting1 | 7740b539dcf740283d92e283dd7693ddbbc4a080 | 140d537b54372edc0f14d83f2da86187dcce1779 | refs/heads/master | 2020-12-31T07:10:44.226609 | 2015-11-08T21:11:44 | 2015-11-08T21:11:44 | 45,796,599 | 0 | 0 | null | 2015-11-08T20:03:34 | 2015-11-08T20:03:34 | null | UTF-8 | R | false | false | 1,046 | r | plot3.R | library(sqldf)
library(png)
library(ggplot2)
# reading the data
df<-read.csv2("exdata_data_household_power_consumption/household_power_consumption.txt", header = TRUE, sep=";", stringsAsFactors = FALSE, na.strings = "?")
# converting Date and Time variables to Date/Time classes
df$Date <- as.Date(df$Date , "%d/%m/%Y")
df$Time <- paste(df$Date, df$Time, sep=" ")
df$Time <- strptime(df$Time, "%Y-%m-%d %H:%M:%S")
# subsetting data in those two dates
data<-subset(df, df$Date == "2007-02-01" | df$Date=="2007-02-02")
# converting column to be numeric
data$Global_active_power <- as.numeric(as.character(data$Global_active_power))
#plotting and saving file (polish names of the week days stayed...)
png(file="plot3.png")
with(data, plot(Time, Sub_metering_1, type="l"))
with(data, lines(Time, Sub_metering_2, type="l", col="red"))
with(data, lines(Time, Sub_metering_3, type="l", col="blue"))
legend("topright", lty=c(1,1,1), col=c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
0876707187984bbca3217407ae03e21d2e1c9156 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/AER/examples/USConsump1950.Rd.R | abed51dc5bc1fe2ddd56978b4bb380f9013a1d22 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 973 | r | USConsump1950.Rd.R | library(AER)
### Name: USConsump1950
### Title: US Consumption Data (1940-1950)
### Aliases: USConsump1950
### Keywords: datasets
### ** Examples
## Greene (2003)
## data
data("USConsump1950")
usc <- as.data.frame(USConsump1950)
## label the two-level war indicator as no/yes
usc$war <- factor(usc$war, labels = c("no", "yes"))
## Example 2.1
## empty plot frame, then label each observation with its year
plot(expenditure ~ income, data = usc, type = "n", xlim = c(225, 375), ylim = c(225, 350))
with(usc, text(income, expenditure, time(USConsump1950)))
## single model
fm <- lm(expenditure ~ income, data = usc)
summary(fm)
## different intercepts for war yes/no
fm2 <- lm(expenditure ~ income + war, data = usc)
summary(fm2)
## compare: F test of whether the war dummy improves the fit
anova(fm, fm2)
## visualize
abline(fm, lty = 3)
## peacetime line: intercept and slope of fm2
abline(coef(fm2)[1:2])
## wartime line: intercept shifted by the war coefficient, same slope
abline(sum(coef(fm2)[c(1, 3)]), coef(fm2)[2], lty = 2)
## Example 3.2
## R^2 of pooled fit vs. peacetime-only fit vs. dummy-variable fit
summary(fm)$r.squared
summary(lm(expenditure ~ income, data = usc, subset = war == "no"))$r.squared
summary(fm2)$r.squared
|
155aa1c1b6150f56961122907ab040c427beace8 | 0f86a75b7631f8b19895c24bc15a1ead720c3738 | /R/p.soft.omni.R | eb716c1b66f4a744c509afcfb9c8a556a2c23bcd | [] | no_license | cran/TFisher | acad8f6088559ce54131aa07dd8262de8fc582ed | c9a1d8fdd7e50c25e9baf2db3789198eb1de75e0 | refs/heads/master | 2021-09-10T06:18:55.516622 | 2018-03-21T11:05:33 | 2018-03-21T11:05:33 | 109,835,846 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,075 | r | p.soft.omni.R | #' CDF of omnibus soft-thresholding Fisher's p-value combination statistic under the null hypothesis.
#' @param q - quantile, could be a vector.
#' @param n - dimension parameter, i.e. the number of p-values to be combined.
#' @param TAU1 - a vector of truncation parameters (=normalization parameters). Must be in non-descending order.
#' @param M - correlation matrix of the input statistics. Default = NULL assumes independence.
#' @return The left-tail probability of the null distribution of omnibus soft-thresholding Fisher's p-value combination statistic.
#' @seealso \code{\link{stat.soft.omni}} for the definition of the statistic.
#' @references 1. Hong Zhang and Zheyang Wu. "TFisher Tests: Optimal and Adaptive Thresholding for Combining p-Values", submitted.
#'
#' @examples
#' q = 0.01
#' n = 20
#' TAU1 = c(0.01, 0.05, 0.5, 1)
#' M = matrix(0.3,20,20) + diag(1-0.3,20)
#' p.soft.omni(q=q, n=n, TAU1=TAU1, M=M)
#' @export
#' @importFrom mvtnorm pmvnorm
p.soft.omni <- function(q, n, TAU1, M=NULL){
  ## Soft thresholding is the TFisher special case in which both truncation
  ## vectors coincide, so delegate passing TAU1 twice.
  p.tfisher.omni(q, n, TAU1, TAU1, M)
}
|
7b65c4024056dd6bbc11b3eda8de99f7c5d268a0 | 007ada87a0e07f16bf2596a41f28d93b5182bd17 | /R/Lace Analysis Code/histograms.R | e38854265f81f6a5049a153fad43d25b5ab77cb1 | [] | no_license | spsanderson/bmhmc-sql | 94f6e8e243830823af48c71d43cb53f982cf56cd | e2a4d7ea746b094f13c39b8a18b1849077e4f318 | refs/heads/master | 2023-07-07T02:29:33.556474 | 2023-07-03T15:44:55 | 2023-07-03T15:44:55 | 11,010,270 | 9 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,660 | r | histograms.R | # Histograms of Lace Failures, non-failures and faceted
# Exploratory density plots of LACE readmission scores.
# Expects columns TOTAL.LACE, LACE.ER.SCORE, SEX and FAILURE; per the plot
# titles below, FAILURE == 1 marks readmitted patients.
lace <- read.csv("lace for R.csv")
# Required libraries
library(ggplot2)
# Density of total LACE score, one curve per sex
qplot(TOTAL.LACE,
      data = lace,
      colour = SEX,
      geom = "density",
      size = I(1.25),
      main = "Density Graphs for Total Lace Score by Sex")
# Density of total LACE score, one curve per failure status
qplot(TOTAL.LACE,
      data = lace,
      colour = factor(FAILURE),
      geom = "density",
      size = I(1.25),
      main = "Density Graphs for Total Lace Score by Failure")
# Smoothed relationship between total score and ER sub-score, faceted by sex
qplot(TOTAL.LACE, LACE.ER.SCORE,
      data = lace,
      facets = SEX ~ .,
      geom = "smooth",
      main = "Total Lace Score by Lace ER Visits Grouped by Sex")
# Breaking data into failure = F and non Failure = N
f <- data.frame(lace[lace$FAILURE == 1,])
n <- data.frame(lace[lace$FAILURE == 0,])
qplot(TOTAL.LACE,
      data = f,
      facets = SEX ~ .,
      geom = "density",
      main = "Density Graph for Readmits")
qplot(TOTAL.LACE,
      data = n,
      facets = SEX ~ .,
      geom = "density",
      main = "Density Graph for Patients Not Readmitted")
# Combined views: facet by one grouping variable, colour by the other
qplot(TOTAL.LACE,
      data = lace,
      colour = SEX,
      facets = FAILURE ~ .,
      geom = "density",
      size = I(1.25),
      main = "Density Graphs: Total Lace Grouped by Failure; Color = Failure")
qplot(TOTAL.LACE,
      data = lace,
      colour = factor(FAILURE),
      facets = SEX ~ .,
      geom = "density",
      size = I(1.25),
      main = "Density Graphs: Total lace Grouped by Sex; Color = Failure")
qplot(TOTAL.LACE, FAILURE,
data = lace,
geom = "smooth",
main = "Total Lace Score by Failure") |
97aeb1a844f8bb65023192839ad551c4de4ea7f9 | e65cd34a3ab0a63df9d10f02767d7fd017e846cb | /.Rprofile | 4cdcb21d69ab0a037a03ea5110f47872d9a8b790 | [] | no_license | uribo/argama | 75e64fcca801f1ddabfdd5b8cd99fac5452c8f0e | c5011154f800fab09eb59dad0dea973cd6a4486a | refs/heads/master | 2021-05-16T16:30:24.578255 | 2018-10-13T23:37:44 | 2018-10-13T23:37:44 | 119,823,798 | 0 | 0 | null | 2019-07-04T02:15:09 | 2018-02-01T10:59:07 | HTML | UTF-8 | R | false | false | 32 | rprofile | .Rprofile | options(blogdown.author = NULL)
|
24ad228638f69b99280900322d90fc52284b0945 | 52e569f75e990862d33b3d8969c4dba89d7622a4 | /shiny.r | 22c8c90b90514f18701f8f5335815dab835f0072 | [] | no_license | danielbonhaure/Corona | e224921ccadc3ac16a5d4419f83048b4778bd03c | c3a58f9b79ee70836b5bfb48992dfdf663fbd833 | refs/heads/master | 2021-03-27T04:04:18.428466 | 2020-05-03T03:54:53 | 2020-05-03T03:54:53 | 247,784,828 | 1 | 1 | null | 2020-03-16T18:06:48 | 2020-03-16T18:06:48 | null | UTF-8 | R | false | false | 26,482 | r | shiny.r | library(shiny)
library(leaflet)
library(RColorBrewer)
library(rgdal)
library(RCurl)
library(plotly)
library(viridis)
library(tidyverse)
library(geojsonsf)
library(sf)
# Placeholder flag, overwritten later in the app (note T/F instead of TRUE/FALSE)
variable <-F
# names(data)
# Pre-built tweet-intent link used by the share button
url <- "https://twitter.com/intent/tweet?url=https://danielbonhaure.shinyapps.io/covid-19"
#
# see: https://carto.com/developers/sql-api/reference/#operation/getSQLStatement (to understand the format parameter)
URL <- RCurl::getURL("http://geo.stp.gov.py/user/dgeec/api/v2/sql?q=SELECT%20*%20FROM%20dgeec.paraguay_2002_departamentos&format=GeoJSON")
# URL <- "data/paraguayDepartamentos.geojson"
# Department polygons for Paraguay, keeping only the department name (Dpto)
sf_py <- geojsonsf::geojson_sf(URL) %>%
  dplyr::select(Dpto = departamen)
# Official 2020 population projection per department (DGEEC)
URL <- RCurl::getURL("https://www.dgeec.gov.py/microdatos/cuadro/b7dc2DEP01-Paraguay-Poblacion-total-por-anio-calendario-segun-sexo-y-departamento-2000-2025.csv")
populationPY <- read.csv(text = URL, stringsAsFactors = F) %>%
  # URL <- "data/paraguayPoblacion.csv"
  # populationPY <- read.csv(file = URL, stringsAsFactors = F) %>%
  # rows 2-19 (18 rows) hold the departments; upper-case names so the
  # "NEEMBUCU" spelling fix lets them join against sf_py$Dpto
  dplyr::select(Dpto = X., Pop = X2020) %>%
  dplyr::filter(dplyr::between(dplyr::row_number(), 2, 19)) %>%
  dplyr::mutate(Dpto = toupper(Dpto), Pop = as.numeric(Pop)) %>%
  dplyr::mutate(Dpto = ifelse(Dpto == 'NEEMBUCU', 'ÑEEMBUCU', Dpto))
# URL <- RCurl::getURL("https://raw.githubusercontent.com/danielbonhaure/Corona/master/data/py_confirmed_cases.csv")
# dataCasesPY <- read.csv(text = URL, check.names = F, stringsAsFactors = F) %>% dplyr::select(-Cod)
# Cumulative counts per department, one column per date (differenced later
# in the server to obtain "new cases over period")
dataCasesPY <- read.csv(file = "data/py_confirmed_cases.csv", check.names = F, stringsAsFactors = F) %>% dplyr::select(-Cod)
# URL <- RCurl::getURL("https://raw.githubusercontent.com/danielbonhaure/Corona/master/data/py_deaths.csv")
# dataDeathsPY <- read.csv(text = URL, check.names = F, stringsAsFactors = F) %>% dplyr::select(-Cod)
dataDeathsPY <- read.csv(file = "data/py_deaths.csv", check.names = F, stringsAsFactors = F) %>% dplyr::select(-Cod)
# Date columns are the ones containing "/"; normalise them all to %m/%d/%y
jour<-names(dataCasesPY%>%select(contains( "/")))
jourDate<- as.Date(jour, "%m/%d/%y")
names(dataCasesPY)[str_detect(names(dataCasesPY), "/")]<-format.Date(jourDate, "%m/%d/%y")
names(dataDeathsPY)[str_detect(names(dataDeathsPY), "/")]<-format.Date(jourDate, "%m/%d/%y")
# Attach population and counts to every department; left joins keep all
# departments, so those with no reported data get NA columns here
dataCasesPY <- sf_py %>% sf::st_drop_geometry() %>%
  dplyr::left_join(populationPY, by = "Dpto") %>%
  dplyr::left_join(dataCasesPY, by = "Dpto")
dataDeathsPY <- sf_py %>% sf::st_drop_geometry() %>%
  dplyr::left_join(populationPY, by = "Dpto") %>%
  dplyr::left_join(dataDeathsPY, by = "Dpto")
# Smallest power of 10 that is >= x (returns x itself when x is already an
# exact power of 10). Vectorized; used to pick round upper bounds for legends.
arrondi <- function(x) {
  10^ceiling(log10(x))
}
# Departments absent from the case/death tables joined in as NA above;
# treat missing reports as zero counts for mapping and plotting.
dataDeathsPY[is.na(dataDeathsPY)]<- 0
dataCasesPY[is.na(dataCasesPY)]<- 0
ui <- bootstrapPage(
tags$style(type = "text/css", "html, body {width:100%;height:100%}",
HTML( ".panel-default {background-color: rgb(256, 256, 256,0.5);
padding : 10px;;}
.panel-title {background-color: rgb(256, 256, 256,0.8);
padding : 10px;
border-style: solid;
border-color: grey;}
.panel-credits {background-color: rgb(256, 256, 256,1);
padding : 15px;
border-style: solid;
border-color: black;}
")
),
leafletOutput("map", width = "100%", height = "93%"),
column(6, HTML("<b><a href='https://www.linkedin.com/in/daniel-bonhaure/'>Daniel BONHAURE</a></b></br>
Based on work of <b><a href='https://www.linkedin.com/in/thibaut-fabacher'>Thibaut FABACHER</a></b></br>
For the rest of the world: <b><a href='https://thibautfabacher.shinyapps.io/covid-19'>COVID-19 outbreak</a></b>")),
column(2, br(), actionButton("twitter_share",
label = "Share",
icon = icon("twitter"),
onclick = sprintf("window.open('%s')",url))
),
column(2, br(), checkboxInput("plotEvolT", "Show Evolution",F)
),
column(2, br(), checkboxInput("credits", "Credits", FALSE)
),
absolutePanel(id = "input_date_control",class = "panel panel-default",bottom = 60, left = 10, draggable = F,
selectInput("choices", "Cases or Deaths ?", choices = c("Cases","Deaths"),selected = "Cases"),
uiOutput("Slider"),
helpText("The detail of each departament can be obtained by clicking on it."),
uiOutput("selection"),
checkboxInput("legend", "Show legend", TRUE)
),
uiOutput("Credits"),
uiOutput("plotEvol"),
absolutePanel(id = "name",class = "panel panel-title",top = 10, left = 100, HTML("<h1>COVID-19 outbreak - Paraguay</h1>"),draggable = T)
)
server <- function(input, output, session) {
dataDpto<- reactive({
if(!is.null(input$choices)){
if(input$choices == "Cases"){
return( dataCasesPY)
}else{
return( dataDeathsPY)
}}
})
maxTotal<- reactive( max(dataDpto()%>%select(-Pop)%>%select_if(is.numeric), na.rm = T)
)
maxTotalPrevalence<- reactive( max(dataDpto()%>%select(-Pop)%>%select_if(is.numeric)%>%mutate_all(function(x) x/dataDpto()$Pop*100000), na.rm = T)
)
#
#
Top5<-reactive( unique(dataDpto()$Dpto[order(dataDpto()[,dim(dataDpto())[2]]%>%unlist(),decreasing = T)][1:5])
)
#
#
#
output$map <- renderLeaflet({
# Use leaflet() here, and only include aspects of the map that
# won't need to change dynamically (at least, not unless the
# entire map is being torn down and recreated).
leaflet(data = sf_py) %>%
setView(-59, -23.5, zoom = 7)
})
pal <- reactive({
if(!is.null(input$choices)){
if(input$choices == "Deaths"){
return( colorNumeric(c("#FFFFFFFF" , rev(inferno(maxTotal()+1, begin = 0, end = 0.6))), domain = c(0,log(arrondi(maxTotal()+1)))) )
}else{
return( colorNumeric(c("#FFFFFFFF" , rev(inferno(maxTotal()+1, begin = 0.3, end = 0.9))), domain = c(0,log(arrondi(maxTotal()+1)))) )
}}
})
pal2 <- reactive({
if(!is.null(input$choices)){
if(input$choices == "Deaths"){
return( colorNumeric(c("#FFFFFFFF", rev(inferno(maxTotal()+1, begin = 0, end = 0.6))), domain = c(0,log(arrondi(maxTotalPrevalence()+1)))) )
}else{
return( colorNumeric(c("#FFFFFFFF", rev(inferno(maxTotal()+1, begin = 0.3, end = 0.9))), domain = c(0,log(arrondi(maxTotalPrevalence()+1)))) )
}}
})
observe({
casesDeath<- ifelse(input$choices == "Cases","Cases","Deaths")
if (!is.null(input$day1)) {
indicator<-format.Date(input$day1, "%m/%d/%y")
}else{
indicator = format.Date(max(jourDate), "%m/%d/%y")
}
if (!is.null(input$day2)) {
indicator2<-format.Date(input$day2-c(1,0), "%m/%d/%y")
}else{
indicator2 =format.Date(c(min(jourDate)-1,max(jourDate)), "%m/%d/%y")
}
if(is.null(input$variable)){
}else{
variable<- input$variable
if(variable =="Total cases/population"){
# nCases
countries2 <- merge(sf_py,
dataDpto(),
by.x = "Dpto",
by.y = "Dpto",
sort = FALSE)
country_popup <- paste0("<strong>Dpto: </strong>",
countries2$Dpto,
"<br><strong>",
"Total cases/population :",
" </strong>",
round(countries2[[indicator]]/countries2$Pop*100000,2)," /100 000",
"<br><strong>Population : </strong>",
round(countries2$Pop),
"<br><strong>Date : </strong>",
format.Date(as.Date(indicator, "%m/%d/%y"), "%Y/%m/%d"))
leafletProxy("map", data = countries2)%>%
addPolygons(fillColor = pal2()(log((countries2[[indicator]]/countries2$Pop*100000)+1)),
layerId = ~Dpto,
fillOpacity = 1,
color = "#BDBDC3",
weight = 1,
popup = country_popup)
}else if(variable =="Total cases"){
countries2 <- merge(sf_py,
dataDpto(),
by.x = "Dpto",
by.y = "Dpto",
sort = FALSE)
country_popup <- paste0("<strong>Country: </strong>",
countries2$Dpto,
"<br><strong>",
"Total ",casesDeath," :",
" </strong>",
round(countries2[[indicator]],2),
"<br><strong>Population : </strong>",
round(countries2$Pop),
"<br><strong>Date : </strong>",
format.Date(as.Date(indicator, "%m/%d/%y"), "%Y/%m/%d"))
leafletProxy("map", data = countries2)%>%
addPolygons(fillColor = pal()(log((countries2[[indicator]])+1)),
fillOpacity = 1,
layerId = ~Dpto,
color = "#BDBDC3",
weight = 1,
popup = country_popup)
}else if(variable =="New cases over period"){
dataDptoSel<-dataDpto()%>%select(Dpto, Pop)
if(indicator2[1] == format.Date(min(jourDate)-1, "%m/%d/%y")){
dataDptoSel$ncases<-dataDpto()[,indicator2[2]]
}else{
dataDptoSel$ncases<-dataDpto()[,indicator2[2]]-dataDpto()[,indicator2[1]]
}
# nCases
countries2 <- merge(sf_py,
dataDptoSel,
by.x = "Dpto",
by.y = "Dpto",
sort = FALSE)
country_popup <- paste0("<strong>Country: </strong>",
countries2$Dpto,
"<br><strong>",
"New ",casesDeath," over period :",
" </strong>",
countries2$ncases,
"<br><strong>Population : </strong>",
round(countries2$Pop),
"<br><strong>Period : </strong>",
format.Date(as.Date(indicator2[1], "%m/%d/%y"), "%Y/%m/%d"),
" - ",
format.Date(as.Date(indicator2[2], "%m/%d/%y"), "%Y/%m/%d"))
leafletProxy("map", data = countries2)%>%
addPolygons(fillColor = pal()(log(countries2$ncases+1)),
fillOpacity = 1,
color = "#BDBDC3",
layerId = ~Dpto,
weight = 1,
popup = country_popup)
}else{
dataDptoSel<-dataDpto()%>%select(Dpto, Pop)
if(indicator2[1] == format.Date(min(jourDate)-1, "%m/%d/%y")){
dataDptoSel$ncases<-dataDpto()[,indicator2[2]]
}else{
dataDptoSel$ncases<-dataDpto()[,indicator2[2]]-dataDpto()[,indicator2[1]]
}
# nCases
countries2 <- merge(sf_py,
dataDptoSel,
by.x = "Dpto",
by.y = "Dpto",
sort = FALSE)
country_popup <- paste0("<strong>Country: </strong>",
countries2$Dpto,
"<br><strong>",
"New ",casesDeath," over period / population :",
" </strong>",
round(countries2$ncases/countries2$Pop*100000,2)," /100 000",
"<br><strong>Population : </strong>",
round(countries2$Pop),
"<br><strong>Period : </strong>",
format.Date(as.Date(indicator2[1], "%m/%d/%y"), "%Y/%m/%d"),
" - ",
format.Date(as.Date(indicator2[2], "%m/%d/%y"), "%Y/%m/%d"))
leafletProxy("map", data = countries2)%>%
addPolygons(fillColor = pal2()(log(countries2$ncases/countries2$Pop*100000+1)),
fillOpacity = 1,
color = "#BDBDC3",
layerId = ~Dpto,
weight = 1,
popup = country_popup)
}
}
})
observe({
if(is.null(input$variable)){
}else{
variable<- input$variable
proxy <- leafletProxy("map", data = sf_py)
# Remove any existing legend, and only if the legend is
# enabled, create a new one.
proxy %>% clearControls()
if (input$legend) {
if(variable %in% c("Total cases/population","New cases over period/population")){
if(round(maxTotalPrevalence())>0) {
proxy %>% addLegend(position = "bottomright",
pal = pal2(),opacity = 1,
bins = log(10^(seq(0,log10(arrondi(maxTotalPrevalence())),0.5))),
value = log(1:10^(log10(arrondi(maxTotalPrevalence())))),
data =log(1:10^(log10(arrondi(maxTotalPrevalence())))),
labFormat = labelFormat(transform = function(x) round(exp(x)) ,suffix = " /100 000")
)
} else {
proxy %>% addLegend(position = "bottomright",
pal = pal2(),opacity = 1,
bins = 1, value = c(0,1), data = 0,
labFormat = labelFormat(transform = function(x) x, suffix = " /100 000")
)
}
}else{
if(maxTotal()>1) {
proxy %>% addLegend(position = "bottomright",
pal = pal(),opacity = 1,
bins = log(10^(0:log10(arrondi(maxTotal())))),
value = log(1:10^(log10(arrondi(maxTotal())))),
data = log(10^(0:log10(arrondi(maxTotal())))),
labFormat = labelFormat(transform = exp )
)
} else {
proxy %>% addLegend(position = "bottomright",
pal = pal(),opacity = 1,
bins = 1, value = c(0,1), data = 0,
labFormat = labelFormat(transform = function(x) x)
)
}
}
}
}
})
output$Slider<-renderUI({
if(is.null(input$variable)){
}else{
if(input$variable %in% c("Total cases", "Total cases/population")){
sliderInput("day1", "Day", min(jourDate), max(jourDate),
value = c(max(jourDate)),animate = T, step = 1, ticks = T
)
}else{
sliderInput("day2", "Day", min(jourDate), max(jourDate),
value = c(max(jourDate)-7,max(jourDate)),animate = T, step = 1, ticks = T
)
}
}
})
output$selection <- renderUI({
if(input$choices =="Cases"){
radioButtons("variable", choices = c("New cases over period",
"New cases over period/population","Total cases", 'Total cases/population' ),
label = "Indicator")
}else{
radioButtons("variable", choices = list("Deaths over period"="New cases over period",
"Deaths over period/population"="New cases over period/population",
"Total deaths"="Total cases",
'Total deaths/population'='Total cases/population' ),
label = "Indicator")
}
})
output$plotEvol<-renderUI({
if (input$plotEvolT) {
tagList(absolutePanel(
id = "name",
class = "panel panel-credits",
top = 10,width = "700px",
right = 10,draggable = F,
plotlyOutput(outputId = "evol",width = "600px"),
actionButton("reset", "Reset Graph"),
actionButton("clear", "Clear all traces")
))
}
})
output$evol <-renderPlotly({
if(input$variable %in% c("Total cases/population","Total cases")){
df_evo<- dataDpto()%>%filter(Dpto%in% trace$data)%>%pivot_longer(cols = -c(Dpto,Pop),
values_to = "Cases",names_to = "Date")%>%
mutate(Date= lubridate::parse_date_time(Date, orders = c("mdy")))
if(input$variable=="Total cases/population"){
plot_ly(data = df_evo,x = ~Date, y = ~Cases/Pop*100000, color = ~Dpto, type = "scatter",mode = "lines")%>%
layout(yaxis = list( title = paste(input$choices,"/ 100 000")))
}else{
plot_ly(data = df_evo,x = ~Date, y = ~Cases, color = ~Dpto, type = "scatter",mode = "lines")%>%
layout(yaxis = list( title = input$choices))
}
}else{
df_evo<- dataDpto()%>%filter(Dpto%in% trace$data)
for(i in dim( df_evo)[2]:4) df_evo[i]<- df_evo[i]- df_evo[i-1]
df_evo<- df_evo%>%pivot_longer(cols = -c(Dpto,Pop),
values_to = "Cases",names_to = "Date")%>%
mutate(Date= lubridate::parse_date_time(Date, orders = c("mdy")))
if( input$variable=="New cases over period/population"){
plot_ly(data = df_evo,x = ~Date, y = ~Cases/Pop*100000, color = ~Dpto, type = "scatter",mode = "lines")%>%
layout(yaxis = list( title = paste(input$choices,"/ 100 000/day")))
}else{
plot_ly(data = df_evo,x = ~Date, y = ~Cases, color = ~Dpto, type = "scatter",mode = "lines")%>%
layout(yaxis = list( title = paste(input$choices,"/day")))
}
}
})
trace<- reactiveValues()
observe({trace$data<-Top5()
})
observeEvent(input$reset, {
for (i in 1: length(trace$data)){
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("deleteTraces",list(0))
}
if(input$variable %in% c("Total cases/population","Total cases")){
df_evo<- dataDpto()%>%filter(Dpto%in% Top5())%>%pivot_longer(cols = -c(Dpto,Pop),
values_to = "Cases",names_to = "Date")%>%
mutate(Date= lubridate::parse_date_time(Date, orders = c("mdy")))
if(input$variable=="Total cases/population"){
for (i in Top5()){
df_evoi<- df_evo%>%filter(Dpto == i)
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_evoi$Date ,
name =i ,
y = df_evoi$Cases/df_evoi$Pop*100000,
type = 'scatter',
mode = 'lines'))
}
}else{
for (i in Top5()){
df_evoi<- df_evo%>%filter(Dpto == i)
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_evoi$Date ,
name =i ,
y = df_evoi$Cases,
type = 'scatter',
mode = 'lines'))
}
}
}else{
df_evo<- dataDpto()%>%filter(Dpto%in% Top5())
for(i in dim(df_evo)[2]:4) df_evo[i]<-df_evo[i]-df_evo[i-1]
df_evo<-df_evo%>%pivot_longer(cols = -c(Dpto,Pop),
values_to = "Cases",names_to = "Date")%>%
mutate(Date= lubridate::parse_date_time(Date, orders = c("mdy")))
if( input$variable=="New cases over period/population"){
for (i in Top5()){
df_evoi<- df_evo%>%filter(Dpto == i)
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_evoi$Date ,
name =i ,
y = df_evoi$Cases/df_evoi$Pop*100000,
type = 'scatter',
mode = 'lines'))
}
}else{
for (i in Top5()){
df_evoi<- df_evo%>%filter(Dpto == i)
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_evoi$Date ,
name =i ,
y = df_evoi$Cases,
type = 'scatter',
mode = 'lines'))
}
}
}
trace$data<-Top5()
})
observeEvent(input$clear, {
for (i in 1: length(trace$data)){
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("deleteTraces",list(0))
}
trace$data<- NULL
})
observeEvent(input$map_shape_click, {
country_Click<- input$map_shape_click$id
if (!country_Click%in%trace$data & input$plotEvolT){
trace$data<-c(trace$data,country_Click)
if(input$variable %in% c("Total cases/population","Total cases")){
df_click<- dataDpto()%>%filter(Dpto%in% country_Click)%>%pivot_longer(cols = -c(Dpto,Pop),
values_to = "Cases",names_to = "Date")%>%
mutate(Date= lubridate::parse_date_time(Date, orders = c("mdy")))
if(input$variable=="Total cases/population"){
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_click$Date ,
name =country_Click ,
y = df_click$Cases/df_click$Pop*100000,
type = 'scatter',
mode = 'lines'))
}else{
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_click$Date ,
name =country_Click ,
y = df_click$Cases,
type = 'scatter',
mode = 'lines'))
}
}else{
df_click<- dataDpto()%>%filter(Dpto%in% country_Click)
for(i in dim( df_click)[2]:4) df_click[i]<- df_click[i]- df_click[i-1]
df_click<- df_click%>%pivot_longer(cols = -c(Dpto,Pop),
values_to = "Cases",names_to = "Date")%>%
mutate(Date= lubridate::parse_date_time(Date, orders = c("mdy")))
if( input$variable=="New cases over period/population"){
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_click$Date ,
name =country_Click ,
y = df_click$Cases/df_click$Pop*100000,
type = 'scatter',
mode = 'lines'))
}else{
plotlyProxy("evol", session) %>%
plotlyProxyInvoke("addTraces",
list(x =df_click$Date ,
name =country_Click ,
y = df_click$Cases,
type = 'scatter',
mode = 'lines'))
}
}
}
})
output$Credits <- renderUI({
if (input$credits) {
tagList(
absolutePanel(
id = "name",
class = "panel panel-credits",
top = "45%",
left = "45%",
HTML(
"<h1> Data Source : </h1>
<p> <li><a href='https://www.mspbs.gov.py/covid-19.php'>Ministerio de Salud Publica y Bienestar Social - Paraguay</a></li>
<li>COVID-19 Cases : <a href='https://www.mspbs.gov.py/reportes-covid19.html' target='_blank'>REPORTE PARAGUAY MSPBS COVID19</a></li>
<li>Paraguay population : <a href='https://www.dgeec.gov.py/microdatos/cuadro/b7dc2DEP01-Paraguay-Poblacion-total-por-anio-calendario-segun-sexo-y-departamento-2000-2025.csv' target='_blank'>Paraguay Population - DGEEC</a></li>
<li>Paraguay GeoJSON : <a href='http://geo.stp.gov.py/user/dgeec/api/v2/sql?q=SELECT%20*%20FROM%20dgeec.paraguay_2002_departamentos&format=GeoJSON' target='_blank'>Paraguay Departments - DGEEC</a></li>
<li>Paraguay Shapefile : <a href='http://geo.stp.gov.py/user/dgeec/tables/paraguay_2002_departamentos/public' target='_blank'>Paraguay Departments - DGEEC</a></li>
<li> <a href ='https://github.com/danielbonhaure/Corona' target='_blank'>Code on Github (Paraguay version)</a></li>
<li> <a href ='https://github.com/DrFabach/Corona' target='_blank'>Code on Github (Original version)</a></li>
<li> <a href = 'https://www.r-project.org/' target='_blank'>The R Project for Statistical Computing</a></li>
<li> <a href = 'https://shiny.rstudio.com/' target='_blank'>Shiny R package</a></li>
<li> <a href = 'https://leafletjs.com/' target='_blank'>Leaflet </a></li> </p>"
),
draggable = T
)
)
}
})
}
# Assemble and launch the Shiny app from the ui and server defined above
shinyApp(ui, server)
|
21a05fcee3ed7b2d0c55c4b96c042e0773cd2c14 | 5907502db5ee0ea54c5ebfe654605a2f60c459a6 | /Question5.R | 4620e50ab2e271615b030822711652cdaaace407 | [] | no_license | 97joseph/Parsimonius_Models | 2136a2cb349dc9a1e8d9d9fcddf8f325f88083c3 | e30fd9ea3457d50e7b94486889e513e241dc9844 | refs/heads/main | 2023-05-08T02:01:24.632639 | 2021-05-24T17:49:35 | 2021-05-24T17:49:35 | 369,995,327 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,722 | r | Question5.R | #### 5. (20 points) Partial least squares was used to model the yield of a chemical manufacturing process (Sect. 1.4). The data can be found in the AppliedPredictiveModeling package and can be loaded.
###The objective of this analysis is to find the number of PLS components that yields the optimal R2 value. PLS models with 1 through 10 components were each evaluated using five repeats of 10-fold cross-validation
#PLS finds components that summarize the variation of the predictors while simultaneously
#requiring these components to have maximum correlation with the response. PLS is a compromise between the objectives of predictor space dimension reduction and a predictive relationship with the response. PLS can be viewed as a supervised dimension reduction procedure;
#PCR is an unsupervised procedure
#if 10-fold cross-validation was repeated five times, 50 different held-out sets would be
#used to estimate model efficacy
```{r}
library(AppliedPredictiveModeling)
data(ChemicalManufacturingProcess)
```
###a. Using the "one-standard error" method, what number of PLS components provides the most parsimonious model?
##(b),(c),(d)
if two predictors are highly correlated, this implies that they are measuring the same underlying information.
Removing one should not compromise the performance of the model and might lead to a more precise and interpretable model.
one-standard error of RMSE
```{r}
#A parsimonious model is a model that has the highest level of explanation/prediction with as few predictor variables (x) as possible.
#The One Standard Error Rule can be used to compare models with different numbers of parameters in order to select the most parsimonious model with low error.
#To use, find model with minimum error, then select the simplest model whose mean falls within 1 standard deviation of the minimum
library(AppliedPredictiveModeling)
data(ChemicalManufacturingProcess)
library(caret)
chemical.df <- data.frame(ChemicalManufacturingProcess)
chemical.ZeroVar <- nearZeroVar(fingerprints.df, names = TRUE)
predictors <- dim(chemical.df)[2] - length(chemical.ZeroVar)
dat<-chemical.df
dat <- gamSim(1,n=400,dist="normal",scale=2)
b <- gam(y~s(x0)+s(x1)+s(x2)+s(x3),data=dat,method="REML")
b
## only the first 3 smoothing parameters are candidates for
## increasing here...
V <- sp.vcov(b)[1:3,1:3] ## the approx cov matrix of sps
d <- diag(V)^.5 ## sp se.
## compute the log smoothing parameter step...
d <- sqrt(2*length(d))/d
sp <- b$sp ## extract original sp estimates
sp[1:3] <- sp[1:3]*exp(d) ## apply the step
## refit with the increased smoothing parameters...
b1 <- gam(y~s(x0)+s(x1)+s(x2)+s(x3),data=dat,method="REML",sp=sp)
b;b1 ## compare fits
```
|
975376d2390352e0319b90187270680ae633f70c | dd435c8364b9d2e22bb525addb530d0b6c87b536 | /Code/R/RWithSQLServer/Pull From Local Database Example.R | 54861ad7e195a295ad24a868946f9247b41b7ed9 | [] | no_license | dmerson/ArizonaMastersCapstone | 4e671af40f94f1d008765d34fffb636314775e61 | 0077613e552c6ea2aab114492319634f41fa3d4c | refs/heads/master | 2020-03-22T18:56:25.791691 | 2018-07-23T04:10:58 | 2018-07-23T04:10:58 | 140,492,925 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,670 | r | Pull From Local Database Example.R | library(knitr)
library(tidyverse)
library(readxl)
library(readr)
library(tidyr)
library(RODBC)
#install.packages("sqldf")
#install.packages("DBI")
#remove.packages("DBI")
#install.packages("RSQLite")
library(RSQLite)
library(sqldf)
library(readxl)
## Execute a T-SQL statement (query or stored-procedure call) against the
## local SQL Server scholarship database and return the result set.
##
## Args:
##   sqlQuery: character scalar, the T-SQL to execute.
## Returns: the data.frame produced by RODBC::sqlQuery() (or, per RODBC
##   semantics, a character vector of error messages on query failure).
getSqlQueryFromSUDB <- function(sqlQuery){
  library(RODBC)
  server <- "POWER\\POWER17"  # alternate: "TARDIS\\TARDIS"
  database <- "SASCLONE"      # alternates: "ScholarshipAwardingProcess", "ScholarshipAwardingSystems"
  conn <- paste0("driver={SQL Server};server=", server,
                 ";database=", database, ";trusted_connection=true")
  print(conn)
  cn <- odbcDriverConnect(connection = conn)
  ## FIX: close the handle even if sqlQuery() errors; the original only
  ## reached odbcClose() on success, leaking the ODBC connection on failure.
  on.exit(odbcClose(cn), add = TRUE)
  sqlQuery(cn, sqlQuery)
}
#pull the algorithms
getSqlQueryFromSUDB("Select * from algorithms")
#run all algorithms
#getSqlQueryFromSUDB("RunAllAlgorithms 1, 1000, 310, 2")
# getSqlQueryFromSUDB("RunAlgorithm1 1, 1600, 300, 2")
# getSqlQueryFromSUDB("RunAlgorithm2 1, 1600, 300, 2")
# getSqlQueryFromSUDB("RunAlgorithm3 1, 1000, 310, 2")
# getSqlQueryFromSUDB("RunAlgorithm4 1, 1600, 300, 2")
# getSqlQueryFromSUDB("RunAlgorithm5 1, 1600, 300, 2")
# getSqlQueryFromSUDB("RunAlgorithm6 1, 1600, 300, 2")
# getSqlQueryFromSUDB("RunAlgorithm7 1, 1600, 300, 2")
#get Analysis
View(getSqlQueryFromSUDB("GetAnalysis 1, 1500, 130, 2, 1"))
View(getSqlQueryFromSUDB("GetAnalysis 2, 1500, 130, 2, 1"))
#get Analysis function
## Run the GetAnalysis stored procedure for one awarding group.
##
## Args:
##   awarding_group_id: id of the awarding group to analyse.
##   maximum_award, minimum_award: award-amount bounds.
##   max_applicants: maximum applicants per scholarship.
##   run_analysis_first: 1 to (re)run the awarding algorithms before reporting.
## Returns: the stored procedure's result set as a data.frame.
getAnalysis <- function (awarding_group_id,maximum_award,minimum_award,max_applicants,run_analysis_first){
  ## paste0 instead of paste(..., sep = ""); argument spacing kept identical
  sql <- paste0("GetAnalysis ", awarding_group_id, ",", maximum_award, ", ",
                minimum_award, ", ", max_applicants, ",", run_analysis_first)
  #print(sql)
  getSqlQueryFromSUDB(sql)
}
getAnalysis(1,1500,130,2,0)
#create awarding group
awardinggroupid=getSqlQueryFromSUDB("CreateAwardingGroup 'createdfromr'")[1,1]
#function for creating awarding group
## Create a new awarding group and return its generated database id.
##
## Args:
##   award_group_name: name for the new group. Must not contain single
##     quotes -- the SQL is built by string concatenation.
## Returns: the id held in the 1x1 result set of CreateAwardingGroup.
create_awarding_group_and_get_id <- function(award_group_name){
  sql <- paste0("CreateAwardingGroup '", award_group_name, "'")
  getSqlQueryFromSUDB(sql)[1, 1]
}
create_awarding_group_and_get_id("test2")
#insert into denormalized group
result <- getSqlQueryFromSUDB("[InsertIntoDenormalizedEntry] 5,'S1',1000.00,'A1',1")
#create function to insert data
## Insert one (scholarship, applicant) row into the denormalized entry table.
##
## Args:
##   awardinggroupid: id of the awarding group the row belongs to.
##   scholarshipName, scholarshipAmount: scholarship label and award amount.
##   applicantName, applicantRank: applicant label and preference rank.
## Returns: the result of the InsertIntoDenormalizedEntry stored procedure.
## WARNING(review): the SQL is built by string concatenation, so values
## containing quotes break the statement (and this is injectable); a
## parameterized call would be safer.
InsertDenormalizedData <- function(awardinggroupid,scholarshipName,scholarshipAmount,applicantName, applicantRank){
  sql <- paste0("[InsertIntoDenormalizedEntry] ", awardinggroupid, ",'",
                scholarshipName, "',", scholarshipAmount, ",'",
                applicantName, "',", applicantRank)
  #print(sql)
  getSqlQueryFromSUDB(sql)
}
InsertDenormalizedData(3,'S1',1000.00,'A2',2)
#insert into database from excel file
## Import the demo spreadsheet and insert every row into the denormalized
## entry table for a fixed awarding group (id 4). Column order assumed:
## scholarship name, amount, applicant name, applicant rank.
library(readxl)
DemoData <- tbl_df(read_excel("C:/Repos/Documents/scholarshipawardingprocess/Code/R/RWithSQLServer/Example Data/DemoData.xlsx"))
#View(DemoData)
count_Of_data <- nrow(DemoData)
awarding_group_id <- 4
## FIX: seq_len() instead of 1:count_Of_data so an empty spreadsheet inserts
## nothing (1:0 would loop over c(1, 0)); [[col]][i] extracts scalar values
## rather than 1x1 tibbles.
for (i in seq_len(count_Of_data)) {
  scholarship_name <- DemoData[[1]][i]
  scholarship_amount <- DemoData[[2]][i]
  applicant_name <- DemoData[[3]][i]
  applicant_rank <- DemoData[[4]][i]
  InsertDenormalizedData(awarding_group_id, scholarship_name,
                         scholarship_amount, applicant_name, applicant_rank)
}
#function to pull from excel file, get awarding group, insert into database, and get analysis
## Read an awarding-group spreadsheet (one row per scholarship/applicant pair:
## scholarship name, amount, applicant name, applicant rank), create a new
## awarding group, insert all rows, then run and return the analysis.
##
## Args:
##   awarding_group_name: label for the new awarding group.
##   filePath: path to the .xlsx file with columns in the order above.
##   maximum_award, minimum_award: award-amount bounds passed to GetAnalysis.
##   max_applicants: maximum applicants per scholarship.
##   run_analysis_first: 1 to (re)run the algorithms before reporting.
## Returns: the GetAnalysis result set for the newly created group.
create_analysis_from_spreadsheet <- function (awarding_group_name,filePath,maximum_award,minimum_award,max_applicants,run_analysis_first){
  awarding_group_id_for_new <- create_awarding_group_and_get_id(awarding_group_name)
  print(awarding_group_id_for_new)
  library(readxl)
  DemoData <- tbl_df(read_excel(filePath))
  View(DemoData)  # interactive inspection; no effect in batch runs
  count_Of_data <- nrow(DemoData)
  ## FIX: seq_len() guards against an empty spreadsheet (1:0 loops twice
  ## with invalid indices); [[col]][i] yields scalars, not 1x1 tibbles.
  for (i in seq_len(count_Of_data)) {
    scholarship_name <- DemoData[[1]][i]
    scholarship_amount <- DemoData[[2]][i]
    applicant_name <- DemoData[[3]][i]
    applicant_rank <- DemoData[[4]][i]
    InsertDenormalizedData(awarding_group_id_for_new, scholarship_name,
                           scholarship_amount, applicant_name, applicant_rank)
    print(scholarship_name)
    print(applicant_rank)
  }
  getAnalysis(awarding_group_id_for_new, maximum_award, minimum_award,
              max_applicants, run_analysis_first)
}
#create_analysis_from_spreadsheet("test case","C:/Repos/Documents/scholarshipawardingprocess/Code/R/RWithSQLServer/Example Data/DemoData.xlsx",1500,130,2,1)
#create_analysis_from_spreadsheet("test case2","C:/Repos/Documents/scholarshipawardingprocess/Code/R/RWithSQLServer/Example Data/DemoData2.xlsx",1500,130,2,1)
create_analysis_from_spreadsheet("exc","C:/Repos/Documents/scholarshipawardingprocess/Code/R/RWithSQLServer/Example Data/ImportedData/AwardingGroup1.xlsx", 12400,250,2,1)
get_analysis <-function(algorithm_id, awarding_group_id,maximum_award,minimum_award,max_applicants){
} |
3bf344a29c72f78e4402d078bb9e72685830bff2 | a3bd035667c854dd0ba974edc5e5fd9724c56a02 | /Rice_QTL_Final.R | 21b00182a960527b494126fc84516a21abd524eb | [] | no_license | tahia/RiceSaltQTLAnalysis | e45426ef15fd0c4fbf429bce7e824ddf3cd5fe80 | 22040e21bf0951fdff270ac5dd2834a97c09c2e6 | refs/heads/master | 2023-01-01T00:29:02.344301 | 2020-10-24T23:35:21 | 2020-10-24T23:35:21 | 210,711,285 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,245 | r | Rice_QTL_Final.R | library(qtl)
library(qtl2)
library(qtl2convert)
library(devtools)
library(devtools)
setwd("/home/taslima/data/JuengerLab/github/My_Gits/RiceSaltQTLAnalysis") #Choose directory
rm(list=ls())
load("Reproductive_Reload_V5_RQTL2.RData")
set.seed(1234)
cross$pheno$SEL<-ifelse(cross$pheno$SES <=5, 1,ifelse(cross$pheno$SES >= 7, 3,2))
cross$pheno$Grp<-paste(cross$pheno$Cyto_num,cross$pheno$SEL,sep = "")
cross2<-convert2cross2(cross)
map <- insert_pseudomarkers(cross2, step=1)
pr <- calc_genoprob(cross2, map, error_prob=0.002, cores=16)
############## TEST FOR SELECTION ONLY FOR REPRODUCTIVE STAGE
covar <- as.data.frame(cross2$pheno[,c(2,13)])
strata<-(covar$Cyto_num>1)
#strata<-cross$pheno$Grp
names(strata)<-cross$pheno$id
kinship_loco <- calc_kinship(pr, "loco")
#Interactive
sc1_rep_loco_tail_int <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar,intcovar = covar)
persc1_rep_loco_tail_int <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata,intcovar = covar)
#Additive
sc1_rep_loco_tail_add <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar)
persc1_rep_loco_tail_add <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata)
#Null
covar <- as.data.frame(cross2$pheno[,c(2)])
names(covar)<-"Cyto_num"
strata<-(covar$Cyto_num>1)
names(strata)<-cross2$pheno[,1]
sc1_rep_loco_tail_null <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar)
persc1_rep_loco_tail_null <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata)
##### get threshold for selection
sel_threshold<-c()
for(i in 3:11)
{
sel_threshold<-append(sel_threshold, as.numeric(quantile((persc1_rep_loco_tail_int[,i]- persc1_rep_loco_tail_null[,i]),probs = 0.95)))
}
###### find any QTL peak that pass this threshold
sel_sc1_rep_loco<-sc1_rep_loco_tail_int- sc1_rep_loco_tail_null
find_peaks(sel_sc1_rep_loco[,c(3:11)],map,sel_threshold)
find_peaks(sc1_rep_loco_tail_null,map,threshold = as.vector(summary(persc1_rep_loco_tail_null,alpha=0.05)))
###### Only Plant height 5@154 shows the SEL effect
############################### END OF TEST FOR THE EFFECT OF SELECTION ON REPRODUCTIVE STAGE
###### Now Start testing for additive and interactive effect of Cytoplasm as covariate
covar <- as.data.frame(cross2$pheno[,c(2)])
names(covar)<-"Cyto_num"
strata<-(covar$Cyto_num>1)
names(strata)<-cross2$pheno[,1]
#Interactive
sc1_rep_loco_cyt_int <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar,intcovar = covar)
persc1_rep_loco_cyt_int <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata,intcovar = covar)
#Additive
sc1_rep_loco_cyt_add <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar)
persc1_rep_loco_cyt_add <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata)
#Null
sc1_rep_loco_cyt_null <- scan1(pr, cross2$pheno, kinship_loco, cores=16)
persc1_rep_loco_cyt_null <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16)
##### get threshold for Interaction model where Cytoplasm is covar
int_cyt_threshold<-c()
for(i in 3:11)
{
int_cyt_threshold<-append(int_cyt_threshold, as.numeric(quantile((persc1_rep_loco_cyt_int[,i]- persc1_rep_loco_cyt_null[,i]),probs = 0.95)))
}
###### find any QTL peak that pass this threshold for Cyt int
cyt_int_sc1_rep_loco<-sc1_rep_loco_cyt_int- sc1_rep_loco_cyt_null
find_peaks(cyt_int_sc1_rep_loco[,c(3:11)],map,int_cyt_threshold)
########### PH QTL at chr5@172, FGN, FGW at chr10@58.48, SF at chr10@60, HI at chr 10@59 shows interaction that cross the threshold
#### These positions are relative to models there will move a bit up and down
##### get threshold for Additive model where Cytoplasm is covar
add_cyt_threshold<-c()
for(i in 3:11)
{
add_cyt_threshold<-append(add_cyt_threshold, as.numeric(quantile((persc1_rep_loco_cyt_add[,i]- persc1_rep_loco_cyt_null[,i]),probs = 0.95)))
}
###### find any QTL peak that pass this threshold for Cyt int
cyt_add_sc1_rep_loco<-sc1_rep_loco_cyt_add- sc1_rep_loco_cyt_null
find_peaks(cyt_add_sc1_rep_loco[,c(3:11)],map,add_cyt_threshold)
####### HI QTL at chr10@105 shows additive effect that cross the threshold
## So, overall, for all the traits of the reproductive phenotype, cytoplasm will be
## considered as both an additive and an interactive cofactor when testing for QTL.
## When asking for the effect of a single QTL at a given position, a linear model will
## be fitted, and cytoplasm will be included as a covariate if and only if it passes
## the threshold of the difference between the full and reduced models.
###########################
RepQ<-as.data.frame(find_peaks(sc1_rep_loco_cyt_int,map,as.vector(summary(persc1_rep_loco_cyt_int,alpha=0.05))))
color <- c("slateblue", "violetred", "green3")
for(i in 3:11) {
ymx <- max(c(as.data.frame(sc1_rep_loco_cyt_int)[,i],as.data.frame(sc1_rep_loco_cyt_add)[,i],as.data.frame(sc1_rep_loco_cyt_null)[,i]),na.rm =T) # overall maximum LOD score
plot(sc1_rep_loco_cyt_null, map, lodcolumn=i, col=color[1], main=colnames(cross2$pheno)[i],
ylim=c(0, ymx*1.02))
plot(sc1_rep_loco_cyt_add, map, lodcolumn=i, col=color[2], add=TRUE)
plot(sc1_rep_loco_cyt_int, map, lodcolumn=i, col=color[3], add=TRUE, lty=2)
abline(h = as.vector(summary(persc1_rep_loco_cyt_null, alpha= 0.05))[i],col=color[1])
abline(h = as.vector(summary(persc1_rep_loco_cyt_add, alpha= 0.05))[i],col=color[2])
abline(h = as.vector(summary(persc1_rep_loco_cyt_int, alpha= 0.05))[i],col=color[3])
legend("topleft", lwd=2, col=color, c("Null", "Cyt-Add", "Cyt-Int"), bg="gray90", lty=c(1,1,2))
}
save.image("Rep_QTL_Fin_v1.RData")
########## Now fit single QTL model
###### PH
covar_factor <- factor(covar$Cyto_num)
covar_matrix <- as.matrix(model.matrix( ~ covar_factor)[ , -1])
rep_PH_Q1<-fit1(pr[[1]][,,215],cross2$pheno[,'PH'],addcovar = covar,intcovar = covar)
test<-as.data.frame(attr(scan1coef(pr[,1],cross2$pheno[,'PH'],addcovar = covar,intcovar = covar,se = T),"SE"))
test<-scan1coef(pr[,1],cross2$pheno[,'PH'],addcovar = covar,intcovar = covar,se = T)
color <- c("slateblue", "violetred", "green3")
for(i in 3:11) {
plot(test, map[1], columns =c(1:3), col=color, main=colnames(cross2$pheno)[i])
plot(test$AB, map, lodcolumn=i, col=color[2], add=TRUE)
plot(test$BB, map, lodcolumn=i, col=color[3], add=TRUE, lty=2)
legend("topleft", lwd=2, col=color, c("AA", "AB", "BB"), bg="gray90", lty=c(1,1,2))
}
test2<-attr(scan1coef(pr[,1],cross2$pheno[,'PH'],se = T),"SE")
############ Seedling Stage
rm(list=ls())
set.seed(1234)
load("Seedling_Reload_V5_RQTL2.RData")
cross2<-convert2cross2(cross)
map <- insert_pseudomarkers(cross2, step=1)
pr <- calc_genoprob(cross2, map, error_prob=0.002, cores=16)
covar <- as.data.frame(cross2$pheno[,c(2)])
names(covar)<-"Cyto_num"
strata<-(covar$Cyto_num>1)
names(strata)<-cross2$pheno[,1]
kinship_loco <- calc_kinship(pr, "loco")
###### Now Start testing for additive and interactive effect of Cytoplasm as covariate
#Interactive
sc1_seed_loco_cyt_int <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar,intcovar = covar)
persc1_seed_loco_cyt_int <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata,intcovar = covar)
#Additive
sc1_seed_loco_cyt_add <- scan1(pr, cross2$pheno, kinship_loco, cores=16,addcovar = covar)
persc1_seed_loco_cyt_add <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16,addcovar = covar,perm_strata = strata)
#Null
sc1_seed_loco_cyt_null <- scan1(pr, cross2$pheno, kinship_loco, cores=16)
persc1_seed_loco_cyt_null <- scan1perm(pr, cross2$pheno, kinship_loco ,n_perm=1000, cores=16)
##### get threshold for Interaction model where Cytoplasm is covar
int_cyt_seed_threshold<-c()
for(i in 3:10)
{
int_cyt_seed_threshold<-append(int_cyt_seed_threshold, as.numeric(quantile((persc1_seed_loco_cyt_int[,i]- persc1_seed_loco_cyt_null[,i]),probs = 0.95)))
}
###### find any QTL peak that pass this threshold for Cyt int
cyt_int_sc1_seed_loco<-sc1_seed_loco_cyt_int- sc1_seed_loco_cyt_null
find_peaks(cyt_int_sc1_seed_loco[,c(3:10)],map,int_cyt_seed_threshold)
###########
#### These positions are relative to models there will move a bit up and down
##### get threshold for Additive model where Cytoplasm is covar
add_cyt_seed_threshold<-c()
for(i in 3:10)
{
add_cyt_seed_threshold<-append(add_cyt_seed_threshold, as.numeric(quantile((persc1_seed_loco_cyt_add[,i]- persc1_seed_loco_cyt_null[,i]),probs = 0.95)))
}
###### find any QTL peak that pass this threshold for Cyt add
cyt_add_sc1_seed_loco<-sc1_seed_loco_cyt_add- sc1_seed_loco_cyt_null
find_peaks(cyt_add_sc1_seed_loco[,c(3:10)],map,add_cyt_seed_threshold)
####### shows additive effect that cross the threshold
## So, overall, for all the traits of the seedling phenotype, cytoplasm will be
## considered as both an additive and an interactive cofactor when testing for QTL.
## When asking for the effect of a single QTL at a given position, a linear model will
## be fitted, and cytoplasm will be included as a covariate if and only if it passes
## the threshold of the difference between the full and reduced models.
###########################
SeedQ<-as.data.frame(find_peaks(sc1_seed_loco_cyt_int,map,as.vector(summary(persc1_seed_loco_cyt_int,alpha=0.05))))
color <- c("slateblue", "violetred", "green3")
for(i in 3:10) {
ymx <- max(c(as.data.frame(sc1_seed_loco_cyt_int)[,i],as.data.frame(sc1_seed_loco_cyt_add)[,i],as.data.frame(sc1_seed_loco_cyt_null)[,i]),na.rm =T) # overall maximum LOD score
plot(sc1_seed_loco_cyt_null, map, lodcolumn=i, col=color[1], main=colnames(cross2$pheno)[i],
ylim=c(0, ymx*1.02))
plot(sc1_seed_loco_cyt_add, map, lodcolumn=i, col=color[2], add=TRUE)
plot(sc1_seed_loco_cyt_int, map, lodcolumn=i, col=color[3], add=TRUE, lty=2)
abline(h = as.vector(summary(persc1_seed_loco_cyt_null, alpha= 0.05))[i],col=color[1])
abline(h = as.vector(summary(persc1_seed_loco_cyt_add, alpha= 0.05))[i],col=color[2])
abline(h = as.vector(summary(persc1_seed_loco_cyt_int, alpha= 0.05))[i],col=color[3])
legend("topleft", lwd=2, col=color, c("Null", "Cyt-Add", "Cyt-Int"), bg="gray90", lty=c(1,1,2))
}
save.image("Seed_QTL_Fin_v1.RData")
###############################
#get flanking markers
dat<-read.csv("RiceSaltQTL_table2.csv",check.names = F)
colnames(dat)
#colnames(dat)[3]<-"Chromosome"
colnames(dat)[which(colnames(dat) == "Chr")]<-"Chromosome"
dat$'QTL Interval'<-paste(dat$`upstream position of peak`,dat$`downstream position of peak`,sep = " - ")
dat<-dat[,c(1:5,9,8)]
colnames(dat)
colnames(dat)[which(colnames(dat) == "Position")]<-"QTL peak"
### Will use rqtl to get the real markers, not pseudo
# get_flanking_markers<-function(cross,dat){
# up=down=c()
# for (i in 1:nrow(dat)) {
# p<-as.numeric(strsplit(dat[i,'QTL Interval'],split = " - ")[[1]]);
# chr<-dat[i,'Chromosome']
# library(qtl)
# up<-append(up,strsplit(find.marker(cross,chr,p[1]),split = "_")[[1]][3])
# down<-append(down,strsplit(find.marker(cross,chr,p[2]),split = "_")[[1]][3])
# }
# list=list(up=up,down=down)
# return(list)
# }
############ Re-write the above function because some genetic vs physical order are not same
# Map each QTL's genetic-map interval (cM) to flanking physical positions (bp).
# For every row of dat, all markers on the QTL's chromosome are collected,
# their bp positions parsed from the marker names (third "_"-separated field),
# and the min/max bp among markers whose cM position falls inside the rounded
# interval are returned as the upstream/downstream flanking positions.
# Returns list(up=<vector of bp>, down=<vector of bp>), one entry per QTL row.
# NOTE(review): pull.map() is re-evaluated on every inner-loop iteration; it
# could be hoisted out of the j loop for a substantial speed-up.
get_flanking_markers<-function(cross,dat){
up=down=c()
for (i in 1:nrow(dat)) {
# Interval endpoints in cM, stored as a string like "12.3 - 45.6".
p<-as.numeric(strsplit(dat[i,'QTL Interval'],split = " - ")[[1]]);
chr<-dat[i,'Chromosome']
library(qtl)
BP<-c()
# Parse the physical (bp) position embedded in each marker name on chr.
for (j in 1:length(unlist(pull.map(cross,chr))))
{
BP<-append(BP,as.numeric(strsplit(names(unlist(pull.map(cross,chr))),split = "_")[[j]][3]) )
}
# Genetic (cM) positions of the same markers, aligned with BP above.
CM<-as.vector(unlist(pull.map(cross,chr)))
genotab<-as.data.frame(cbind(BP=BP,CM=CM))
# Flanking bp = extreme physical positions of markers inside the interval.
# min/max are used (rather than first/last) because genetic and physical
# order disagree for some markers (see the note above this function).
up<-append(up,min(genotab$BP[which(genotab$CM >= round(p[1]) & genotab$CM < round(p[2]))]))
down<-append(down,max(genotab$BP[which(genotab$CM >= round(p[1]) & genotab$CM < round(p[2]))]))
}
list=list(up=up,down=down)
return(list)
}
fbps<-get_flanking_markers(cross,dat)
dat$upBP<-fbps$up
dat$downBP<-fbps$down
write.csv(dat,"RiceSaltQTL_table2_withflankingM.csv",row.names = F)
|
8e46463e0879d622f718e1b9bf9ee77cf39033a7 | ead118e4df14ed6ed4f3724de638f4199d1f7d8b | /biostar_map.R | 10e5ccef6bcd3c4c76f7a9872e0e0b2f9c8d3d81 | [
"MIT"
] | permissive | Honglongwu/bioinformatics-scripts | 1ead902e259e09758ebad096c703dcc56f84d5c0 | 515d2f19ebfd3e7f3bb6fbe38cc7194d1af34997 | refs/heads/master | 2020-12-24T19:46:04.308493 | 2014-12-13T23:40:40 | 2014-12-13T23:40:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 518 | r | biostar_map.R | library(org.Hs.eg.db)
# Example: mapping Entrez gene identifiers to gene symbols with org.Hs.eg.db.
x <- org.Hs.egSYMBOL
# Entrez gene identifiers that have a mapped gene symbol.
mapped_genes <- mappedkeys(x)
# Convert the Bimap to a named list (Entrez ID -> symbol).
xx <- as.list(x[mapped_genes])
if(length(xx) > 0) {
# Symbols for the first five genes.
xx[1:5]
# Symbol for the first gene only.
xx[[1]]
}
# Reverse map: gene symbol -> Entrez gene identifier(s).
x <- org.Hs.egSYMBOL2EG
# Gene symbols that have a mapped Entrez identifier.
mapped_genes <- mappedkeys(x)
# Convert the Bimap to a named list (symbol -> Entrez ID).
xx <- as.list(x[mapped_genes])
d8ff0308363a2197a24dfdc8fcc0b2bd8fc578c7 | 4c7b4c60c3268c1115a70a3adc7c5cc45dbc75c2 | /scripts/exitMaze.R | 0896b330f8d8b2f74a093882be350629ffcfaa1d | [] | no_license | nazabic/data_analysis | cc6d9c7519590a90fbe14b6b71f10ac32e2715aa | 8738f5a3882132c300de0bf6db8e8bc2274b6cb3 | refs/heads/master | 2021-01-16T21:03:32.350206 | 2016-12-01T17:19:16 | 2016-12-01T17:19:16 | 64,401,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,576 | r | exitMaze.R | rm(list=ls())
library(rethomics)
library(tools)
############# Fetch data and add conditions
files <- list.files("/Users/diana/Dropbox/Phd Stuff/ESA/data_exp/all_db_mf", pattern="*.db",full.names = T)
query <- data.table(path=files)
list_of_fields <- strsplit(basename(files),'[-.]')
fields_mat <- do.call("rbind", list_of_fields)
conditions <- as.data.table(fields_mat[,c(3,8,10,12)])
setnames(conditions, c("day","machine","DAM","fly_id"))
query <- cbind(query, conditions)
dt <- loadPsvData(query)
key(dt)
dt[, side := ifelse(x < 0.68, "left", "right")]
dt[, sleep_deprived := ifelse(DAM %in% c("005", "016", "004"), "sleep_deprived", "control")]
dt[, sex := ifelse(DAM %in% c("008", "016", "004", "009"), "female", "male")]
############ Get the data of each fly until it gets to the second part of the maze
index_end_maze <- function(x, t) {
  # Index of the first observation where the fly has crossed into the second
  # part of the maze (x > 0.68). Used as the row count for head(.SD, ...).
  #
  # Args:
  #   x: numeric vector of x positions.
  #   t: time vector; accepted for call-site compatibility but unused.
  #
  # Returns the first qualifying index, or Inf (with a warning) when the
  # boundary is never crossed — matching min() on an empty set.
  min(which(x > 0.68))
}
firstpart <- dt[, head(.SD, index_end_maze(x,t)), by=key(dt)]
lastPoints <- firstpart[x>0.67 & x<0.68 & (day %in% c('11','12','13'))]
lastPositions <- lastPoints[, list(lastx=mean(.SD$x), lasty=mean(.SD$y)), by=key(lastPoints)]
exitNumber <- function(y) {
  # Map a fly's final y position to one of six maze exits.
  # Each exit corresponds to an open y interval; positions that fall in
  # none of the intervals (gaps between exits) return 0.
  lower <- c(0.05, 0.15, 0.25, 0.35, 0.45, 0.55)
  upper <- c(0.09, 0.20, 0.30, 0.40, 0.52, 0.60)
  hit <- which(y > lower & y < upper)
  if (length(hit) == 0L) 0 else as.numeric(hit[1L])
}
lastPositions[, exit:=sapply(lasty, exitNumber)]
hist(lastPositions$exit, col='red', breaks = 20, main="Exit distribution of ALL flies", xlab='exit #')
hist(tail(lastPositions$exit,15), breaks=20, col="red")
par(new=T)
d <- density(lastPositions$exit)
plot(d)
ggplot(data=lastPositions, aes(lastPositions$exit)) + geom_histogram(aes(y = ..density..)) + geom_density()
# test for normal distribution
shapiro.test(lastPositions$exit)
qqnorm(lastPositions$exit)
exitByFly <- lastPoints[, list(exit=lapply(lasty, exitNumber)), by=key(lastPoints)]
plot(lastPositions$exit)
plot(-y~x, firstpart)
par(new=T)
points(lastPoints$lastx, -lastPoints$lasty, col="blue")
plot(lastPoints$lasty)
abline(h=0.05)
abline(h=0.09)
abline(h=0.15)
abline(h=0.2)
abline(h=0.25)
abline(h=0.3)
abline(h=0.35)
abline(h=0.4)
abline(h=0.45)
abline(h=0.52)
abline(h=0.55)
abline(h=0.6)
myFly <- firstpart[experiment_id=="2015-08-13-Aug-44-1439462668-GGSM-005-DAM-004-FLY-20.db"]
myFly<-firstpart[experiment_id=="2015-08-11-Aug-13-1439288014-GGSM-013-DAM-005-FLY-7.db"]
find_Uturns <- function(velocity, window_size) {
  # Detect U-turns: sign reversals of the smoothed x velocity.
  #
  # Args:
  #   velocity:    numeric vector of frame-to-frame x displacements.
  #   window_size: width of the centred moving-average smoothing window.
  #
  # Returns list(u_turn, out_vel):
  #   u_turn  — output of a (-1, 1) transition detector over the sign of the
  #             smoothed velocity; values near 2 mark a reversal.
  #   out_vel — the smoothed velocity (with NA padding at both ends).
  window <- rep(1 / window_size, window_size)
  # stats::filter is qualified explicitly: this project attaches packages
  # (dplyr / data.table via rethomics) that mask filter(), which would make
  # the unqualified call fail on a plain numeric vector.
  out_vel <- stats::filter(velocity, window, sides = 2)
  smoo <- sign(out_vel)
  # Drop the NA padding the centred filter leaves at both ends. NOTE: this
  # shifts u_turn indices relative to the original time axis.
  smoo <- smoo[!is.na(smoo)]
  pattern <- c(-1, 1)
  u_turn <- convolve(smoo, rev(pattern), type = "filter")
  list(u_turn, out_vel)
}
listout = find_Uturns(diff(myFly$x),25)
u_turn = listout[[1]]
smoothed_velocity = listout[[2]]
plot(myFly$t[1:length(diff(myFly$x))], diff(myFly$x),type = "l")
plot(myFly$t[1:length(smoothed_velocity)], smoothed_velocity,type = "l")
plot(myFly$t[1:length(u_turn)], u_turn, type = "l", ylim=c(-2,2))
sum(u_turn >1.9)
turns <- firstpart[, list(uturns=sum(find_Uturns(diff(x),9)[[1]]>1.9)), by=c(key(firstpart), "sleep_deprived", "sex")]
hist(turns[sex=='female' & sleep_deprived =='control']$uturns)
turns_box <- ggplot(turns, aes(y=uturns, x= sex, fill=sleep_deprived)) + geom_boxplot() +xlab("") + ylab("Mean velocity in the second part of the maze (mm/s)")
|
edd89ff8ddfa2642e5aff000a34288524eb6ceac | 2012cacdb8764d8b4da3b8b4f12bb68a44022020 | /R/filterNA.R | 680d885791068d5df6451b4acc9627ce83e8fb17 | [] | no_license | cran/FuzzyStatTra | bc8ed929f803657dea67ee096e815e9c4bccc873 | 3aa549fd7a902c8656e74d33ee78dba1dcafee52 | refs/heads/master | 2021-01-13T14:09:25.316161 | 2017-02-08T11:46:14 | 2017-02-08T11:46:14 | 81,347,116 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 330 | r | filterNA.R | filterNA <-
filterNA <- function(F) {
  # Drop every row of F that contains a missing value.
  #
  # Args:
  #   F: matrix (or data frame); rows are observations.
  #
  # Returns list(FoutNA, noutNA): the complete-case matrix and its row count.
  FoutNA <- as.matrix(F[complete.cases(F), ])
  # When exactly one complete row survives from a multi-column input, the
  # subset collapses to a column vector; transpose it back to a 1 x p row.
  # && (not &) because both operands are scalars inside an if().
  if (ncol(F) > 1 && ncol(FoutNA) == 1) {
    FoutNA <- t(FoutNA)
  }
  noutNA <- nrow(FoutNA)
  return(list(FoutNA, noutNA))
}
|
38e87d7aeda8a1d0100abe083c468ae04d79697d | 7d5d8492c2d88b88bdc57e3c32db038a7e7e7924 | /kenya-county-profiles/_test_scripts/KACCAL_plots_functions.R | 1f3dea9a338133d318de00ff903dd22259489379 | [] | no_license | CIAT-DAPA/dapa-climate-change | 80ab6318d660a010efcd4ad942664c57431c8cce | 2480332e9d61a862fe5aeacf6f82ef0a1febe8d4 | refs/heads/master | 2023-08-17T04:14:49.626909 | 2023-08-15T00:39:58 | 2023-08-15T00:39:58 | 39,960,256 | 15 | 17 | null | null | null | null | UTF-8 | R | false | false | 5,791 | r | KACCAL_plots_functions.R | # Graphics and other functions
# H. Achicanoy
# CIAT, 2016
# ===================================================================== #
# CHIRPS data
# ===================================================================== #
# Plotting each time series
plot(1:length(as.numeric(temp.dt[1,])), as.numeric(temp.dt[1,]), type='l', ylim=c(0,100), xlab='Days a year', ylab='Precipitation (mm)')
lapply(1:nrow(temp.dt), function(i){
lines(1:length(as.numeric(temp.dt[1,])), as.numeric(temp.dt[i,]), col=1)
})
# Plotting cumulative time serie for each site in this year
plot(colMeans(temp.dt)[-1], type='l') # plot(colMedians(temp.dt)[-1], type='l')
plot(cumsum(colMeans(temp.dt)[-1]), type='l', col=2, xlab='Days a year', ylab='Cumulative precipitation (mm)')
lapply(2:nrow(temp.dt), function(i){
lines(cumsum(as.numeric(temp.dt[i,])[-1]), col=2)
})
# ===================================================================== #
# CH2014 data
# ===================================================================== #
# Plotting each time serie
plot(1:length(as.numeric(temp.dt[1,])), as.numeric(temp.dt[1,]), type='l', ylim=c(0, 40), xlab='Days a year', ylab='Radiacion solar')
lapply(1:nrow(temp.dt), function(i){
lines(1:length(as.numeric(temp.dt[1,])), as.numeric(temp.dt[i,]), col=1)
})
# ===================================================================== #
# IMPORTANT functions
# ===================================================================== #
### Estimate 100-day wettest period in each semester of year for each pixel ***
# Function to calculate sum for n consecutive days moving forward each index
rsum.lapply <- function(x, n = 3L)
{
  # Rolling-window sums over x: for every start index i, compute the sum of
  # the n consecutive values x[i..i+n-1] together with the indices summed.
  #
  # Args:
  #   x: numeric vector (e.g. daily rainfall within one semester).
  #   n: window length in days (default 3).
  #
  # Returns a list with one element per window position; each element is
  # list(<sum over the window>, <numeric vector of the indices used>).
  #
  # seq_len over a guarded count (instead of 1:(length(x)-n+1)) means an
  # input shorter than the window yields an empty list rather than the
  # decreasing sequence c(1, 0, ...) iterating out of bounds.
  n_windows <- max(0L, length(x) - n + 1L)
  lapply(seq_len(n_windows), function(i)
  {
    idx <- i:(i + n - 1L)
    # List with SUM and INDICES, matching the downstream extractors
    # cumulative.f.season() and cumsum.f.season().
    list(sum(x[idx]), as.numeric(idx))
  })
}
# Function to extract the SUM
cumulative.f.season <- function(results)
{
  # Extract the first element (the window SUM) from every rolling-window
  # record produced by rsum.lapply() and flatten into a numeric vector.
  sums <- lapply(results, `[[`, 1L)
  unlist(sums)
}
# Function to extract the INDICES
cumsum.f.season <- function(results)
{
  # Extract the second element (the vector of day INDICES used for the sum)
  # from every rolling-window record, keeping the list structure.
  lapply(results, `[[`, 2L)
}
# First semester contains 181-182 days (depending on leap years) while second semester contains 184 days
# Example
# Table with data: rows correspond to pixels; columns correspond to days within the year. Each table represent a year. List of tables corresponds to a county
temp.dt <- chirps_year[[35]]
temp.dt <- chirps_year[[which(names(chirps_year)=='y1998')]]
# This object contains the SUM and list of INDICES used to calculate the SUM
pixel.first.season <- rsum.lapply(x=as.numeric(temp.dt[2,])[-1][1:181], n=100)
# This object contains list of SUMs
cumulative.test <- cumulative.f.season(pixel.first.season)
# This object contains list of INDICES
cumsum.test <- cumsum.f.season(pixel.first.season)
# Plotting cumulative curves for each sequence of 100 days within a semester
plot(1:100, cumsum(as.numeric(temp.dt[2,])[-1][cumsum.test[[1]]]), type='l', ylim=c(0,250), xlab='Days a year', ylab='Precipitation (mm)')
lapply(1:length(pixel.first.season), function(i){
lines(1:100, cumsum(as.numeric(temp.dt[2,])[-1][cumsum.test[[i]]]), col=1)
})
lines(1:100, cumsum(as.numeric(temp.dt[2,])[-1][cumsum.test[[7]]]), col=2, lwd=5)
### Creating shapefiles for each county (DONE) ***
# df <- readOGR("D:/ToBackup/Modelling/_data/Kenya_counties","County")
# lapply(1:nrow(countyList), function(i)
# {
# library(rgdal)
# library(PBSmapping)
#
# subset <- df[df$COUNTY==countyList$County[[i]],]
# writeOGR(subset, "D:/ToBackup/Modelling/_data/Kenya_counties", countyList$County[[i]], driver="ESRI Shapefile")
# return('Done!\n')
# }); rm(df)
### Clipping rasters using python ***
# Process for one shapefile
# command <- 'python'
# output <- system2(command, args=c('D:/ToBackup/Modelling/_scritps/ExtractByMask_modified.py', # Python script
# '//dapadfs/data_cluster_4/observed/gridded_products/chirps', # Input folder
# 'D:/ToBackup/Modelling/_data/CHIRPS', # Output folder
# 'D:/ToBackup/Modelling/_data/Kenya_counties/County.shp', # Shapefile
# 'ALL'), stdout=TRUE)
#
# # Recent version of R
# lapply(1:nrow(countyList), function(i)
# {
# cat('Processing:', countyList$County[[i]], 'county\n')
# outFolder <- paste('//dapadfs/workspace_cluster_8/Kenya_KACCAL/data/CHIRPS/', gsub(pattern=' ', replacement='_', countyList$County[[i]]), sep='')
# ifelse(test=dir.exists(outFolder), yes=cat('Output folder exists\n'), no=dir.create(outFolder))
# return(cat(countyList$County[[i]], 'done\n'))
# })
#
# # Process for all shapefiles
# library(parallel)
# command <- 'python'
# mclapply2(1:nrow(countyList), function(i)
# {
# cat('Processing:', countyList$County[[i]], 'county\n')
# outFolder <- paste('//dapadfs/workspace_cluster_8/Kenya_KACCAL/data/CHIRPS/', gsub(pattern=' ', replacement='_', countyList$County[[i]]), sep='')
# output <- system2(command, args=c('//dapadfs/workspace_cluster_8/Kenya_KACCAL/scripts/ExtractByMask_modified.py', # Python script
# '//dapadfs/data_cluster_4/observed/gridded_products/chirps', # Input folder
# outFolder, # Output folder
# paste('//dapadfs/workspace_cluster_8/Kenya_KACCAL/data/Kenya_counties/', countyList$County[[i]], '.shp', sep=''), # Shapefile
# 'ALL'), stdout=TRUE)
# cat(countyList$County[[i]], 'done\n')
# })
|
d9431e44c4579bc2a2dab47128c1fd79baab2f51 | 4e1b31a95fc7adac9b4a13b5ba0fe16706db2dc3 | /Telecom Complaint Analysis/ComCast Descriptive Analysis.R | 38d29115082bdbff7e4bc120d99d767c7ec1c94d | [] | no_license | Ved-05/Data-Analysis | 8e7c83a3823d251a33d60c60f40a2f10297ac600 | 594ec29acb7b8554489dd3da07d80500e8264b5d | refs/heads/master | 2023-05-13T01:19:07.041445 | 2021-06-06T18:06:49 | 2021-06-06T18:06:49 | 368,075,217 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,422 | r | ComCast Descriptive Analysis.R | # Analysis for Comcast Telecom Consumer Complaints.
# Problem :- The firm has been providing terrible customer service.
# They continue to fall short despite repeated promises to improve.
# They need help to pin down what is wrong with Comcast's customer
# service.
library(lubridate)
library(ggplot2)
library(dplyr)
# Task - Import data into the R environment.
# Set a working directory where data is stored.
setwd(choose.dir())
Comcast_Complaints <- read.csv('Comcast Telecom Complaints data.csv',
sep = ',',
strip.white = TRUE,
stringsAsFactors = FALSE)
# Check table structure
str(Comcast_Complaints)
# The column names has some dots (..), rename the column names.
Comcast_Complaints <- rename(Comcast_Complaints,
Ticket_Id = Ticket..,
Customer_Complaint = Customer.Complaint,
Received_Via = Received.Via,
Zip_Code = Zip.code,
Representative = Filing.on.Behalf.of.Someone)
# Convert columns into data structures.
Comcast_Complaints$Received_Via <- as.factor(Comcast_Complaints$Received_Via)
Comcast_Complaints$City <- as.factor(Comcast_Complaints$City)
Comcast_Complaints$State <- as.factor(Comcast_Complaints$State)
Comcast_Complaints$Zip_Code <- as.factor(Comcast_Complaints$Zip_Code)
Comcast_Complaints$Status <- as.factor(Comcast_Complaints$Status)
Comcast_Complaints$Representative <- as.factor(Comcast_Complaints$Representative)
# Check table structure to be in-place to proceed further.
str(Comcast_Complaints)
# Date variable looks to be using different formats.
# Considering date as dd/mm/yy so convert format to make date consistent.
Comcast_Complaints$Date[grepl('/', Comcast_Complaints$Date)] <- sapply(
Comcast_Complaints$Date[grepl('/', Comcast_Complaints$Date)],
function(date) {
paste0(unlist(strsplit(date, '/'))[c(1,2,3)],
collapse = '-')
}, USE.NAMES = FALSE)
Comcast_Complaints$Date <- as.Date(Comcast_Complaints$Date, "%d-%m-%y")
# Check for empty values.
empty_values <- sum(is.na(Comcast_Complaints))
if(empty_values != 0) print(empty_values)
# Add a new categorical variable to categorize complaint type
# Domains of complaints
domains <- c(Charges = c("arge","price","hike","bill","saction","fun"),
Service = c("custom","serv"),
Network = c("network", "call", "signal"),
Usage= c("data","cap","usage"),
Internet = c("internet", "speed"),
Ethics = c("fraud","rac","mono", "not"))
complaint_type <- sapply(domains,
grepl,
Comcast_Complaints$Customer_Complaint,
ignore.case = TRUE)
complaint_type <- apply(complaint_type, 1, function(r) names(which(r)))
complaint_type <- sapply(complaint_type,
function(s) if (length(s) == 0) "Other"
else names(which.max(table(
unlist(strsplit(
gsub('[[:digit:]]+', '', s), " ")))))
)
Comcast_Complaints$Complaint_Domain <- as.factor(complaint_type)
# Check table structure
str(Comcast_Complaints)
# Create a new categorical variable with value as Open and
# Closed. Open & Pending is to be categorized as Open and Closed &
# Solved is to be categorized as Closed.
# Checks status and returns updated status as Open or Closed.
resolution <- function(status) {
  # Collapse the four raw ticket statuses into two buckets:
  # "Solved"/"Closed" -> "Closed"; everything else ("Open"/"Pending") -> "Open".
  #
  # Args:
  #   status: character or factor vector of raw statuses.
  #
  # Returns a factor with levels "Closed"/"Open".
  # %in% replaces the original `== "Solved" | == "Closed"` pair; it also maps
  # any NA to "Open" rather than propagating NA (the data set is verified
  # NA-free upstream before this is called).
  collapsed <- ifelse(status %in% c("Solved", "Closed"), "Closed", "Open")
  as.factor(collapsed)
}
# Add a new variable to categorize resolution status as Open/Closed.
Comcast_Complaints <- transform(Comcast_Complaints,
Resolution = resolution(Status))
# Check table structure
str(Comcast_Complaints)
# Task:- Provide the trend chart for the number of complaints at monthly
# and daily granularity levels.
Month_Wise_Data <- transform(Comcast_Complaints,
Month = month(Date, label = TRUE))
monthly_count <- table(Month_Wise_Data$Month)
daily_count <- table(Comcast_Complaints$Date)
# Check monthly mean and median data to know some details.
mean(monthly_count) # 185.3333
median(monthly_count) # 57
mean(daily_count) # 24.43956
median(daily_count) # 17
# Visualize month wise complaints
ggplot(as.data.frame(monthly_count),
aes(Var1, Freq, group=1, label=Freq)) +
geom_line(size=1.2) +
geom_point(size = 2) +
geom_text(nudge_y = 50) +
labs(y = "Number of complaints",
x="",
title = "Monthly granularity level") +
theme_minimal()
# Visualize date wise complaints
ggplot(as.data.frame(daily_count),
aes(Var1, Freq, group=1, label=Freq)) +
geom_line(size=1) +
geom_point(size = 2) +
geom_text(nudge_y = 5) +
labs(y = "Number of complaints",
x="",
title = "Daily granularity level") +
theme(axis.text.x = element_text(angle = 90, size = 6))
# Insights. Reason behind spike in June
Month_Wise_Data <- filter(Month_Wise_Data, Month == "Jun")
# Function to get insights by states on the sample under test.
sample_insights_by_state <- function(sample_df) {
# Stacked-bar summary of complaint counts per US state, split by complaint
# domain, for an arbitrary subset of the Comcast data. The number drawn on
# each bar segment is that domain's percentage within its state.
# Requires columns State and Complaint_Domain; returns a ggplot object.
# Count complaints per (State, Complaint_Domain) pair.
sample_df <- group_by(sample_df, State, Complaint_Domain)
sample_df <- summarise(sample_df, Count = n())
# Within each state, express each domain's count as a rounded percentage.
sample_df <- group_by(sample_df, State) %>%
mutate(Distribution = round(Count*100/sum(Count), digits=0))
# Horizontal stacked bars, states ordered by total count; segment labels
# show the within-state percentages computed above.
ggplot(sample_df,
aes(x= Count, y = reorder(State, Count),
fill = Complaint_Domain)) +
geom_bar(position = "stack", stat = "identity") +
geom_text(size=2.5, position = position_stack(vjust = 0.5),
aes(label = Distribution))+
scale_fill_brewer(palette="Paired") +
labs(x = "Number of complaints",
y = "",
title = paste("State wise complaint (percentages) ")) +
theme_minimal()
}
sample_insights_by_state(Month_Wise_Data)
# Task:- Provide a table with the frequency of complaint types.
# Which complaint types are maximum i.e., around internet, network
# issues, or across any other domains.
table(Comcast_Complaints$Complaint_Domain)
# Above table defines that maximum complaints are around any other
# domains (580) followed by Charges (527) and Internet(476).
# Task:- Provide state wise status of complaints in a stacked bar
# chart. Use the categorized variable from Q3.
# Create a new data frame which specifies resolution status by states
# in percentages.
Grouped_By_State <- group_by(Comcast_Complaints, State, Resolution)
Grouped_By_State <- summarise(Grouped_By_State, Count = n())
Grouped_By_State <- group_by(Grouped_By_State, State) %>%
mutate(Distribution = round(Count*100/sum(Count), digits=0))
ggplot(Grouped_By_State,
aes(x= Count, y = reorder(State, Count),
label = paste(Distribution, "% (", Count,")"),
fill = Resolution)) +
geom_bar(position = "stack", stat = "identity") +
geom_text(size = 3, position = position_stack(vjust = 0.8)) +
scale_fill_brewer(palette="Pastel1", direction = -1) +
labs(x = "Number of complaints", y = "",
title = "State wise complaint resolution") +
theme(legend.position = "top")
# Provide insights on:
# Which state has the maximum complaints
# Ans - Top 3 states as per graph are Georgia, Florida and California.
# Which state has the highest percentage of unresolved complaints
Grouped_by_Unresolved <- Grouped_By_State %>% filter(Resolution == "Open")
total_unresolved <- sum(Grouped_by_Unresolved$Count)
Grouped_by_Unresolved <- Grouped_by_Unresolved %>% mutate(
Distribution = round(Count*100/total_unresolved, digits=0))
head(arrange(Grouped_by_Unresolved, desc(Distribution)))
# Ans- Georgia(80) with 15%, California(61) with 12% and Tennessee(47) with 9%.
# Insights on open complaints w.r.t complaint domains
Grouped_by_Unresolved <- Comcast_Complaints %>% filter(Resolution == "Open")
sample_insights_by_state(Grouped_by_Unresolved)
ggplot(as.data.frame(table(Grouped_by_Unresolved$Complaint_Domain)),
aes(x = "", y = Freq, fill = Var1,
label = paste(round(Freq*100/sum(Freq), digits = 0), "%"))) +
geom_bar(stat = "identity", width = 1) +
geom_text(size=5, position = position_stack(vjust = 0.5),
show.legend = FALSE,
aes(label = paste(round(Freq*100/sum(Freq),
digits = 0), "%")))+
coord_polar("y", start = 0) +
scale_fill_brewer(palette="Paired", direction = -1) +
labs(x = "", y = "", fill = "Complaint Types",
title = "Domain distribution for unresolved complaints") +
theme_minimal()
# Internet, usage and charges related complaints are unresolved.
# This might be due to the lack of technical support from the company.
# Task :- Provide the percentage of complaints resolved till date,
# which were received through the Internet and customer care calls.
# Create a new data frame which specifies resolution status by
# received through the Internet and customer care calls in percentages.
Grouped_By_Type <- summarise(group_by(Comcast_Complaints,
Received_Via, Resolution), Count = n())
Grouped_By_Type <- group_by(Grouped_By_Type, Received_Via) %>%
mutate(Distribution = round(Count*100/sum(Count), digits=0))
# Visualize data.
ggplot(Grouped_By_Type,
aes(x = Received_Via, y = Count, fill = Resolution,
label = paste(Distribution, "%"))) +
geom_bar(position = "stack", stat = "identity", width = 0.5) +
geom_text(size = 4, position = position_stack(vjust = 0.5)) +
scale_fill_brewer(palette="Pastel1", direction = -1) +
labs(x = "", y = "", title = "Complaint Resolution Status") +
theme_minimal()
# The percentage of complaints resolved via. Customer Care Calls are
# found to be slightly better as compared to the Internet. |
9452bbf4896fd7302318ee3c5f32b8bd2c8e8c62 | 08143898e6f093d699035414b57fb9cad8e9baee | /ui.R | 5ea4745f16c4bf749bc42a75fbe0c85ed68cf374 | [] | no_license | margiehertneck/HighAltBaking | 7ae0e8c5e40e0a3af42d96a844b167e86d95dc49 | ce1aab5c08d929db92fe24239075368a27c24d1d | refs/heads/master | 2018-12-31T09:47:16.029180 | 2015-06-08T23:28:34 | 2015-06-08T23:28:34 | 37,022,378 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,045 | r | ui.R | #### ui.R for High Altitude Baking
# Recommendations for adjustments come from
# http://www.kingarthurflour.com/recipe/high-altitude-baking.html
# load shiny library
library(shiny)
# Set up header and input fields in sidebar
# Page layout: input controls in the sidebar, computed adjustments in the
# main panel.  The "tempfunc"/"timefunc" outputs are presumably rendered in
# server.R (not visible in this file) -- confirm there.
shinyUI(fluidPage(
  titlePanel("Adjustments to Time and Temperature for High Altitude Baking"),
  br(),
  sidebarLayout(
    sidebarPanel(
      h4('If you bake at an elevation of 3000 feet or more,
      answer the questions below using the information in your recipe.'),
      h4('The adjusted time and temperature you should use will be on the right.'),
      br(),
      # "1" = chocolate/delicate batter, "2" = anything else; "2" is the default.
      radioButtons("chocolate", "Is this a chocolate or delicate batter?",
                   c("Yes" = "1",
                     "No" = "2"), "2"),
      # Bake time and temperature as given by the (sea-level) recipe.
      numericInput('minutes', 'Enter bake time (in minutes):', "60"),
      numericInput('temp', 'Enter bake temperature (in Fahrenheit degrees):', "325"),
      helpText('IMPORTANT: This app is in beta. Use at your own risk.')
    ),
    # set up text in Main Panel (output)
    mainPanel(
      h3('Make the following changes:'),
      br(),
      #verbatimTextOutput("tempfunc") ,
      #verbatimTextOutput("timefunc") ,
      h4(textOutput("tempfunc")),
      h4(textOutput("timefunc")),
      br(),
      p('I hope you found this high altitude baking calculator helpful. These recommendations are based on those found at the King Arthur Flour website.'),
      # create a reference line to source material
      tags$div(class="header", checked=NA,
               tags$p("For more information:"),
               tags$a(href="http://www.kingarthurflour.com/recipe/high-altitude-baking.html", "King Arthur Flour")
      )
    )
  )
)
)
e0ebb49ff44a842f8eabf867a43b3ebe363e7b26 | f93f30c1735d38d48ddfe21d997ce01791896ad7 | /man/algo_leverage.Rd | 29afca90d48241a16d28d007c28f87206ac6c0d7 | [
"MIT"
] | permissive | stevebroll/stsci6520hw2 | 58f1d6d3cec32483414f4e1b2d5bec253ab6e21c | 93464f6e6a486b3c500ecab838af9e2c91709a25 | refs/heads/master | 2023-05-04T20:32:34.298506 | 2021-05-26T01:10:49 | 2021-05-26T01:10:49 | 362,645,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,556 | rd | algo_leverage.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algo_leverage.R
\name{algo_leverage}
\alias{algo_leverage}
\title{Algorithmic Leveraging}
\usage{
algo_leverage(x, y, subset_size, num_sample = 500, method = "both")
}
\arguments{
\item{x}{Predictor matrix x, with n rows and p columns}
\item{y}{Response vector y, with n elements}
\item{subset_size}{Size of subset(s) on which regression model(s) will be fit}
\item{num_sample}{Number of subsets to take when method = 'both'}
\item{method}{One of 'both', 'uniform', or 'leverage'. 'both' will produce
summary outputs for both uniform and leverage sampling over num_sample
subsets, while 'uniform' and 'leverage' output one draw of estimated Betas
for uniform and leverage sampling, respectively.}
}
\value{
When method = 'both' and X has 1 column, boxplots will be produced
showing the distribution of betas from models fit on samples drawn using both
uniform and leverage sampling. When method = 'both' and X has more than 1
column, a line graph will compare the average values of each Beta for both
uniform and leverage sampling. When method = 'uniform' or 'leverage', the
model will be fit for one sample using uniform or leverage sampling,
respectively, and the Beta values will be returned.
}
\description{
algo_leverage fits linear regression models on subsets of data sampled by
both uniform and leverage sampling
}
\examples{
x = rt(500, 6)
y = -x + rnorm(500)
algo_leverage(x, y, subset_size = 100, num_sample = 500)
algo_leverage(x, y, 100, method = 'leverage')
}
|
f7bf7530bf5b4f22e5d509220f604f5ca416d098 | 3591ec3e8c3f969a166652aa1b9ef84ba6a9d720 | /tests/testthat/test-main.R | 5d329957d99823b13ea81f23e9c5e3dcf5956a24 | [] | no_license | M-E-Rademaker/cSEM.DGP | 837eafd53a3b17a28070309c472417d37fd9bc46 | aefd1600cdfa5385fef9a3d5025f88a3dcc7d22a | refs/heads/master | 2023-08-07T22:29:51.237284 | 2020-02-05T15:53:43 | 2020-02-05T15:53:43 | 208,060,390 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,615 | r | test-main.R | ################################################################################
#
# Purpose: central file to be sourced to several test-xxx.R files
#
################################################################################
## What is allowed:
#
# A maximum of 5 exogenous constructs is allowed if a structural model is supplied;
# otherwise an unlimited number is possible as long as all construct correlations
# are supplied by the user:
# 1. If there is 1 exogenous construct : a maximum of 7 endogenous constructs is allowed
# 2. If there are 2 exogenous constructs: a maximum of 6 endogenous constructs is allowed
# 3. If there are 3 exogenous constructs: a maximum of 5 endogenous constructs is allowed
# 4. If there are 4 exogenous constructs: a maximum of 4 endogenous constructs is allowed
# 5. If there are 5 exogenous constructs: a maximum of 4 endogenous constructs is allowed
#
## Function to compare estimates vs population parameters
comparecSEM <- function(.object, .what, .pop_parameters) {
  # Compare cSEM estimates against known population parameters.
  #
  # .object: a cSEM results object (regular or "cSEMResults_2ndorder").
  # .what: which quantity to compare; one of "Path_estimates",
  #        "Loading_estimates", "Weight_estimates", "Residual_correlation",
  #        "Indicator_correlation", "Exo_construct_correlation".
  # .pop_parameters: numeric vector of population values, in the same row
  #                  order as the table returned by cSEM::summarize().
  #
  # Returns a data.frame with columns Name, Estimate, Pop_value.
  x <- cSEM::summarize(.object)
  if (inherits(.object, "cSEMResults_2ndorder")) {
    # Second-order models report a first- and a second-stage estimation.
    x1 <- x$First_stage$Estimates
    x2 <- x$Second_stage$Estimates
  } else {
    x1 <- NULL
    x2 <- x$Estimates
  }
  if (.what == "Path_estimates") {
    est <- x2$Path_estimates[, c("Name", "Estimate")]
  } else if (.what == "Loading_estimates") {
    # Only common-factor loadings are part of the DGPs, so compare only those.
    est <- rbind(
      x1$Loading_estimates[x1$Loading_estimates$Construct_type == "Common factor",
                           c("Name", "Estimate")],
      x2$Loading_estimates[x2$Loading_estimates$Construct_type == "Common factor",
                           c("Name", "Estimate")])
  } else if (.what == "Weight_estimates") {
    ## Compare only weights for composites, since only those weights are
    ## specified when creating the DGP.
    est <- rbind(
      x1$Weight_estimates[x1$Weight_estimates$Construct_type == "Composite",
                          c("Name", "Estimate")],
      x2$Weight_estimates[x2$Weight_estimates$Construct_type == "Composite",
                          c("Name", "Estimate")])
  } else if (.what == "Residual_correlation") {
    est <- x2$Residual_correlation[, c("Name", "Estimate")]
  } else if (.what == "Indicator_correlation") {
    est <- x2$Indicator_correlation[, c("Name", "Estimate")]
  } else if (.what == "Exo_construct_correlation") {
    est <- x2$Exo_construct_correlation[, c("Name", "Estimate")]
  } else {
    # Fixed: the former message was just "Error"; name the offending input.
    stop("Unknown value for .what: ", .what, call. = FALSE)
  }
  data.frame(est,
             "Pop_value" = unname(.pop_parameters),
             stringsAsFactors = FALSE)
}
#===============================================================================
### Some data generating processes that cSEM.DGP can handle --------------------
#===============================================================================
### 1 exogenous construct and <= 7 endogenous constructs------------------------
dgp_1exo_ok <- " # less than 7 endogenous; random order to check ordering
eta1 ~ 0.5*xi1
eta2 ~ 0.4*eta1 + 0.3*xi1
eta4 ~ 0.4*eta1 + 0.3*eta2 + 0.2*eta3
eta3 ~ 0.4*eta2
eta4 =~ 0.7*y41 + 0.8*y42 + 0.6*y43
eta1 =~ 0.7*y11 + 0.8*y12
eta2 =~ 0.7*y21 + 0.8*y22
eta3 <~ 2*y31 + 1*y32
xi1 =~ 0.7*x1 + 0.8*x2
y42 ~~ 0.2*y43
y31 ~~ 0.35*y32
"
model_1exo_ok <- " # less than 7 endogenous; random order to check ordering
eta1 ~ xi1
eta2 ~ eta1 + xi1
eta4 ~ eta1 + eta2 + eta3
eta3 ~ eta2
eta4 =~ y41 + y42 + y43
eta1 =~ y11 + y12
eta2 =~ y21 + y22
eta3 <~ y31 + y32
xi1 =~ x1 + x2
y42 ~~ y43
"
pop_paths <- c(0.5, 0.3, 0.4, 0.4, 0.4, 0.3, 0.2)
pop_loadings <- c(0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.6)
w_unscaled <- c(2, 1)
pop_indicator_cor <- c(0.35)
Sigma_jj <- matrix(c(1, 0.35, 0.35, 1), nrow = length(w_unscaled))
pop_weights <- w_unscaled / c(sqrt(w_unscaled %*% Sigma_jj %*% w_unscaled))
pop_residual_cor <- c(0.2)
dat_1exo <- generateData(dgp_1exo_ok, .empirical = TRUE)
out <- csem(dat_1exo, model_1exo_ok)
path <- comparecSEM(out, .what = "Path_estimates", pop_paths)
loadings <- comparecSEM(out, .what = "Loading_estimates", pop_loadings)
weights <- comparecSEM(out, .what = "Weight_estimates", pop_weights)
residual_cor <- comparecSEM(out, .what = "Residual_correlation", pop_residual_cor)
indicator_cor <- comparecSEM(out, .what = "Indicator_correlation", pop_indicator_cor)
test_that("1 exo construct; <= 7 endo constructs; random order in model; works", {
expect_equal(path$Estimate, path$Pop_value)
expect_equal(loadings$Estimate, loadings$Pop_value)
expect_equal(weights$Estimate, weights$Pop_value)
expect_equal(residual_cor$Estimate, residual_cor$Pop_value)
expect_equal(indicator_cor$Estimate, indicator_cor$Pop_value)
})
### 3 exogenous constructs and <= 5 endogenous constructs ----------------------
# (Header fixed: this DGP has xi1-xi3, i.e. three exogenous constructs.)
dgp_3exo_ok <- " # less than 5 endogenous; random order to check ordering
eta2 ~ 0.4*eta1 + 0.3*xi1
eta1 ~ 0.5*xi1 + 0.4*xi2 + 0.25*xi3
eta3 ~ 0.4*eta2
eta1 =~ 0.7*y11 + 0.8*y12 + 0.7*y13 + 0.5*y14
eta2 =~ 0.7*y21 + 0.8*y22
eta3 =~ 0.7*y31 + 0.75*y32
xi1 =~ 0.7*x11 + 0.8*x12
xi2 =~ 0.7*x21 + 0.8*x22
xi3 =~ 0.7*x31 + 0.8*x32
y11 ~~ 0.2*y14
y11 ~~ 0.1*y13
y13 ~~ 0.3*y14
xi1 ~~ 0.5*xi2
xi3 ~~ 0.3*xi2
xi1 ~~ 0.4*xi3
"
# Population values, in the row order cSEM::summarize() reports them.
pop_paths <- c(0.5, 0.4, 0.25, 0.3, 0.4, 0.4)
pop_loadings <- c(0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.5, 0.7, 0.8, 0.7, 0.75)
pop_residual_cor <- c(0.1, 0.2, 0.3)
pop_exo_construct_cor <- c(0.5, 0.4, 0.3)
# Here the DGP string itself is reused as the estimation model.
dat_3exo <- generateData(dgp_3exo_ok, .empirical = TRUE)
out <- csem(dat_3exo, dgp_3exo_ok)
path <- comparecSEM(out, .what = "Path_estimates", pop_paths)
loadings <- comparecSEM(out, .what = "Loading_estimates", pop_loadings)
residual_cor <- comparecSEM(out, .what = "Residual_correlation", pop_residual_cor)
exo_construct_cor <- comparecSEM(out, .what = "Exo_construct_correlation", pop_exo_construct_cor)
# Fixed: the description was a copy-paste of the 1-exo test above.
test_that("3 exo constructs; <= 5 endo constructs; random order in model; works", {
  expect_equal(path$Estimate, path$Pop_value)
  expect_equal(loadings$Estimate, loadings$Pop_value)
  expect_equal(residual_cor$Estimate, residual_cor$Pop_value)
  expect_equal(exo_construct_cor$Estimate, exo_construct_cor$Pop_value)
})
### 3 exogenous constructs and 3 endogenous; 1 second-order --------------------
# eta2 is a second-order construct measured by the first-order composites
# c1-c3 (see the "2nd order specification" line inside the string).
dgp2ndorder <- "
# Structural model
eta1 ~ 0.5*xi1 + 0.4*xi2 + 0.4*xi3
eta2 ~ 0.5*eta1
eta3 ~ 0.4*eta2 + 0.6*xi2
# Measurement model
xi1 =~ 0.7*x11 + 0.8*x12
xi2 =~ 0.7*x21 + 0.8*x22
xi3 =~ 0.7*x31 + 0.8*x32
c1 =~ 0.6*y41 + 0.85*y42
c2 =~ 0.6*y51 + 0.85*y52
eta3 =~ 0.8*y21 + 0.8*y22
c3 =~ 0.6*y61 + 0.85*y62
eta1 =~ 0.6*y11 + 0.7*y12 + 0.8*y13
# Exogenous construct correlation
xi1 ~~ 0.3*xi2
xi3 ~~ 0.3*xi2
xi1 ~~ 0.4*xi3
# 2nd order specification
eta2 =~ 0.4*c1 + 0.6*c2 + 0.2*c3
"
# Estimation model: same structure without fixed coefficients.
model_2ndorder <- "
# Structural model
eta1 ~ xi1 + xi2 + xi3
eta2 ~ eta1
eta3 ~ eta2 + xi2
# Measurement model
xi1 =~ x11 + x12
xi2 =~ x21 + x22
xi3 =~ x31 + x32
c1 =~ y41 + y42
c2 =~ y51 + y52
eta3 =~ y21 + y22
c3 =~ y61 + y62
eta1 =~ y11 + y12 + y13
# 2nd order specification
eta2 =~ c1 + c2 + c3
"
# Population values in cSEM::summarize() row order; the last three loadings
# (0.4, 0.6, 0.2) belong to the second-order construct eta2.
pop_paths <- c(0.5, 0.4, 0.4, 0.5, 0.6, 0.4)
pop_loadings <- c(0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.6, 0.85, 0.6, 0.85, 0.6,
                  0.85, 0.6, 0.7, 0.8, 0.8, 0.8, 0.4, 0.6, 0.2)
pop_exo_construct_cor <- c(0.3, 0.4, 0.3)
dat_2ndorder <- generateData(dgp2ndorder, .empirical = TRUE)
out <- csem(dat_2ndorder, model_2ndorder)
path <- comparecSEM(out, .what = "Path_estimates", pop_paths)
loadings <- comparecSEM(out, .what = "Loading_estimates", pop_loadings)
exo_construct_cor <- comparecSEM(out, .what = "Exo_construct_correlation", pop_exo_construct_cor)
test_that("3 exo constructs; <= 7 endo constructs; 2ndorder; works", {
  expect_equal(path$Estimate, path$Pop_value)
  expect_equal(loadings$Estimate, loadings$Pop_value)
  expect_equal(exo_construct_cor$Estimate, exo_construct_cor$Pop_value)
})
### 4 Construct correlations are supplied instead of a structural model --------
# No structural model at all: all construct relations are given as
# correlations, which cSEM.DGP accepts as long as every correlation is supplied.
dgp_only_correlations <- "
# Construct correlations
EXPE ~~ 0.3*IMAG
# Composite model
EXPE =~ 0.7*expe1 + 0.8*expe2
IMAG <~ 0.7*imag1 + 0.9*imag2
# Indicator correlation
imag1 ~~ 0.4*imag2
"
pop_loadings <- c(0.7, 0.8, 0.7, 0.8)
pop_construct_cor <- 0.3
pop_indicator_cor <- 0.4
dat_only_correlations <- generateData(dgp_only_correlations, .empirical = TRUE)
out <- csem(dat_only_correlations, dgp_only_correlations,
            .PLS_weight_scheme_inner = "centroid")
loadings <- comparecSEM(out, .what = "Loading_estimates", pop_loadings)
construct_cor <- comparecSEM(out, .what = "Exo_construct_correlation", pop_construct_cor)
indicator_cor <- comparecSEM(out, .what = "Indicator_correlation", pop_indicator_cor)
test_that("Correlation instead of structural model works", {
  expect_equal(loadings$Estimate, loadings$Pop_value)
  expect_equal(construct_cor$Estimate, construct_cor$Pop_value)
  expect_equal(indicator_cor$Estimate, indicator_cor$Pop_value)
})
#===============================================================================
### Data generating processes that cSEM.DGP can not handle ---------------------
#===============================================================================
### 1 Exogenous construct and > 7 endogenous constructs ------------------------
# With one exogenous construct at most 7 endogenous constructs are supported
# (see the "What is allowed" notes at the top of this file); eta1-eta8 here
# exceed that limit, so generateData() must error.
dgp_1exo_error <- " # more than 7 endogenous constructs
eta1 ~ 0.4*xi1
eta2 ~ 0.4*eta1 + 0.4*xi1
eta3 ~ 0.4*eta2 + 0.3*eta1
eta4 ~ 0.4*eta1 + 0.2*eta2 + 0.4*eta3
eta5 ~ 0.4*eta4
eta6 ~ 0.4*eta4 + 0.2*eta5
eta7 ~ 0.4*eta6
eta8 ~ 0.4*eta7
xi1 =~ 0.7*x1 + 0.8*x2
eta1 =~ 0.7*y11 + 0.8*y12
eta2 =~ 0.7*y21 + 0.8*y22
eta3 =~ 0.7*y31 + 0.8*y32
eta4 =~ 0.7*y41 + 0.8*y42
eta5 =~ 0.7*y51 + 0.8*y52
eta6 =~ 0.7*y61 + 0.8*y62
eta7 =~ 0.7*y71 + 0.8*y72
eta8 =~ 0.7*y81 + 0.8*y82
"
test_that("1 exo construct; > 7 endo constructs; fails",
          expect_error(generateData(dgp_1exo_error)))
### More than 5 exogenous constructs -------------------------------------------
# A maximum of 5 exogenous constructs is allowed when a structural model is
# supplied (see the notes at the top of this file); xi1-xi6 exceed that limit.
dgp_5exo_error <- " # more than 5 exogenous
eta1 ~ 0.4*xi1 + 0.4*xi2 + 0.4*xi3 + 0.4*xi4
eta2 ~ 0.4*eta1 + 0.4*xi5 + 0.5*xi6
xi1 =~ 0.7*x1 + 0.8*x2
xi2 =~ 0.7*y11 + 0.8*y12
xi3 =~ 0.7*y21 + 0.8*y22
xi4 =~ 0.7*y31 + 0.8*y32
xi5 =~ 0.7*y41 + 0.8*y42
xi6 =~ 0.7*y51 + 0.8*y52
eta1 =~ 0.7*y71 + 0.8*y72
eta2 =~ 0.7*y81 + 0.8*y82
"
# Fixed: the description was copy-pasted from the 1-exo/>7-endo test above.
test_that("more than 5 exo constructs; fails",
          expect_error(generateData(dgp_5exo_error)))
|
e93207c36dc1ef3d58076f8cd9a43365870b8f3c | 85979b19f7cc84ff4c55d6af0a25a68ee4b7b2d8 | /COMPARISON_DAILY_Ground_HIS_Time_series_00_04.R | ac5dd9c9c801c9f017f220aebadc8fef8822e7cd | [] | no_license | karafede/amal_scripts | c7b57ea99624e6a8157357471263200ca2b68747 | 44b06eb9f805cc81349eb2460241f8cada91dd96 | refs/heads/master | 2018-10-13T02:25:57.230891 | 2018-07-26T05:44:51 | 2018-07-26T05:44:51 | 110,553,054 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 44,056 | r | COMPARISON_DAILY_Ground_HIS_Time_series_00_04.R |
library(readr)
library(dplyr)
library(lubridate)
library(raster)
library(rgdal)
library(NISTunits)
library(stringr)
library(reshape2)
setwd("Z:/Data_analysis")
# merge extracted WRF-data HISTORICAL with ground observations 2000-2004--------------------------------------------
####################################################################################################
############################# Extract HISTORICAL Variables #########################################
####################################################################################################
# Read the per-variable WRF historical extractions (hourly values per station).
extracted_WRF_HIS_Rain <- read.csv("Z:/Data_analysis/HIS_tif/extracted_WRF_Rain_2000_2004.csv")
extracted_WRF_HIS_RH <- read.csv("Z:/Data_analysis/HIS_tif/extracted_WRF_RH_2000_2004.csv")
extracted_WRF_HIS_SWD <- read.csv("Z:/Data_analysis/HIS_tif/extracted_WRF_SWD_2000_2004.csv")
extracted_WRF_HIS_Temp <- read.csv("Z:/Data_analysis/HIS_tif/extracted_WRF_Temp_2000_2004.csv")
extracted_WRF_HIS_WS <- read.csv("Z:/Data_analysis/HIS_tif/extracted_WRF_WS_2000_2004.csv")
# Merge the five variables into one hourly table keyed on station + timestamp,
# keeping only the columns of interest.
all_WRF_data_HIS <- extracted_WRF_HIS_Rain %>%
  left_join(extracted_WRF_HIS_RH, by = c("station", "DateTime")) %>%
  left_join(extracted_WRF_HIS_SWD, by = c("station", "DateTime")) %>%
  left_join(extracted_WRF_HIS_Temp, by = c("station", "DateTime")) %>%
  left_join(extracted_WRF_HIS_WS, by = c("station", "DateTime")) %>%
  dplyr::select(DateTime,
                WRF_Rain_2000_2004,
                WRF_RH_2000_2004,
                WRF_SWD_2000_2004,
                WRF_Temp_2000_2004,
                WRF_WS_2000_2004,
                station)
write.csv(all_WRF_data_HIS, "Z:/Data_analysis/HIS_tif/WRF_HIS_time_series.csv") # hourly climate readings
# Aggregate the hourly series to daily values: means for the state variables,
# the daily sum for rainfall.
all_WRF_data_HIS <- all_WRF_data_HIS %>%
  mutate(Date = date(DateTime))
all_WRF_data_HIS_DAILY <- all_WRF_data_HIS %>%
  group_by(Date, station) %>%
  summarize(WS_WRF_AVG = mean(WRF_WS_2000_2004),
            TEMP_WRF_AVG = mean(WRF_Temp_2000_2004),
            RH_WRF_AVG = mean(WRF_RH_2000_2004),
            RAIN_WRF = sum(WRF_Rain_2000_2004),
            SWD_WRF_AVG = mean(WRF_SWD_2000_2004))
write.csv(all_WRF_data_HIS_DAILY, "Z:/Data_analysis/HIS_tif/WRF_HIS_time_series_DAILY.csv")
#################################################################################
############## Ground observations ##############################################
#################################################################################
# Raw ground-station files: hourly climate readings per station plus the
# daily rainfall tables (Mezaira separate; Abu Dhabi / Al Ain share one file).
GROUND_Mezaira_all <- read.csv("Z:/Data_analysis/Ground_Observations/Final/Mezaira03_05.csv") # RH, Temp, WS -- no rainfall
GROUND_AbuDhabi_all <- read.csv("Z:/Data_analysis/Ground_Observations/Final/AbuDhabi00_05.csv")
GROUND_AlAin_all <- read.csv("Z:/Data_analysis/Ground_Observations/Final/AlAin00_04.csv")
GROUND_Mezaira_Rainfall <- read.csv("Z:/Data_analysis/Ground_Observations/Final/Mezaira_Rainfall_03_04.csv")
GROUND_AD_AlAin_Rainfall <- read.csv("Z:/Data_analysis/Ground_Observations/Final/Rainfall_AD_Ain_00_04.csv")
############## Join and unify the ground observations for Mezaira ##############
################################################################################
# Parse the hourly timestamps and derive the calendar date.
GROUND_Mezaira_all <- GROUND_Mezaira_all %>% # all climate variables excluding rain
  mutate(DateTime = mdy_hm(DateTime),
         Date = date(DateTime))
# Daily mean / max / min of the hourly wind-speed, temperature and humidity.
# NOTE: the column order here must match the Abu Dhabi and Al Ain sections
# below, because the three frames are row-bound at the end.
GROUND_Mezaira_all_DAILY <- GROUND_Mezaira_all %>%
  group_by(Date) %>%
  summarize(WS_AVG = mean(WS),
            TEMP_AVG = mean(Temp),
            RH_AVG = mean(RH),
            MAX_WS = max(WS),
            MAX_TEMP = max(Temp),
            MAX_RH = max(RH),
            MIN_WS = min(WS),
            MIN_RH = min(RH),
            MIN_TEMP = min(Temp))
# Build a proper Date column in the rainfall table from Year/Month/Day.
GROUND_Mezaira_Rainfall$Date <- paste0(GROUND_Mezaira_Rainfall$Year, "-", GROUND_Mezaira_Rainfall$Month, "-",
                                       GROUND_Mezaira_Rainfall$Day)
GROUND_Mezaira_Rainfall <- GROUND_Mezaira_Rainfall %>%
  mutate(Date = ymd(Date))
# Attach the daily rainfall and drop the helper columns.
Ground_Mezaira_ALL_rain <- GROUND_Mezaira_all_DAILY %>% # all climate variables including rain
  left_join(GROUND_Mezaira_Rainfall, by = c("Date")) %>%
  dplyr::select(-Year, -Month, -Day)
Ground_Mezaira_ALL_rain$station <- "MEZAIRA"
############## Join and Unify Ground observations for Abu Dhabi##########################
#########################################################################################
# Parse hourly timestamps for the Abu Dhabi station.
# NOTE(review): this section duplicates the Mezaira / Al Ain logic almost
# line-for-line; a shared helper would remove the triplication.
GROUND_AbuDhabi_all <- GROUND_AbuDhabi_all %>%
  mutate(DateTime = mdy_hm(DateTime))
GROUND_AbuDhabi_all <- GROUND_AbuDhabi_all %>%
  mutate(Date = date(DateTime))
# Daily mean / max / min of the hourly climate readings (column order must
# match the other two stations for the final rbind).
GROUND_AbuDhabi_all_DAILY <- GROUND_AbuDhabi_all %>%
  group_by(Date) %>%
  summarize(WS_AVG = mean(WS),
            TEMP_AVG = mean(Temp),
            RH_AVG = mean(RH),
            MAX_WS = max(WS),
            MAX_TEMP = max(Temp),
            MAX_RH = max(RH),
            MIN_WS = min(WS),
            MIN_RH = min(RH),
            MIN_TEMP = min(Temp))
# Build a Date column in the shared AD/Al Ain rainfall table from its
# Year/Month/Day columns.  The Al Ain section repeats this on the same global
# object; that repetition is harmless because Year/Month/Day remain in place.
GROUND_AD_AlAin_Rainfall$Date <- paste0(GROUND_AD_AlAin_Rainfall$Year, "-", GROUND_AD_AlAin_Rainfall$Month, "-",
                                        GROUND_AD_AlAin_Rainfall$Day)
GROUND_AD_AlAin_Rainfall <- GROUND_AD_AlAin_Rainfall %>%
  mutate(Date = ymd(Date))
# Attach daily rainfall; drop the helper columns plus the Al Ain rain column,
# and rename the Abu Dhabi rain column to the common name "Rain".
GROUND_AbuDhabi_ALL_rain <- GROUND_AbuDhabi_all_DAILY %>% #all climate variables including rain
  left_join(GROUND_AD_AlAin_Rainfall, by = c("Date"))
GROUND_AbuDhabi_ALL_rain <- GROUND_AbuDhabi_ALL_rain %>%
  dplyr::select(-Year,
                - Month,
                - Day,
                - Al.Ain)
names(GROUND_AbuDhabi_ALL_rain)[names(GROUND_AbuDhabi_ALL_rain) == 'Abu.Dhabi'] <- 'Rain'
GROUND_AbuDhabi_ALL_rain$station <- "ABU DHABI INTL"
############## Join and Unify Ground observations for Al Ain ##########################
#########################################################################################
# Parse hourly timestamps for the Al Ain station (same pattern as above).
GROUND_AlAin_all <- GROUND_AlAin_all %>%
  mutate(DateTime = mdy_hm(DateTime))
GROUND_AlAin_all <- GROUND_AlAin_all %>%
  mutate(Date = date(DateTime))
# Daily mean / max / min of the hourly climate readings.
GROUND_AlAin_all_DAILY <- GROUND_AlAin_all %>%
  group_by(Date) %>%
  summarize(WS_AVG = mean(WS),
            TEMP_AVG = mean(Temp),
            RH_AVG = mean(RH),
            MAX_WS = max(WS),
            MAX_TEMP = max(Temp),
            MAX_RH = max(RH),
            MIN_WS = min(WS),
            MIN_RH = min(RH),
            MIN_TEMP = min(Temp))
# Re-derive the Date column in the shared rainfall table (already done in the
# Abu Dhabi section; harmless repetition since Year/Month/Day are kept).
GROUND_AD_AlAin_Rainfall$Date <- paste0(GROUND_AD_AlAin_Rainfall$Year, "-", GROUND_AD_AlAin_Rainfall$Month, "-",
                                        GROUND_AD_AlAin_Rainfall$Day)
GROUND_AD_AlAin_Rainfall <- GROUND_AD_AlAin_Rainfall %>%
  mutate(Date = ymd(Date))
# Join rainfall; drop helper columns and the Abu Dhabi rain column, and
# rename the Al Ain rain column to the common name "Rain".
GROUND_AlAin_All_rain <- GROUND_AlAin_all_DAILY %>% #all climate variables including rain
  left_join(GROUND_AD_AlAin_Rainfall, by = c("Date"))
GROUND_AlAin_All_rain <- GROUND_AlAin_All_rain %>%
  dplyr::select(-Year,
                - Month,
                - Day,
                - Abu.Dhabi)
names(GROUND_AlAin_All_rain)[names(GROUND_AlAin_All_rain) == 'Al.Ain'] <- 'Rain'
GROUND_AlAin_All_rain$station <- "AL AIN INTL"
################################################################################
################################################################################
# Stack the three per-station daily frames into one table and persist it.
GROUND_ALL_DAILY <- rbind(GROUND_AlAin_All_rain,
                          GROUND_AbuDhabi_ALL_rain,
                          Ground_Mezaira_ALL_rain)
write.csv(GROUND_ALL_DAILY, "Z:/Data_analysis/HIS_tif/ALL_GROUND_OBSERVATION_DAILY.csv")
##########################################################################################################
######### JOINING WRF HIS runs with the Ground Observations data #########################################
##########################################################################################################
# Reload the daily WRF-historical and ground-observation tables from disk and
# merge them into a single daily data set keyed on date and station.
WRF_HIS_DAILY <- read.csv("Z:/Data_analysis/HIS_tif/WRF_HIS_time_series_DAILY.csv")
GROUND_ALL_DAILY <- read.csv("Z:/Data_analysis/HIS_tif/ALL_GROUND_OBSERVATION_DAILY.csv")
# Print both structures to the console as a quick sanity check.
str(WRF_HIS_DAILY)
str(GROUND_ALL_DAILY)
# read.csv() returns Date as character; parse it before joining.
WRF_HIS_DAILY <- WRF_HIS_DAILY %>% mutate(Date = ymd(Date))
GROUND_ALL_DAILY <- GROUND_ALL_DAILY %>% mutate(Date = ymd(Date))
WRF_HIS_OBSERVATIONS <- left_join(WRF_HIS_DAILY, GROUND_ALL_DAILY,
                                  by = c("Date", "station"))
write.csv(WRF_HIS_OBSERVATIONS, "Z:/Data_analysis/HIS_tif/ALL_GROUND_WRF_HIS_DAILY.csv")
##############################################################
##############################################################
##############################################################
##############################################################
##############################################################
################ TEMPERATURE Time-Series ####################
##############################################################
# Reload the merged daily table and plot WRF vs observed daily mean
# temperature per station, with a linear trend line, then save to PNG.
WRF_HIS_OBSERVATIONS <- read.csv("Z:/Data_analysis/HIS_tif/ALL_GROUND_WRF_HIS_DAILY.csv")
str(WRF_HIS_OBSERVATIONS)
WRF_HIS_OBSERVATIONS$Date <- ymd(WRF_HIS_OBSERVATIONS$Date)
# Drop rows with non-positive WRF temperature (missing/invalid extractions).
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS %>%
  filter(TEMP_WRF_AVG > 0)
# scale_x_datetime below requires a POSIXct x axis.
WRF_HIS_OBSERVATIONS$Date <- as.POSIXct(WRF_HIS_OBSERVATIONS$Date)
library(ggplot2)
library(scales)
# NOTE(review): the colours are hard-coded outside aes(), so
# scale_color_discrete() produces no legend here -- map col inside aes() if a
# legend is wanted.  Also, ylim(10, 45) removes (not just hides) data outside
# that range before stat_smooth is computed.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, TEMP_WRF_AVG)) +
  theme_bw() +
  geom_line(aes(y = TEMP_WRF_AVG, col = "TEMP_WRF_AVG"), alpha=1, col="blue") +
  geom_line(aes(y = TEMP_AVG, col = "TEMP_AVG"), alpha=1, col="red") +
  scale_color_discrete(name = "Y series", labels = c("TEMP_WRF_AVG", "TEMP_AVG")) +
  stat_smooth(method = "lm") +
  # geom_smooth(method="lm", aes(y = TEMP_AVG, col = "TEMP_AVG"), formula = y ~ poly(x, 26), size = 1, fill = "yellow", col = "black") +
  facet_wrap(~ station) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Average Temperature ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(10, 45) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### save plot ###############################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"TREND_Temp_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#############################################################################
# Average each (month, day, station) combination across the five years to get
# the observed daily temperature seasonality.
OBSERVATIONS_mean <- WRF_HIS_OBSERVATIONS %>%
  mutate(year = year(Date),
         month = month(Date),
         day = day(Date)) %>%
  group_by(month,
           day,
           station) %>%
  summarise(daily_SEASONALITY_TEMP_mean = mean(TEMP_AVG, na.rm = TRUE))
# NOTE(review): the slices below repeat the seasonality table once per year
# using hard-coded row counts (1083/1092/1089 rows, presumably 361/364/363
# station-days per year) so that the result lines up row-for-row with
# WRF_HIS_OBSERVATIONS for the cbind.  This silently breaks if the filtering
# above changes the row counts -- verify against the current data.
seasonality_means <- rbind(OBSERVATIONS_mean[1:1083,],
                           OBSERVATIONS_mean[1:1092,],
                           OBSERVATIONS_mean[1:1089,],
                           OBSERVATIONS_mean[1:1089,],
                           OBSERVATIONS_mean[1:1092,])
seasonality_means <- as.data.frame(seasonality_means)
# Column-bind the repeated seasonality onto the daily table (row order, not a
# key join, provides the alignment -- see the note above).
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, seasonality_means)
######################################################
# ### Fit function and regression label for the ground observations ----------
#### this function forces the regression through the origin (no intercept)
######################################################
# Build a plotmath label "y = b*x, r^2 = ..." for a no-intercept regression,
# intended for annotating ggplot facets.
# NOTE(review): the formula references the GLOBAL objects WRF_HIS_OBSERVATIONS
# and seasonality_means rather than columns of `df`, so the `df` argument is
# effectively ignored.  This looks like a copy-paste from the
# Val_PM25 ~ Val_AOD template in the commented block below -- confirm the
# intended column names before using this function.
lm_eqn <- function(df){
  m <- lm(WRF_HIS_OBSERVATIONS ~ -1 + seasonality_means, df);
  eq <- substitute(italic(y) == b %.% italic(x)*","~~italic(r)^2~"="~r2,
                   list(b = format(coef(m)[1], digits = 2),
                        r2 = format(summary(m)$r.squared, digits = 3)))
  as.character(as.expression(eq));
}
## NOTE: the fit above suppresses the intercept (the "-1" term); an
## intercept-including variant is not defined here.
#####################################################################################
# plot with regression line-----
# # jpeg('Z:/Data_analysis/plots_comparison/Ground_vs_HIS_WRF.jpg',
# jpeg('Z:/Data_analysis/plots_comparison/Ground_vs_HIS_WRF.jpg',
# quality = 100, bg = "white", res = 200, width = 7, height = 7, units = "in")
# par(mar=c(4, 10, 9, 2) + 0.3)
# oldpar <- par(las=1)
#
#
# # define regression equation for each season
# eq <- ddply(seasonality_means, .(OBSERVATIONS_mean),lm_eqn)
#
#
# # ggplot(PM25_AOD, aes(x=Val_AOD, y=Val_PM25, color = season)) +
# ggplot(PM25_AOD, aes(x=Val_AOD, y=Val_PM25)) +
# theme_bw() +
# # geom_point(size = 2) +
# geom_jitter(colour=alpha("black",0.15) ) +
# facet_grid(season ~ .) +
# theme( strip.text = element_text(size = 20)) +
# scale_color_manual(values = c("#ff0000", "#0000ff", "#000000", "#ffb732")) +
# # geom_smooth(method="lm") + # Add linear regression line
# geom_smooth(method = "lm", formula = y ~ -1 + x) + # force fit through the origin
# ylab(expression(paste(PM[2.5], " (µg/",m^3, ")"))) +
# xlab(expression("AOD (MODIS)")) +
# ylim(c(0,200)) +
# xlim(c(0.25,2)) +
# theme(axis.title.y = element_text(face="bold", colour="black", size=15),
# axis.text.y = element_text(angle=0, vjust=0.5, size=15)) +
# theme(axis.title.x = element_text(face="bold", colour="black", size=15),
# axis.text.x = element_text(angle=0, vjust=0.5, size=15)) +
#
# geom_text(data = eq, aes(x = 1.7, y = 20, label = V1),
# parse = TRUE, inherit.aes=FALSE, size = 5, color = "black" ) +
# facet_grid(season ~.)
#
# # geom_text(x = 1.5, y = 55, label = lm_eqn(PM25_AOD),
# # size = 5,
# # color = "red",
# # parse = TRUE)
#
#
# par(oldpar)
# dev.off()
#
#
#
# fit <- lm(Val_PM25 ~ -1 + Val_AOD, PM25_AOD)
# summary(fit)
###############################################################
##############################################################
######################################################
# fit the smoothing curve for the seasonality data ###
######################################################
# Fit a degree-26 polynomial to the seasonality means as a smooth seasonal
# temperature curve; poly() needs a numeric predictor, hence the conversion.
AAA <- WRF_HIS_OBSERVATIONS
AAA$Date <- as.numeric(AAA$Date) # need Date to be numeric
fit <- lm(formula = AAA$daily_SEASONALITY_TEMP_mean ~ poly(AAA$Date, 26))
# fit$residuals
# NOTE(review): this refits the identical model inside predict();
# predict(fit) would reuse `fit` and avoid the duplicate computation.
smoothed_curve_Temp <- predict(lm(formula = AAA$daily_SEASONALITY_TEMP_mean ~ poly(AAA$Date, 26)))
summary(smoothed_curve_Temp)
# Root mean squared error of a residual vector.
RMSE <- function(error) {
  sqrt(mean(error * error))
}
RMSE(fit$residuals) # RMSE of the polynomial seasonality fit (printed)
# Mean Absolute Error of a numeric vector of residuals/errors.
mae <- function(error) {
  abs_err <- abs(error)
  mean(abs_err)
}
# Report the in-sample MAE of the seasonal fit.
mae(fit$residuals)
# Attach the smoothed seasonal curve, then derive:
#   BIAS_Temp     = observed daily mean minus the smoothed climatology
#   Corr_TEMP_WRF = WRF daily mean minus that bias
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, smoothed_curve_Temp)
WRF_HIS_OBSERVATIONS$BIAS_Temp <- WRF_HIS_OBSERVATIONS$TEMP_AVG - WRF_HIS_OBSERVATIONS$smoothed_curve_Temp
# WRF_HIS_OBSERVATIONS$BIAS <- WRF_HIS_OBSERVATIONS$TEMP_AVG - WRF_HIS_OBSERVATIONS$daily_SEASONALITY_TEMP_mean
# NOTE(review): here the bias is SUBTRACTED from WRF, but the Tmax/Tmin
# sections below ADD it (Corr_* = WRF + BIAS) — confirm which sign is intended.
WRF_HIS_OBSERVATIONS$Corr_TEMP_WRF <- WRF_HIS_OBSERVATIONS$TEMP_WRF_AVG - WRF_HIS_OBSERVATIONS$BIAS_Temp
# Persist the table (written into the current working directory).
write.csv(WRF_HIS_OBSERVATIONS, "WRF_HIS_OBSERVATIONS.csv")
#############################################################################################################
### Copy the file WRF_HIS_OBSERVATIONS.csv manually and get the BIAS column saved alone with date and station.
### Plot WRF daily mean temperature vs. observations vs. bias-corrected WRF, faceted by station.
WRF_HIS_OBSERVATIONS <- as.data.frame(WRF_HIS_OBSERVATIONS)
# BIAS_Temp <- WRF_HIS_OBSERVATIONS %>%
#   dplyr::select(Date,
#                 station,
#                 BIAS_Temp)
# NOTE(review): each geom_line sets col= OUTSIDE aes(); in ggplot2 a fixed
# aesthetic overrides the mapped one, so scale_color_discrete() below
# produces no legend — the labels argument has no visible effect.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, TEMP_WRF_AVG)) +
  theme_bw() +
  geom_line(aes(y = TEMP_WRF_AVG, col = "TEMP_WRF_AVG"), alpha=1, col="blue") +
  geom_line(aes(y = TEMP_AVG, col = "TEMP_AVG"), alpha=1, col="black") +
  # geom_line(aes(y = smoothed_curve_Temp, col = "smoothed_curve_Temp"), alpha=1, col="black") +
  geom_line(aes(y = Corr_TEMP_WRF, col = "Corr_TEMP_WRF"), alpha=1, col="green") +
  scale_color_discrete(name = "Y series", labels = c("TEMP_WRF_AVG", "TEMP_AVG", "daily_SEASONALITY_TEMP_mean")) +
  # stat_smooth(method = "lm") +
  # geom_smooth(method="lm", aes(y = TEMP_AVG, col = "TEMP_AVG"), formula = y ~ poly(x, 26), size = 1, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Average Temperature ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(10, 45) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
##############################################################################
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"Corrected_Temp_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
##############################################################################
# Plot the daily mean-temperature bias (observed minus seasonal climatology),
# faceted by station, with a degree-26 polynomial trend overlay.
##############################################################################
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, BIAS_Temp)) +
  theme_bw() +
  geom_line(aes(y = BIAS_Temp, col = "BIAS_Temp"), alpha=1, col="blue") +
  scale_color_discrete(name = "Y series", labels = c("BIAS_Temp")) +
  # stat_smooth(method = "lm", col="red") +
  geom_smooth(method="lm", aes(y = BIAS_Temp, col = "BIAS_Temp"), formula = y ~ poly(x, 26), size = 0.5, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  # FIX: axis previously mislabelled "Daily Average Temperature"; this plot
  # shows the BIAS (note ylim -10..10), matching the Tmax/Tmin bias plots.
  ylab(expression(paste("Daily Average Temperature Bias ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(-10, 10) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"BIAS_Temp_Time_series_comparison_WRFHIS_GroundObservations2000_2004.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#######################################################################################################
#######################################################################################################
#######################################################################################################
################ Maximum Temperature Time-Series ######################################################
#######################################################################################################
#######################################################################################################
# Reload the joined WRF/ground daily table and attach the MIN/MAX ground
# temperature statistics for 2000-2004 before plotting the Tmax trend.
WRF_HIS_OBSERVATIONS <- read.csv("Z:/Data_analysis/HIS_tif/ALL_GROUND_WRF_HIS_DAILY.csv")
Temp_STATISTICS_2000_2004 <- read.csv("Z:/Data_analysis/HIS_tif/Temp_STATISTICS_2000_2004.csv")
# Harmonise the join key name (DATE -> Date).
names(Temp_STATISTICS_2000_2004)[names(Temp_STATISTICS_2000_2004) == 'DATE'] <- 'Date'
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS %>%
  left_join(Temp_STATISTICS_2000_2004, by = c("Date", "station"))
# join MIN and MAX temp ground observations
write.csv(WRF_HIS_OBSERVATIONS, "Z:/Data_analysis/HIS_tif/WRF_HIS_Tmax_time_series_DAILY.csv") # daily climate readings
str(WRF_HIS_OBSERVATIONS)
WRF_HIS_OBSERVATIONS$Date <- ymd(WRF_HIS_OBSERVATIONS$Date)
# Drop rows with non-positive WRF Tmax (treated here as missing/invalid).
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS %>%
  filter(daily_MAX_WRF_TEMP > 0)
WRF_HIS_OBSERVATIONS$Date <- as.POSIXct(WRF_HIS_OBSERVATIONS$Date) # POSIXct needed by scale_x_datetime()
library(ggplot2)
library(scales)
# WRF Tmax (blue) vs. observed Tmax (red) with a linear trend, per station.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, daily_MAX_WRF_TEMP)) +
  theme_bw() +
  geom_line(aes(y = daily_MAX_WRF_TEMP, col = "daily_MAX_WRF_TEMP"), alpha=1, col="blue") +
  geom_line(aes(y = MAX_TEMP, col = "MAX_TEMP"), alpha=1, col="red") +
  scale_color_discrete(name = "Y series", labels = c("daily_MAX_WRF_TEMP", "MAX_TEMP")) +
  stat_smooth(method = "lm") +
  facet_wrap(~ station) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Maximum Temperature ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(10, 45) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"TREND_Tmax_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#############################################################################
# Seasonality: average observed Tmax for each (month, day, station) across
# all years, giving a mean annual cycle per station.
OBSERVATIONS_mean <- WRF_HIS_OBSERVATIONS %>%
  mutate(year = year(Date),
         month = month(Date),
         day = day(Date)) %>%
  group_by(month,
           day,
           station) %>%
  summarise(daily_SEASONALITY_Tmax_mean = mean(MAX_TEMP, na.rm = TRUE))
# NOTE(review): the magic row counts below (1083/1092/1089/...) appear to
# replicate the per-station seasonal cycle once per year so cbind() lines up
# row-for-row with the daily table — fragile; verify the alignment holds if
# the input data change.
seasonality_means <- rbind(OBSERVATIONS_mean[1:1083,],
                           OBSERVATIONS_mean[1:1092,],
                           OBSERVATIONS_mean[1:1089,],
                           OBSERVATIONS_mean[1:1089,],
                           OBSERVATIONS_mean[1:1092,])
seasonality_means <- as.data.frame(seasonality_means)
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, seasonality_means)
######################################################
# Fit the degree-26 polynomial smoother to the Tmax seasonality.
######################################################
AAA <- WRF_HIS_OBSERVATIONS
AAA$Date <- as.numeric(AAA$Date) # need Date to be numeric for poly()
fit <- lm(formula = AAA$daily_SEASONALITY_Tmax_mean ~ poly(AAA$Date, 26))
# fit$residuals
smoothed_curve_Tmax <- predict(lm(formula = AAA$daily_SEASONALITY_Tmax_mean ~ poly(AAA$Date, 26)))
summary(smoothed_curve_Tmax)
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, smoothed_curve_Tmax)
WRF_HIS_OBSERVATIONS$BIAS_Tmax <- WRF_HIS_OBSERVATIONS$MAX_TEMP - WRF_HIS_OBSERVATIONS$smoothed_curve_Tmax
# WRF_HIS_OBSERVATIONS$BIAS <- WRF_HIS_OBSERVATIONS$MAX_TEMP - WRF_HIS_OBSERVATIONS$daily_SEASONALITY_TEMP_mean
# NOTE(review): bias is ADDED here, but the mean-temperature and RH sections
# SUBTRACT it — confirm which sign convention is intended.
WRF_HIS_OBSERVATIONS$Corr_MAX_TEMP_WRF <- WRF_HIS_OBSERVATIONS$daily_MAX_WRF_TEMP + WRF_HIS_OBSERVATIONS$BIAS_Tmax
write.csv(WRF_HIS_OBSERVATIONS, "WRF_HIS_OBSERVATIONS.csv")
######################### make a copy of the BIAS_Tmax manually ###########################################
# Plot WRF daily Tmax vs. observed Tmax vs. bias-corrected WRF Tmax per station.
# NOTE: each geom_line sets col= outside aes(), so the discrete colour scale
# below produces no legend.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, daily_MAX_WRF_TEMP)) +
  theme_bw() +
  geom_line(aes(y = daily_MAX_WRF_TEMP, col = "daily_MAX_WRF_TEMP"), alpha=1, col="blue") +
  geom_line(aes(y = MAX_TEMP, col = "MAX_TEMP"), alpha=1, col="black") +
  #geom_line(aes(y = smoothed_curve, col = "smoothed_curve"), alpha=1, col="black") +
  geom_line(aes(y = Corr_MAX_TEMP_WRF, col = "Corr_MAX_TEMP_WRF"), alpha=1, col="green") +
  # FIX: legend labels were copy-pasted from the mean-temperature section;
  # use the Tmax series names (mirrors the Tmin section below).
  scale_color_discrete(name = "Y series", labels = c("daily_MAX_WRF_TEMP", "MAX_TEMP", "daily_SEASONALITY_Tmax_mean")) +
  # stat_smooth(method = "lm") +
  # geom_smooth(method="lm", aes(y = TEMP_AVG, col = "TEMP_AVG"), formula = y ~ poly(x, 26), size = 1, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  # FIX: axis previously said "Daily Average Temperature" on a maximum-
  # temperature plot.
  ylab(expression(paste("Daily Maximum Temperature ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(10, 45) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"Corrected_Tmax_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
##############################################################################
# Plot the daily Tmax bias (observed minus seasonal climatology) per station,
# with a degree-26 polynomial trend overlay.
##############################################################################
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, BIAS_Tmax)) +
  theme_bw() +
  geom_line(aes(y = BIAS_Tmax, col = "BIAS_Tmax"), alpha=1, col="blue") +
  scale_color_discrete(name = "Y series", labels = c("BIAS_Tmax")) +
  # stat_smooth(method = "lm", col="red") +
  geom_smooth(method="lm", aes(y = BIAS_Tmax, col = "BIAS_Tmax"), formula = y ~ poly(x, 26), size = 0.5, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Max Temperature Bias ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(-10, 10) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"BIAS_Tmax_Time_series_comparison_WRFHIS_GroundObservations2000_2004.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#######################################################################################################
#######################################################################################################
#######################################################################################################
#######################################################################################################
################ Minimum Temperature Time-Series ######################################################
#######################################################################################################
# Reload the joined WRF/ground daily table and attach the MIN/MAX ground
# temperature statistics for 2000-2004 before plotting the Tmin trend.
WRF_HIS_OBSERVATIONS <- read.csv("Z:/Data_analysis/HIS_tif/ALL_GROUND_WRF_HIS_DAILY.csv")
Temp_STATISTICS_2000_2004 <- read.csv("Z:/Data_analysis/HIS_tif/Temp_STATISTICS_2000_2004.csv")
# Harmonise the join key name (DATE -> Date).
names(Temp_STATISTICS_2000_2004)[names(Temp_STATISTICS_2000_2004) == 'DATE'] <- 'Date'
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS %>%
  left_join(Temp_STATISTICS_2000_2004, by = c("Date", "station"))
# join MIN and MAX temp ground observations
write.csv(WRF_HIS_OBSERVATIONS, "Z:/Data_analysis/HIS_tif/WRF_HIS_Tmin_time_series_DAILY.csv") # daily climate readings
str(WRF_HIS_OBSERVATIONS)
WRF_HIS_OBSERVATIONS$Date <- ymd(WRF_HIS_OBSERVATIONS$Date)
# Drop rows with non-positive WRF Tmin (treated here as missing/invalid).
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS %>%
  filter(daily_MIN_WRF_TEMP > 0)
WRF_HIS_OBSERVATIONS$Date <- as.POSIXct(WRF_HIS_OBSERVATIONS$Date) # POSIXct needed by scale_x_datetime()
library(ggplot2)
library(scales)
# WRF Tmin (blue) vs. observed Tmin (red) with a linear trend, per station.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, daily_MIN_WRF_TEMP)) +
  theme_bw() +
  geom_line(aes(y = daily_MIN_WRF_TEMP, col = "daily_MIN_WRF_TEMP"), alpha=1, col="blue") +
  geom_line(aes(y = MIN_TEMP, col = "MIN_TEMP"), alpha=1, col="red") +
  scale_color_discrete(name = "Y series", labels = c("daily_MIN_WRF_TEMP", "MIN_TEMP")) +
  stat_smooth(method = "lm") +
  facet_wrap(~ station) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Minimum Temperature ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(0, 40) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"TREND_Tmin_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#############################################################################
# Seasonality: average observed Tmin for each (month, day, station) across
# all years, giving a mean annual cycle per station.
OBSERVATIONS_mean <- WRF_HIS_OBSERVATIONS %>%
  mutate(year = year(Date),
         month = month(Date),
         day = day(Date)) %>%
  group_by(month,
           day,
           station) %>%
  summarise(daily_SEASONALITY_Tmin_mean = mean(MIN_TEMP, na.rm = TRUE))
# NOTE(review): row counts (1083/1083/1088/...) differ from the Tmax section
# (1083/1092/1089/...) — presumably they track each year's surviving row count
# after the daily_MIN_WRF_TEMP > 0 filter; verify cbind() alignment below.
seasonality_means <- rbind(OBSERVATIONS_mean[1:1083,],
                           OBSERVATIONS_mean[1:1083,],
                           OBSERVATIONS_mean[1:1088,],
                           OBSERVATIONS_mean[1:1088,],
                           OBSERVATIONS_mean[1:1088,])
seasonality_means <- as.data.frame(seasonality_means)
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, seasonality_means)
######################################################
# Fit the degree-26 polynomial smoother to the Tmin seasonality.
######################################################
AAA <- WRF_HIS_OBSERVATIONS
AAA$Date <- as.numeric(AAA$Date) # need Date to be numeric for poly()
fit <- lm(formula = AAA$daily_SEASONALITY_Tmin_mean ~ poly(AAA$Date, 26))
# fit$residuals
smoothed_curve_Tmin <- predict(lm(formula = AAA$daily_SEASONALITY_Tmin_mean ~ poly(AAA$Date, 26)))
summary(smoothed_curve_Tmin)
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, smoothed_curve_Tmin)
WRF_HIS_OBSERVATIONS$BIAS_Tmin <- WRF_HIS_OBSERVATIONS$MIN_TEMP - WRF_HIS_OBSERVATIONS$smoothed_curve_Tmin
# WRF_HIS_OBSERVATIONS$BIAS <- WRF_HIS_OBSERVATIONS$MIN_TEMP - WRF_HIS_OBSERVATIONS$daily_SEASONALITY_TEMP_mean
# NOTE(review): bias is ADDED here, but the mean-temperature and RH sections
# SUBTRACT it — confirm which sign convention is intended.
WRF_HIS_OBSERVATIONS$Corr_MIN_TEMP_WRF <- WRF_HIS_OBSERVATIONS$daily_MIN_WRF_TEMP + WRF_HIS_OBSERVATIONS$BIAS_Tmin
write.csv(WRF_HIS_OBSERVATIONS, "WRF_HIS_OBSERVATIONS.csv")
####################### Make a copy of the BIAS_Tmin manually ###########################
# Plot WRF daily Tmin vs. observed Tmin vs. bias-corrected WRF Tmin per station.
# NOTE: each geom_line sets col= outside aes(), so the discrete colour scale
# below produces no legend.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, daily_MIN_WRF_TEMP)) +
  theme_bw() +
  geom_line(aes(y = daily_MIN_WRF_TEMP, col = "daily_MIN_WRF_TEMP"), alpha=1, col="blue") +
  geom_line(aes(y = MIN_TEMP, col = "MIN_TEMP"), alpha=1, col="black") +
  #geom_line(aes(y = smoothed_curve, col = "smoothed_curve"), alpha=1, col="black") +
  geom_line(aes(y = Corr_MIN_TEMP_WRF, col = "Corr_MIN_TEMP_WRF"), alpha=1, col="green") +
  scale_color_discrete(name = "Y series", labels = c("daily_MIN_WRF_TEMP", "MIN_TEMP", "daily_SEASONALITY_Tmin_mean")) +
  # stat_smooth(method = "lm") +
  # geom_smooth(method="lm", aes(y = MIN_TEMP, col = "MIN_TEMP"), formula = y ~ poly(x, 26), size = 1, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Minimum Temperature ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(0, 40) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"Corrected_Tmin_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
##############################################################################
# Plot the daily Tmin bias (observed minus seasonal climatology) per station,
# with a degree-26 polynomial trend overlay.
##############################################################################
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, BIAS_Tmin)) +
  theme_bw() +
  geom_line(aes(y = BIAS_Tmin, col = "BIAS_Tmin"), alpha=1, col="blue") +
  scale_color_discrete(name = "Y series", labels = c("BIAS_Tmin")) +
  # stat_smooth(method = "lm", col="red") +
  geom_smooth(method="lm", aes(y = BIAS_Tmin, col = "BIAS_Tmin"), formula = y ~ poly(x, 26), size = 0.5, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Min Temperature Bias ", " (", ~degree~C, ")"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(-10, 10) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"BIAS_Tmin_Time_series_comparison_WRFHIS_GroundObservations2000_2004.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#######################################################################################################
#######################################################################################################
######################################################################################################
################ Relative Humidity Time-Series #######################################################
######################################################################################################
#######################################################################################################
#######################################################################################################
### NOT COMPLETE
library(readr)
library(dplyr)
library(lubridate)
library(raster)
library(rgdal)
library(NISTunits)
library(stringr)
library(reshape2)
library(stats)
# NOTE(review): setwd() inside an analysis script is fragile — consider
# project-relative paths instead.
setwd("Z:/Data_analysis")
WRF_HIS_OBSERVATIONS <- read.csv("Z:/Data_analysis/HIS_tif/ALL_GROUND_WRF_HIS_DAILY.csv")
str(WRF_HIS_OBSERVATIONS)
WRF_HIS_OBSERVATIONS$Date <- ymd(WRF_HIS_OBSERVATIONS$Date)
# Drop rows with non-positive WRF relative humidity (treated as missing/invalid).
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS %>%
  filter(RH_WRF_AVG > 0)
WRF_HIS_OBSERVATIONS$Date <- as.POSIXct(WRF_HIS_OBSERVATIONS$Date) # POSIXct needed by scale_x_datetime()
library(ggplot2)
library(scales)
# WRF daily mean RH (blue) vs. observed RH (red) with a linear trend, per station.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, RH_WRF_AVG)) +
  theme_bw() +
  geom_line(aes(y = RH_WRF_AVG, col = "RH_WRF_AVG"), alpha=1, col="blue") +
  geom_line(aes(y = RH_AVG, col = "RH_AVG"), alpha=1, col="red") +
  scale_color_discrete(name = "Y series", labels = c("RH_WRF_AVG", "RH_AVG")) +
  stat_smooth(method = "lm") +
  # geom_smooth(method="lm", aes(y = TEMP_AVG, col = "RH_AVG"), formula = y ~ poly(x, 26), size = 1, fill = "yellow", col = "black") +
  facet_wrap(~ station) +
  theme(strip.text = element_text(size = 10)) +
  # FIX: axis unit was "(°C)" copied from the temperature sections; relative
  # humidity is a percentage (ylim 0..100), matching the corrected-RH plot.
  ylab(expression(paste("Daily Average Relative Humidity (%)"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(0, 100) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"TREND_RH_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
#############################################################################
# Seasonality: average observed RH for each (month, day, station) across all
# years, giving a mean annual cycle per station.
OBSERVATIONS_mean <- WRF_HIS_OBSERVATIONS %>%
  mutate(year = year(Date),
         month = month(Date),
         day = day(Date)) %>%
  group_by(month,
           day,
           station) %>%
  summarise(daily_SEASONALITY_RH_mean = mean(RH_AVG, na.rm = TRUE))
# WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS[order(WRF_HIS_OBSERVATIONS$station),]
# bind the above averages back to the data to create a typical seasonal trend
# seasonality_means <- rbind(OBSERVATIONS_mean[1:360, ], # 2000
#                            OBSERVATIONS_mean[1:364, ], # 2001
#                            OBSERVATIONS_mean[1:363, ], # 2002
#                            OBSERVATIONS_mean[1:363, ], # 2003
#                            OBSERVATIONS_mean[1:365, ]) # 2004
# repeat seasonality by 3 times (3 stations)
# seasonality_means <- rbind(seasonality_means,seasonality_means,seasonality_means)
# NOTE(review): magic row counts below must track each year's surviving row
# count after the RH_WRF_AVG > 0 filter; verify cbind() alignment downstream.
seasonality_means <- rbind(OBSERVATIONS_mean[1:1083,],
                           OBSERVATIONS_mean[1:1092,],
                           OBSERVATIONS_mean[1:1089,],
                           OBSERVATIONS_mean[1:1089,],
                           OBSERVATIONS_mean[1:1092,])
seasonality_means <- as.data.frame(seasonality_means)
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, seasonality_means)
######################################################
# Fit the degree-26 polynomial smoother to the RH seasonality.
######################################################
AAA <- WRF_HIS_OBSERVATIONS
AAA$Date <- as.numeric(AAA$Date) # need Date to be numeric for poly()
# FIX: degree was 25 here but 26 in the predict() below (and in every other
# section) — made consistent at 26 so `fit` matches the curve actually used.
fit <- lm(formula = AAA$daily_SEASONALITY_RH_mean ~ poly(AAA$Date, 26))
# fit$residuals
smoothed_curve_RH <- predict(lm(formula = AAA$daily_SEASONALITY_RH_mean ~ poly(AAA$Date, 26)))
summary(smoothed_curve_RH)
# NOTE(review): hard truncation to 5355 rows so cbind() lengths match —
# fragile if the input data change; verify row alignment.
WRF_HIS_OBSERVATIONS <- WRF_HIS_OBSERVATIONS[1:5355 , ]
WRF_HIS_OBSERVATIONS <- cbind(WRF_HIS_OBSERVATIONS, smoothed_curve_RH)
# BIAS_RH = observed RH minus the smoothed climatology; corrected WRF RH
# subtracts that bias (same convention as the mean-temperature section).
WRF_HIS_OBSERVATIONS$BIAS_RH <- WRF_HIS_OBSERVATIONS$RH_AVG - WRF_HIS_OBSERVATIONS$smoothed_curve_RH
# WRF_HIS_OBSERVATIONS$BIAS <- WRF_HIS_OBSERVATIONS$TEMP_AVG - WRF_HIS_OBSERVATIONS$daily_SEASONALITY_TEMP_mean
WRF_HIS_OBSERVATIONS$Corr_RH_WRF <- WRF_HIS_OBSERVATIONS$RH_WRF_AVG - WRF_HIS_OBSERVATIONS$BIAS_RH
write.csv(WRF_HIS_OBSERVATIONS, "WRF_HIS_OBSERVATIONS.csv")
# Plot WRF daily RH vs. smoothed climatology vs. bias-corrected WRF RH per
# station. NOTE: col= set outside aes() suppresses any legend.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, RH_WRF_AVG)) +
  theme_bw() +
  geom_line(aes(y = RH_WRF_AVG, col = "RH_WRF_AVG"), alpha=1, col="blue") +
  geom_line(aes(y = smoothed_curve_RH, col = "smoothed_curve_RH"), alpha=1, col="black") +
  geom_line(aes(y = Corr_RH_WRF, col = "Corr_RH_WRF"), alpha=1, col="green") +
  # scale_color_discrete(name = "Y series", labels = c("RH_WRF_AVG", "RH_AVG", "smoothed_curve_RH")) +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Average Relative Humidity %"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  ylim(0, 100) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
#### Save the plot as a PNG ##################################################
##############################################################################
output_folder <- "Z:/Data_analysis/plots_comparison/"
# FIX: filename previously said "Corrected_Temp_...", silently overwriting the
# corrected mean-temperature plot saved earlier; this is the RH plot.
png(paste0(output_folder,"Corrected_RH_GROUND_HIS_Time_series_comparison.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
# Plot the daily RH bias per station with a degree-26 polynomial trend overlay.
# FIX: this block referenced a non-existent column `BIAS` (the column created
# above is `BIAS_RH`), which would error at plot time; also the y-axis label
# wrongly said "Daily Average Temperature (°C)" for an RH plot, and the output
# filename now follows the BIAS_Temp/BIAS_Tmax/BIAS_Tmin naming pattern.
plot <- ggplot(WRF_HIS_OBSERVATIONS, aes(Date, BIAS_RH)) +
  theme_bw() +
  geom_line(aes(y = BIAS_RH, col = "BIAS_RH"), alpha=1, col="blue") +
  scale_color_discrete(name = "Y series", labels = c("BIAS_RH")) +
  # stat_smooth(method = "lm", col="red") +
  geom_smooth(method="lm", aes(y = BIAS_RH, col = "BIAS_RH"), formula = y ~ poly(x, 26), size = 0.5, fill = "yellow", col = "black") +
  facet_wrap(~ station, nrow=3) +
  theme(strip.text = element_text(size = 10)) +
  ylab(expression(paste("Daily Average Relative Humidity Bias (%)"))) +
  theme(axis.title.x=element_blank(),
        axis.text.x = element_text(angle=90, vjust=0.5, hjust = 0.5, size=9, colour = "black", face="bold")) +
  theme(axis.title.y = element_text(face="bold", colour="black", size=13),
        axis.text.y = element_text(angle=0, vjust=0.5, size=7, colour = "black")) +
  # NOTE(review): RH bias in percentage points may exceed +/-10; values outside
  # this range are dropped by ylim() — widen if lines look clipped.
  ylim(-10, 10) +
  scale_x_datetime(breaks = date_breaks("1 year"), labels = date_format("%Y"))
plot
output_folder <- "Z:/Data_analysis/plots_comparison/"
png(paste0(output_folder,"BIAS_RH_Time_series_comparison_WRFHIS_GroundObservations2000_2004.png"), width = 1800, height = 900,
    units = "px", pointsize = 50,
    bg = "white", res = 200)
print(plot)
dev.off()
|
3209dd6519a214c30564c407dd3d1f2da7293ab5 | 4d292de6f127f4793560727944c7ee62f44e3918 | /Quiz Week 1/13.R | 83453c769bf2be7f280f8ea05809d034cf559df5 | [] | no_license | armandyne/R-Programming | f58475e8c2540ea9259ddb039ba979581dfb8ae9 | 507f5eea0701fe0838bb1aae8e3e8a9fc59cd8a1 | refs/heads/master | 2021-07-07T02:51:08.077314 | 2017-10-02T19:18:09 | 2017-10-02T19:18:09 | 104,900,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 88 | r | 13.R | setwd("D:/coursera/R Programming")
# Load the homework dataset (path relative to the working directory set above)
# and print its row count.
x<-read.csv("quiz1_data/hw1_data.csv")
print(nrow(x))
eb760558b03f1877292f60a1000ebbbd29d99933 | 405f0cc8bf4fc2302c924015d4367e1c3cf7c09a | /man/str_count_setdiff.Rd | 861ad529c5f80e1381ef0d364c0646d58c92f353 | [] | no_license | cran/textTools | 46c777abf428fefe560c52f7d096adea58860e4b | 83bcb2e07bf66ccc2e65075dad052de8287cac39 | refs/heads/master | 2023-02-27T12:42:59.510094 | 2021-02-05T08:00:05 | 2021-02-05T08:00:05 | 336,253,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 666 | rd | str_count_setdiff.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text.table.R
\name{str_count_setdiff}
\alias{str_count_setdiff}
\title{Count the words in a vector that don't intersect with another vector (only counts unique words).}
\usage{
str_count_setdiff(x, y)
}
\arguments{
\item{x}{A vector of words.}
\item{y}{A vector of words to test against.}
}
\value{
A number, the count of unique words in x not also in y
}
\description{
Count the words in a vector that don't intersect with another vector (only counts unique words).
}
\examples{
str_count_setdiff(
x = c("a", "dog", "dog", "went", "to", "the", "store"),
y = c("dog", "to", "store")
)
}
|
66f7761de212432bc079e741d3b4263d070a3fe9 | e35593d0a185a626cf27d511a4dc87de0f9517f6 | /solutions/common-issues-04_embedded-subheaders.R | ee5e34e3a0b972caf772fc49cbdcc7dbabf591c6 | [] | no_license | gauravgupta88/data-cleaning-course | f26676635981cf45c7f022f9ff84a9065bbbbbef | ae6b48987493e9677e95e9cc6a14a4ecec254483 | refs/heads/master | 2023-04-24T15:49:37.317489 | 2021-05-12T02:23:02 | 2021-05-12T02:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,341 | r | common-issues-04_embedded-subheaders.R | ## %######################################################%##
# #
#### Missing, implicit, or misplaced ####
#### grouping variables - your turn ####
# #
## %######################################################%##
# Load the `primates2017` dataset bundled with 📦 `unheadr`
# Create a new column that groups the different species by taxonomic family.
# In biology, taxonomic families all end in the suffix "_DAE_"
# How many different ways can you identify the embedded subheaders in these data?
# load packages -----------------------------------------------------------
library(unheadr)
library(dplyr)
library(stringr)
# load data ---------------------------------------------------------------
data("primates2017")
# untangle subheaders to new column ---------------------------------------
primates2017 %>%
untangle2(regex = "DAE$", orig = scientific_name, new = family)
# How many different ways can you identify the embedded subheaders in these data?
# 2, because families are also the only strings in all upper case
primates2017 %>%
untangle2(regex = "^[A-Z]+$", orig = scientific_name, new = family) %>%
mutate(family = str_to_title(family)) # change to title case
|
9fd9c67debe411ab74d2103473235e4b48bb8678 | a83ffaeff962ab30f902c1460dfcefbc581dac75 | /9.R | cb0c6e0e7b8c66c1d5b1cac4a9f67839205d1960 | [] | no_license | nilesh-hegde-060/DSR-Lab-Programs | b97e0ff11f58e5352a0bdfc0a8c56b7072926ffe | 1642b13e1609c9784730f8651acf78d67984501e | refs/heads/master | 2020-09-08T04:36:04.547470 | 2019-11-11T16:03:59 | 2019-11-11T16:03:59 | 221,017,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 446 | r | 9.R | install.packages("rpart.plot")
library("rpart")
library("rpart.plot")
p <- read.csv("C:\\Users\\niles\\Desktop\\DSR-Lab\\DT.csv")
p2 <- data.frame(p)
p2
fit=rpart(play ~ outlook + Temperature +Humidity +Wind,method="class",data=p2,control=rpart.control(minsplit=1),parms=list(split='information'))
fit
str(fit)
rpart.plot(fit,type=4,extra=2,clip.right.labs=FALSE,varlen=0,faclen=0,main="Nilesh Hegde CS060")
print("Nilesh Hegde CS060") |
eb67ec48e19cbed016c71133637a03bddb317f5c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/shape/examples/writelabel.Rd.R | 5c5cc33017f535f3205672fcffd6d757763a87b8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 241 | r | writelabel.Rd.R | library(shape)
### Name: writelabel
### Title: adds a label next to a plot
### Aliases: writelabel
### Keywords: aplot
### ** Examples
plot(runif(2), main = "writelabel")
writelabel("A")
writelabel("B", at = 0)
writelabel("C", at = 1)
|
0766b66c81dd00d2eff66471d0ea03b1e16cb1c2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/animalTrack/examples/dead_reckoning.Rd.R | 55805d4f6b4168e7419f5b53a67df3b300bfd571 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,994 | r | dead_reckoning.Rd.R | library(animalTrack)
### Name: dead_reckoning
### Title: Create a course steered and course made good using the
### principles of navigation by dead reckoning.
### Aliases: dead_reckoning
### ** Examples
## Import the "missionbay2" dataset. See help(missionbay2)
## for full documentation.
data(missionbay2)
trial.1 <- missionbay2[missionbay2$trial == 1,]
trial.2 <- missionbay2[missionbay2$trial == 2,]
trial.3 <- missionbay2[missionbay2$trial == 3,]
trial.4 <- missionbay2[missionbay2$trial == 4,]
## Calculate the course made good for the four trials. Each returns
## to the starting position.
CS1 <- dead_reckoning(trial.1$speed, trial.1$heading_geo,angle="radian")
CS2 <- dead_reckoning(trial.2$speed, trial.2$heading_geo,angle="radian")
CS3 <- dead_reckoning(trial.3$speed, trial.3$heading_geo,angle="radian")
CS4 <- dead_reckoning(trial.4$speed, trial.4$heading_geo,angle="radian")
## Plot the course steered for each trial.
plot(CS1$CSx,CS1$CSy,type='l',col='blue',xlab="X-coordinate (unprojected)",
ylab="Y-coordinate (unprojected)",ylim=c(-400,150))
lines(CS2$CSx,CS2$CSy,col='green')
lines(CS3$CSx,CS3$CSy,col='red')
lines(CS4$CSx,CS4$CSy,col='magenta')
legend(-300,100,legend=c("Run1","Run2","Run3","Run4"),col=c("blue","green",
"red","magenta"),lty=c(1,1,1,1),bty="n")
title('Course Steered for Mission Bay Trials')
grid()
## Plot the course steered vs. course made good
plot(CS1$CSx,CS1$CSy,type='l',col='blue',xlab="X-coordinate (unprojected)",
ylab="Y-coordinate (unprojected)",ylim=c(-400,150))
lines(CS1$CMGx,CS1$CMGy,col='black')
t.set <- paste("Track 1, Set Angle: ",as.character(round(CS1$set*(180/pi),2)))
t.drift <- paste("Track 1, Drift: ",as.character(round(CS1$drift,2))," m/s")
t.error <- paste("Track 1, Error Distance: ",as.character(round(CS1$errordistance,2))," m")
title(paste(t.set,"\n",t.drift,"\n",t.error))
legend(-300,100,legend=c("Course Steered","Course Made Good"),
col=c("blue","black"),lty=c(1,1),bty="n")
grid()
|
3bff1c90b6d2d05e3fc8a7a5186392d1044d2e77 | f7d6406e64c9a1ae95680a0b9b5d9e85efc87c8d | /cachematrix.R | d3d1a14310b1ece755d701e81839c7291b206dbc | [] | no_license | KenHopwood/ProgrammingAssignment2 | 47cfc444f5a1d0c500f90495dcaee9ff78afa985 | 560f9c2772dea7aa72aebc6b5b204dc77b17ac1c | refs/heads/master | 2021-01-18T12:29:28.105438 | 2015-04-17T05:39:26 | 2015-04-17T05:39:26 | 32,506,063 | 0 | 0 | null | 2015-03-19T07:04:07 | 2015-03-19T07:04:07 | null | UTF-8 | R | false | false | 1,051 | r | cachematrix.R | ## cachematrix.R
## Ken Hopwood 16-APR-2015
##
## This set of functions will create a matrix and it's inverse. Caching is used
## to speed processing time.
## makeCacheMatrix provides matrix get and set functions. It also provides get
## and set functions for the inverse of the matrix. The inverse matrix will be
## cached for later retrieval saving computational time.
## makeCacheMatrix builds a special "matrix" object that can cache its
## inverse.  It returns a list of four accessors:
##   set(y)          - replace the stored matrix and invalidate the cache
##   get()           - return the stored matrix
##   setinverse(inv) - store a computed inverse in the cache
##   getinverse()    - return the cached inverse, or NULL if not computed
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse; NULL means "not computed yet".
    inv_cache <- NULL
    set <- function(y) {
        x <<- y
        # A new matrix invalidates any previously cached inverse.
        inv_cache <<- NULL
    }
    get <- function() x
    # Parameter renamed from `solve` (which shadowed base::solve) to avoid
    # confusion; callers passing the inverse positionally are unaffected.
    setinverse <- function(inverse) inv_cache <<- inverse
    getinverse <- function() inv_cache
    list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix.  The first call computes the inverse with solve() and
## stores it via setinverse(); subsequent calls serve the stored copy.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getinverse()
    if (is.null(cached)) {
        # Cache miss: compute, remember, and fall through to the return.
        cached <- solve(x$get())
        x$setinverse(cached)
    } else {
        message("getting cached data")
    }
    cached
}
|
dc0cc94630632a9d5afc16d6307728613f7ff376 | 2648e10a256a28937fcf83ae6f2e3b055396a812 | /Exercises/Chapter1_Exercises.R | e8ab9006d18036c7426a8049b2e1d75bbd446134 | [] | no_license | rumorej/ModernStatsforModernBiol | 6e16b60b9b0edff0477c2b89ec52900afa86bf2f | fe895999483b74569d7aac944a9082360847e4a7 | refs/heads/master | 2020-07-31T13:17:33.009569 | 2019-12-05T13:01:03 | 2019-12-05T13:01:03 | 210,615,183 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,471 | r | Chapter1_Exercises.R | ##Chapter 1 Exercises
##Exercise 1.1
#When computing tail probabilities such as p(X>a)
#you can use pnorm and pbinom
##Helpful scripts for downloading R files for Modern Stats for Modern Biol Text Book
#Run on CMD
##wget -r -np -nH --cut-dirs=3 web.stanford.edu/class/bios221/book/Rfiles/
##Helpful Script for renaming directories on Git
#git mv oldfilename newfilename
##Exercise 1.2
##dbinom(X, n, p)
library(epitools)
#Calcualte the probability mass distribution of X=2, B(10,0.3)
prob <- dbinom(0:10, prob = 0.3, size = 10)
round(prob, 2)
#Calculate the cumulative distribution at the value 2, corresponding to P(X<=2)
dbinom(0, size = 10, prob=0.3) +
dbinom(1, size =10, prob =0.3) +
dbinom(2, size =10, prob = 0.3)
#Alternatively, we can use the cumulative probability function for the binomial distribution pbinom
pbinom(2, size=10, prob=0.3)
#Answer: The probability of 2 or less by random in a sample size of 10 is ~38%
##Exercise 1.3
#Poisson=(x, lamda)
##Difficulties with this one...
##Exercise 1.4
#And this one due to above statement
##Exercise 1.5
#Use a simulation to find the probability of having a maximum of 9 or larger in 100 trials
maxes <- replicate(10000000, {
max(rpois(100,0.5))
})
table(maxes)
#Approximation for P[Xmax >=9]
mean(maxes >=9)
#P(X<= x) is the cumulative distribution function
?ppois
#Calculates the probability P[X>x]
ppois(9, 0.5, lower.tail = FALSE)
##Exercise 1.6
##Look at distributions in R
?Distributions
#dbeta: Beta Distribution; Continuous
?dbeta
x_beta <- seq(0, 1, by = 0.02)
y_beta <- dbeta(x_beta, shape1 = 2, shape2 = 5)
plot(y_beta)
#dbinomial:Binomial Distribution; Discrete
x <- dbinom(0:10, 10, 0.5)
barplot(x)
#dcauchy: Normal Distribution; Continuous
x_dcauchy <- seq(0, 1, by = 0.02)
y_dcauchy <- dcauchy(x_dcauchy, scale = 5)
plot(y_dcauchy)
#dchisq: Chi Square Distribution; Continuous
?dchisq
x_dchisq <- seq(0, 20, by = 0.5)
y_dchisq <- dchisq(x_dchisq, df = 5)
plot(y_dchisq)
#dexp: Exponential; Continuous
#dgamma:Tukey Distribution; Continuous
#dgeom: Geometric Distribution; Discrete
#dhyper: Hypergeometric Distribution; Discrete
#dlnorm: Log Normal Distribution; Continuous
#dmultinom: Multinomial Distribution; Discrete
#dnbinom: Negative Bionomial Distribution;
#dnorm: Continuous
#dpois: Discrete
#dt: Student's t Distribution; Continuous
#dunif: Uniform Distribution; Continuous
#dweibull: Weibull Distribution; Continuous
##Exercise 1.7
x <- rpois(100, 0.3)
mean(x)
var(x)
##Exercise 1.8
# Install BiocManager only when it is missing.  (The original was a console
# paste: the guard line was commented out and the continuation-prompt "+"
# made install.packages() run unconditionally on every execution.)
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}
#Load C. elegans Genome
library(BSgenome.Celegans.UCSC.ce2)
library(Biostrings)
#View C. elegans genome for sequence names
Celegans
#Explore nucleotide frequencies of chrM
LF <- letterFrequency(Celegans$chrM, letters = "ACGT", OR=0)
LF
lengthC <- sum(LF)
LF / lengthC
#Test whether C. elegans data is consistent with the uniform model
#Under Null
prob = rep(0.25, 4)
obsunder0 <- rmultinom(1, lengthC, p = prob)
dim(obsunder0)
t(obsunder0)
#Expected
##Expected value = 13794/4; 3448.5
# Expected counts under the uniform model (p = 0.25 per base) for 13794 bases.
expected0 = prob * 13794
expected0
#Difficulties with this one...had to look at answers for guidance
# NOTE(review): `oestat` is not defined anywhere in this script (it is the
# observed-vs-expected statistic from the book's solution code) and `n` is
# also undefined here -- presumably `lengthC` was intended.  Define both
# before running this line.
oenull = replicate(10000, oestat(e = expected0, o = rmultinom(1, n, p = prob)))
oenull
#Histogram of the null distribution
hist(oenull, breaks = 100, col = "lightblue")
?hist
|
7e6a6086c619f939e85dcf46f59e42307b951a77 | b310f8412ea3cfef7be133959c331d029bd1899f | /man/lavaan2xlsx.Rd | 1167eb6a210f3309d812d411bed8c1c2dc06e157 | [] | no_license | clbustos/rcebn | 9444f387b4b46e5870f140f40ec90ba6a43f87db | 7173891eae8b10ab93087a229aa677ff2674c004 | refs/heads/master | 2023-05-10T20:08:26.821587 | 2023-04-28T17:17:17 | 2023-04-28T17:17:17 | 29,193,481 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 668 | rd | lavaan2xlsx.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lavaan2xlsx.r
\name{lavaan2xlsx}
\alias{lavaan2xlsx}
\title{Generate a xlsx file for a lavaan object}
\usage{
lavaan2xlsx(x, extra.matrix.f = NULL, order.matrix.f = NULL)
}
\arguments{
\item{x}{lavaan object}
\item{extra.matrix.f}{extra matrix to add to x object}
\item{order.matrix.f}{names of the columns by which to order the correlation matrix}
}
\value{
a openxlsx object
}
\description{
Generate a xlsx file for a lavaan object
}
\examples{
\dontrun{
library(psych)
library(lavaan)
data(Dwyer)
cfa.1<-cfa("g=~V1+V2+V3+V4+V5+V6+V7+V8",sample.cov=Dwyer,sample.nobs=1000)
lavaan2xlsx(cfa.1)
}
}
|
503e718862253565c28035691683e5cf87fa0b11 | c18691bfbc70d8c869d542017cf285078a763052 | /feature creation.R | 1e3a65432ee53f6300e5355855c128a16d987983 | [] | no_license | ofigue/BNP-ParibasCardif | bbc56aee2aa525edaa64cc20713b46675b412691 | 89fdc9fcb8b81aa8303755480cdbe205fd41ece9 | refs/heads/master | 2016-09-12T14:20:06.186820 | 2016-05-06T21:25:45 | 2016-05-06T21:25:45 | 58,234,579 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,164 | r | feature creation.R | ##########################
##########################
# Feature engineering (applied identically to train and test)
##########################
# The original script duplicated every feature definition once for `train`
# and once for `test`; a single helper removes the copy-paste and guarantees
# the two sets stay in sync.  Column order (fe1..fe36) is preserved.
build_features <- function(df) {
  # Pairwise products (fe1-fe14) between selected raw variables.
  prods <- list(
    fe1  = c("v50",  "v10"),  fe2  = c("v50",  "v12"),
    fe3  = c("v50",  "v14"),  fe4  = c("v50",  "v114"),
    fe5  = c("v129", "v40"),  fe6  = c("v129", "v34"),
    fe7  = c("v129", "v21"),  fe8  = c("v114", "v10"),
    fe9  = c("v114", "v12"),  fe10 = c("v114", "v14"),
    fe11 = c("v21",  "v50"),  fe12 = c("v21",  "v10"),
    fe13 = c("v21",  "v12"),  fe14 = c("v21",  "v14")
  )
  for (nm in names(prods)) {
    pair <- prods[[nm]]
    df[[nm]] <- df[[pair[1]]] * df[[pair[2]]]
  }
  # Row-wise sum / mean / sd over variable groups (fe15-fe29), three
  # features per group, in the same order as the original script.
  groups <- list(
    c("v50", "v10", "v12", "v14"),
    c("v129", "v40", "v10", "v114"),
    c("v40", "v34", "v21"),
    c("v21", "v129", "v14"),
    c("v98", "v100", "v39")
  )
  fe_idx <- 15
  for (cols in groups) {
    m <- as.matrix(df[, cols])
    df[[paste0("fe", fe_idx)]]     <- rowSums(m)
    df[[paste0("fe", fe_idx + 1)]] <- rowMeans(m)
    df[[paste0("fe", fe_idx + 2)]] <- rowSds(m)  # rowSds() from matrixStats
    fe_idx <- fe_idx + 3
  }
  # Remaining products and squares (fe30-fe36).
  tail_prods <- list(
    fe30 = c("v50", "v56"), fe31 = c("v50", "v71"), fe32 = c("v66", "v66"),
    fe33 = c("v50", "v50"), fe34 = c("v50", "v52"), fe35 = c("v50", "v62"),
    fe36 = c("v47", "v47")
  )
  for (nm in names(tail_prods)) {
    pair <- tail_prods[[nm]]
    df[[nm]] <- df[[pair[1]]] * df[[pair[2]]]
  }
  df
}

train <- build_features(train)
test  <- build_features(test)
6e738f256ba1dea310434ac2e67d026e80dd9614 | 3b6d3f3a17580fc270707254e7bfc823bc86ae06 | /cachematrix.R | 17a29ef4c2ecbf1da6db80d8885dd28e26b24506 | [] | no_license | abaldinelli/ProgrammingAssignment2 | 00f511d4595d9809a515093be61c48f409ff75ee | 7974e714832a556354d07d799159beb98243a2df | refs/heads/master | 2022-09-16T10:11:03.125355 | 2020-05-26T18:54:45 | 2020-05-26T18:54:45 | 267,109,814 | 0 | 0 | null | 2020-05-26T17:36:18 | 2020-05-26T17:36:17 | null | UTF-8 | R | false | false | 1,894 | r | cachematrix.R | ## This file defines a set of functions for creating and manage a special kind of
## matrix that stores its inverse internally. There is some code for testing
## proposals.
##
## This function creates an special type of matrix to cache internally its inverse.
## This type is composed by 4 functions:
## - set: sets a matrix to be stored;
## - get: returns the original matrix;
## - getInverse: returns the storedInverse value;
## - setInverse: sets the storedInverse value, and clean the cached data;
##
##
## Bug fix: the cache variable was initialised with `<<-`, which created a
## single binding in the GLOBAL environment shared by every object returned
## by makeCacheMatrix -- distinct matrices clobbered each other's cached
## inverse, and set() on one object wiped the cache of all of them.  A plain
## local assignment gives each object its own cache, and the `<<-` calls
## inside the closures now resolve to that per-object binding.
##
makeCacheMatrix <- function(x = matrix()) {
  storedInverse <- NULL
  list(set = function(matrixToSet) {
         x <<- matrixToSet
         storedInverse <<- NULL  # a new matrix invalidates the cached inverse
       },
       get = function() x,
       setInverse = function(inverse) storedInverse <<- inverse,
       getInverse = function() storedInverse)
}
##
## Returns the inverse of a makeCacheMatrix object `x`.  The first call
## computes it with solve() (extra arguments in `...` are forwarded) and
## stores it via setInverse(); later calls serve the stored copy.
##
cacheSolve <- function(x, ...) {
  result <- x$getInverse()
  if (is.null(result)) {
    message("Calculating...")
    result <- solve(x$get(), ...)
    x$setInverse(result)
  } else {
    message("Reading from cache...")
  }
  result
}
##########################################
## Code for testing proposed functions
##########################################
message ("Testing...")
message ("Making New Matrix")
matrix <- makeCacheMatrix(matrix(1:4, 2))
cacheSolve(matrix) # must calculate
cacheSolve(matrix) # must call cached data
message ("Making New")
matrix$set(matrix(5:8, 2))
cacheSolve(matrix) # must calculate
cacheSolve(matrix) # must call cached data
message ("End Testing") |
89386a987fa93008e695bf6da3594a5f1d3bf235 | 5ada63667fdfb87eaff4a087e70c6f5a4267ea74 | /R/OutcomeImputationCOXINVERSION.R | 46fd6e75018b44a38d705826840b8c968c8e2c48 | [] | no_license | lbeesleyBIOSTAT/MultiCure | d3a00f23e40c87b4e984e2e315b50ee0a9226056 | f33c994ce2aa2565f0163c9a559fccad2ab277ab | refs/heads/master | 2022-02-03T21:46:32.511343 | 2019-07-08T16:10:44 | 2019-07-08T16:10:44 | 103,183,325 | 3 | 2 | null | 2017-09-11T20:34:18 | 2017-09-11T20:14:41 | null | UTF-8 | R | false | false | 6,586 | r | OutcomeImputationCOXINVERSION.R |
#' UNEQUALCENSIMPUTECOXINVERSION
#' @description The function UNEQUALCENSIMPUTECOXINVERSION will perform an imputation algorithm to handle unequal follow-up for recurrence and death. This function can be applied when we assume COX baseline hazards. This function performs imputation through inverting the survival function of the target distribution.
#' @param datWIDE defined as in MultiCure
#' @param beta A vector containing the most recent estimates of beta
#' @param alpha A vector containing the most recent estimates of alpha
#' @param ImputeDat This is a list with the following elements:
#' \itemize{
#' \item UnequalCens: A vector taking value 1 if the subject has unequal follow-up. Note: If subject is assumed cured in datWIDE, they are listed as UnequalCens = 0.
#' \item CovMissing: A matrix indicating which elements of Cov are missing. Not needed for this imputation.
#' \item CovImp: A list containing a single imputation of Cov
#' \item GImp: A vector with a recent single imputation of G
#' \item YRImp: A vector with a recent single imputation of Y_R
#' \item deltaRImp: A vector with a recent single imputation of delta_R
#' \item y: The integral of the target kernel over Yr0 to Yd
#' \item Basehaz13: A matrix containing the estimate of the baseline hazard function for the 1->3 transition specified intervals
#' \item Basehaz24: A matrix containing the estimate of the baseline hazard function for the 2->4 transition specified intervals
#' \item Basehaz14: A matrix containing the estimate of the baseline hazard function for the 1->4 transition specified intervals
#' \item Basehaz34: A matrix containing the estimate of the baseline hazard function for the 3->4 transition specified intervals
#' }
#' @param TransCov defined as in MultiCure
#'
#' @return a list containing
#' \itemize{
#' \item [[1]]: deltaRImp, A single imputation of delta_R
#' \item [[2]]: YRImp, A single imputation of Y_R
#'}
#' @export
UNEQUALCENSIMPUTECOXINVERSION = function(datWIDE, beta, alpha, ImputeDat, TransCov){
  ##################
  ### Initialize ###
  ##################
  # Unpack ImputeDat by position; element order is documented in the roxygen
  # header above (ImputeDat[[2]], CovMissing, is not needed here).
  # NOTE(review): `alpha` is accepted in the signature but never referenced
  # in this function body.
  UnequalCens = ImputeDat[[1]]
  CovImp = as.data.frame(ImputeDat[[3]])
  GImp = ImputeDat[[4]]
  YRImp = ImputeDat[[5]]
  deltaRImp = ImputeDat[[6]]
  y = ImputeDat[[7]]
  Basehaz13 = ImputeDat[[8]]
  Basehaz24 = ImputeDat[[9]]
  Basehaz14 = ImputeDat[[10]]
  Basehaz34 = ImputeDat[[11]]
  Nobs = length(datWIDE[,1])
  # TRANS labels each element of beta with its transition, in the same order
  # as the covariate lists in TransCov: 1 = 1->3, 2 = 2->4, 3 = 1->4, 4 = 3->4.
  A1 = length(TransCov$Trans13)
  A2 = length(TransCov$Trans24)
  A3 = length(TransCov$Trans14)
  A4 = length(TransCov$Trans34)
  TRANS = c(rep(1,A1), rep(2,A2), rep(3,A3), rep(4,A4))
  # Per-subject linear predictors X'beta, one vector per transition.
  XB_beta13 = as.numeric(beta[TRANS==1] %*% t(cbind(CovImp[,TransCov$Trans13])))
  XB_beta24 = as.numeric(beta[TRANS==2] %*% t(cbind(CovImp[,TransCov$Trans24])))
  XB_beta14 = as.numeric(beta[TRANS==3] %*% t(cbind(CovImp[,TransCov$Trans14])))
  XB_beta34 = as.numeric(beta[TRANS==4] %*% t(cbind(CovImp[,TransCov$Trans34])))
  # Baseline hazards evaluated as step functions of time (BasehazXY columns
  # per the roxygen header: estimates over specified intervals).
  BasehazFun_13 = stepfun(x= Basehaz13[,2], y = c(Basehaz13[,3],0), right = F)
  BasehazFun_24 = stepfun(x= Basehaz24[,2], y = c(Basehaz24[,3],0), right = F)
  BasehazFun_14 = stepfun(x= Basehaz14[,2], y = c(Basehaz14[,3],0), right = F)
  BasehazFun_34 = stepfun(x= Basehaz34[,2], y = c(Basehaz34[,3],0), right = F)
  # Probability of still being in state 1 (no recurrence, no death) at the
  # observed death/censoring time Y_D.  Baseline_Hazard() is an external
  # helper; it presumably returns the cumulative baseline hazard at a time.
  S1_D = exp(-as.numeric(sapply(datWIDE$Y_D,Baseline_Hazard, Basehaz13))*exp(XB_beta13))*
  exp(-as.numeric(sapply(datWIDE$Y_D,Baseline_Hazard, Basehaz14))*exp(XB_beta14))
  # Hazard of the direct 1->4 (death without recurrence) transition at Y_D.
  h14_D = BasehazFun_14(datWIDE$Y_D)*exp(XB_beta14)
  TAU = max(datWIDE$Y_R[datWIDE$delta_R==1]) # latest observed recurrence (not referenced again here)
  # Outcomes that are already determined: cured subjects (GImp == 0) cannot
  # recur, uncured subjects with equal follow-up keep their observed values;
  # everyone else is set to NA and imputed below.
  YRImp = ifelse(GImp==0,datWIDE$Y_D, ifelse(GImp==1 & UnequalCens == 0,datWIDE$Y_R,rep(NA,Nobs) ))
  deltaRImp = ifelse(GImp==0,rep(0,Nobs), ifelse(GImp==1 & UnequalCens == 0,datWIDE$delta_R,rep(NA,Nobs) ))
  ######################
  ### Impute Delta R ###
  ######################
  # Posterior probability of a recurrence in (Y_R, Y_D]: `y` is the integral
  # of the recurrence-path kernel over that window (see roxygen), and the
  # denominator is the no-recurrence path -- survive in state 1 to Y_D, with
  # the 1->4 hazard contributing only when death was observed.
  num = y
  denom = (h14_D^datWIDE$delta_D)*S1_D
  ratio = ifelse(num==0,num,num/(num + denom)) [GImp==1 & UnequalCens == 1]
  # mSample is an external helper; it presumably draws a Bernoulli outcome
  # from each probability.
  deltaRImp[GImp==1 & UnequalCens == 1] = apply(matrix(ratio), 1,mSample)
  # Subjects imputed as non-recurrent are followed all the way to Y_D.
  YRImp[GImp==1 & UnequalCens == 1 & deltaRImp==0] = datWIDE$Y_D[GImp==1 & UnequalCens == 1 & deltaRImp==0]
  INDICES = which(is.na(YRImp))
  ########################
  ### Define Functions ###
  ########################
  # fdCOX(v, m): unnormalized density kernel for subject m's recurrence time
  # v in (Y_R, Y_D): (1->3 hazard at v) x (stay in state 1 to v) x (survive
  # state 3 from v to Y_D) x (3->4 hazard at Y_D when death was observed).
  # When 'T_R' is itself a covariate of the 3->4 transition, the 3->4 linear
  # predictor must be recomputed for every candidate v.
  if('T_R' %in% TransCov$Trans34){
    fdCOX<-function(v, m){
      # NOTE(review): `i` is not defined in this function; `CovImp[[i]]`
      # looks like a leftover index from a multiple-imputation loop
      # (elsewhere CovImp is indexed as CovImp[, cols]) -- confirm the
      # intended subscript before relying on this branch.
      XB_beta34MOD = as.numeric(beta[TRANS==4][TransCov$Trans34!= 'T_R'] %*% t(cbind(CovImp[[i]][m,TransCov$Trans34[TransCov$Trans34!='T_R']])))
      XB_beta34MOD = XB_beta34MOD + as.numeric(beta[TRANS==4][TransCov$Trans34== 'T_R'] %*% t(cbind(v)))
      Cumhazard13_temp = exp(XB_beta13[m])*as.numeric(Baseline_Hazard(v, Basehaz13 ))
      Cumhazard14_temp = exp(XB_beta14[m])*as.numeric(Baseline_Hazard(v, Basehaz14 ))
      Cumhazard34_temp = exp(XB_beta34MOD)*as.numeric(Baseline_Hazard(datWIDE$Y_D[m]-v, Basehaz34) )
      Surv1_temp = exp(-Cumhazard13_temp-Cumhazard14_temp)
      Surv3_temp = exp(-Cumhazard34_temp)
      hazard13_temp = exp(XB_beta13[m])*BasehazFun_13(v)
      # Hazard is forced to 0 exactly at Y_D to avoid evaluating the 3->4
      # step function at a sojourn time of zero.
      hazard34_temp = ifelse(v == datWIDE$Y_D[m],0,exp(XB_beta34MOD)*BasehazFun_34(datWIDE$Y_D[m]-v))
      return(hazard13_temp*Surv1_temp* Surv3_temp*((hazard34_temp)^datWIDE$delta_D[m]))
    }
  }else{
    fdCOX<-function(v, m){
      Cumhazard13_temp = exp(XB_beta13[m])*as.numeric(Baseline_Hazard(v, Basehaz13 ))
      Cumhazard14_temp = exp(XB_beta14[m])*as.numeric(Baseline_Hazard(v, Basehaz14 ))
      Cumhazard34_temp = exp(XB_beta34[m])*as.numeric(Baseline_Hazard(datWIDE$Y_D[m]-v, Basehaz34) )
      Surv1_temp = exp(-Cumhazard13_temp-Cumhazard14_temp)
      Surv3_temp = exp(-Cumhazard34_temp)
      hazard13_temp = exp(XB_beta13[m])*BasehazFun_13(v)
      hazard34_temp = ifelse(v == datWIDE$Y_D[m],0,exp(XB_beta34[m])*BasehazFun_34(datWIDE$Y_D[m]-v))
      return(hazard13_temp*Surv1_temp* Surv3_temp*((hazard34_temp)^datWIDE$delta_D[m]))
    }
  }
  # DrawVAL(TIME, U, m): root function for inverse-CDF sampling.  It equals
  # the normalized kernel mass over (TIME, Y_D] minus the uniform draw U;
  # the root in TIME is the imputed recurrence time.
  DrawVAL = function(TIME, U, m){
    # maxEval = 10 keeps the quadrature cheap; accuracy is limited accordingly.
    g=cubature::adaptIntegrate(Vectorize(fdCOX), lowerLimit = TIME, upperLimit = datWIDE$Y_D[m],m, maxEval=10)
    ZERO=(g$integral/y[m])-U
    return(ZERO)
  }
  ##################
  ### Impute T_R ### (By inverting the survival function of T_R)
  ##################
  # For each subject still missing Y_R: draw U ~ Uniform(0,1), solve
  # DrawVAL = 0 over (Y_R, Y_D), then nudge the root strictly inside the
  # interval so the imputed time never coincides with an endpoint.
  DrawVALWRAPPER = function(s){
    m = INDICES[s]
    U1 = runif(n=1, min = 0, max = 1)
    draw = stats::uniroot(DrawVAL, interval = c(datWIDE$Y_R[m], datWIDE$Y_D[m]),U1, m, tol = 0.01, maxiter = 20)$root
    if(draw >= datWIDE$Y_D[m] ){draw = datWIDE$Y_D[m] - (datWIDE$Y_D[m]/1000)}
    if(draw <= datWIDE$Y_R[m] ){draw = datWIDE$Y_R[m] + (datWIDE$Y_R[m]/1000)}
    #print(m)
    return(draw)
  }
  DRAWS = sapply(as.numeric(c(1:length(INDICES))), DrawVALWRAPPER)
  YRImp[INDICES] = DRAWS
  # Returned in the order documented above: imputed delta_R, imputed Y_R.
  return(list(deltaRImp, YRImp))
}
|
d83345e095f21f4db0818d1508dd44c56b2f2717 | 217cc788dbe051e6921a7376dc830d91b7189488 | /man/cronjob-package.Rd | 7ea24ef3fd66b1de1b5e4403c21b29b8ebf16bfc | [
"MIT"
] | permissive | mkearney/cronjob | b1503b12544459d961f6c901c70e007e27afc33d | c9e1c36d62375545017d010074eef200fd4783bf | refs/heads/master | 2022-04-09T21:34:39.022011 | 2020-03-29T01:30:56 | 2020-03-29T01:30:56 | 250,391,631 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 572 | rd | cronjob-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cronjob-package.R
\docType{package}
\name{cronjob-package}
\alias{cronjob}
\alias{cronjob-package}
\title{cronjob: Manage Cron Jobs}
\description{
Create and manage crontabs.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/mkearney/cronjob}
\item Report bugs at \url{https://github.com/mkearney/cronjob/issues}
}
}
\author{
\strong{Maintainer}: Michael W. Kearney \email{kearneymw@missouri.edu} (\href{https://orcid.org/0000-0002-0730-4694}{ORCID})
}
\keyword{internal}
|
c5e0a88f9599bd03b7a6db7e4e27e7216fbe4816 | 378cd513fc4083b2b66fd7fcff2597081341bdb7 | /man/chickadees.Rd | a55c896ef8707a904cded3177e746e5949d19148 | [] | no_license | ChristinaMasco/song | f303f12b663c05cb3f6d9b16c6ec42820600a5c3 | 0a68a626b1b9be706ceb758aaf0984b406105eb6 | refs/heads/master | 2021-01-18T11:11:12.716513 | 2016-07-11T16:42:13 | 2016-07-11T16:42:13 | 31,434,988 | 2 | 1 | null | 2015-02-27T19:16:57 | 2015-02-27T19:16:56 | R | UTF-8 | R | false | false | 814 | rd | chickadees.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/song.R
\docType{data}
\name{chickadees}
\alias{chickadees}
\title{An interaction between a black-capped chickadee and a playback stimulus.}
\format{A data frame with 50 rows and 3 variables}
\source{
Baker TM, Wilson DR, Mennill DJ. 2012. Vocal signals predict attack
during aggressive interactions in black-capped chickadees. Anim Behav. 84:
965-974.
}
\usage{
chickadees
}
\description{
A data set containing the start and end times of each song produced during
an interaction between a black-capped chickadee and a fixed-interval
playback stimulus. These data are from a study by Baker, Wilson and Mennill
(2012) designed to evaluate whether song overlap predicts the attack of a
rival in this species.
}
\keyword{datasets}
|
b6e2920f20244fadae6ad7e7ecad4bcd7fe5e0dd | 8fcee8357f48413bd2977542fed0b3eb6c353602 | /OCC5.R | a968219f0a9111f583329cc2a444fa4a28e29f6b | [] | no_license | HobsonB/occ_sim | 0c2dfefa59d0dded53400869aa476a772da08087 | a0f19c57186d2d947f605cb96090687f98abaa3a | refs/heads/master | 2022-11-22T01:31:40.545921 | 2020-07-22T16:15:30 | 2020-07-22T16:15:30 | 281,723,189 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,985 | r | OCC5.R | dev.off()
rm(list=ls())
cat("\014")
#----Occupant-Centric Controls Function 5: Preferred Illuminance Setpoint----
# Function for extracting information relating to occupant-centric controls based on:
# H. B. Gunay, W. O'Brien, I. Beausoleil-Morrison, and S. Gilani,
# "Development and implementation of an adaptive lighting and blinds control algorithm,"
# Build. Environ., vol. 113, pp. 185-199, 2017.
# Part of IEA EBC Annex 79 Subtask 4 Activity 2.2.1
# Author(s): Brodie W. Hobson
# Load or install required packages
if ("lubridate" %in% rownames(installed.packages()) == FALSE) {install.packages("lubridate")}
require(lubridate)
if ("DescTools" %in% rownames(installed.packages()) == FALSE) {install.packages("DescTools")}
require(DescTools)
# Specify the file path of .csv file, must have \\ between levels of directory
file_path <- "roomData.csv"
# Specify date format used in .csv
date_format = "%Y-%m-%d %H:%M"
# Specify the confidence for the upper and lower occupancy profiles
confidence <- 0.9
#----Read in data----
# Pass file path information to variable
# Data should be a .csv with dates in the first column followed by light on/off, lux, and motion data for each room in order
# The date data should be in a single, continuous timestep applicable to data
# The column names of the should reflect the room number, or have some identifying information
# Read in data from the specified file path
temp <- read.csv(file = file_path)
# Convert date format to POSIXct
dateStr <- as.POSIXct(temp[,1], format = date_format, tz = "GMT")
# Isolate light keypress, motion detector, and lux readings
dat_light <- temp[,grep('light',colnames(temp))]
dat_md <- temp[,grep('md',colnames(temp))]
dat_lux <- temp[,grep('lux',colnames(temp))]
# Find the timestep of the data for calculation purposes, first and second timesteps must be representative of whole time series
timestep_min <- as.numeric(difftime(dateStr[2],dateStr[1]))
# Create blank vectors to store parameters
threshold <- vector()
keypress_on <- vector()
mdl_fit <- list()
# Create a loop to find the occupied period of each room
# For each room, fit a logistic regression of light state (on/off) on
# workplane illuminance, then derive an illuminance threshold for switching
# the lights off.
for (n in 1:ncol(dat_lux)){
# Create a temporary data frame to store the light keypress, motion detector, and lux readings from a single office for analysis
dat <- cbind.data.frame(dateStr, dat_md[,n], dat_light[,n], dat_lux[,n])
colnames(dat) <- c("Time", colnames(dat_md[n]), colnames(dat_light[n]), colnames(dat_lux[n]))
# Find the lux readings at the timesteps when the light are on and the room is occupied
# c(dat[-1,3], dat[1,3]) is a one-step LEAD of the light column (the last
# row wraps around to the first); wday 2:6 restricts to weekdays.
luxOn <- dat[which(wday(dat[,1]) %in% c(2:6) & dat[,2]==1 & c(dat[-1,3],dat[1,3])==1),4]
# Create a dataframe with the light on (1) for the y-axis and the lux levels
luxOn <- cbind.data.frame(luxOn, rep(1, length(luxOn)))
# Name the columns so they can be combined vertically
colnames(luxOn) <- c("lux","binary")
# Find the lux readings when the lights are off and the room is occupied
luxOff <- dat[which(wday(dat[,1]) %in% c(2:6) & dat[,2]==1 & c(dat[-1,3],dat[1,3])==0 & dat[,4]!=0),4]
# Create a dataframe with the light off (0) for the y-axis and the lux levels
luxOff <- cbind.data.frame(luxOff, rep(0, length(luxOff)))
# Name the columns so they can be combined vertically
colnames(luxOff) <- c("lux","binary")
# Combine the columns vertically so they can be plotted
temp_dat <- rbind.data.frame(luxOn,luxOff)
# Create a binomial logistic regression model using the data
glm_mdl <- glm(binary~., family = binomial(link='logit'), data = temp_dat)
# Fit the data to the logistic regression model to get the shape of the curve
# Predicted P(light on) over a 0..1000 lux grid, appended per room.
mdl_fit[[length(mdl_fit)+1]] <- predict(glm_mdl, newdata = data.frame(lux = c(0:1000)), type = "response")
# Fraction of occupied samples with the light on at the next timestep.
keypress_on[length(keypress_on)+1] <- round(nrow(luxOn)/sum(nrow(luxOn)+nrow(luxOff)), digits = 2)
# Find the quantile for lux measurements when light is turned on if always above the confidence threshold
# NOTE(review): mdl_fit is a LIST, so the matrix-style subscripts
# mdl_fit[, 2] and mdl_fit[idx, 1] in this condition and in the else
# branch will error ("incorrect number of dimensions") at runtime.  They
# appear to date from an earlier matrix representation of the fitted
# curve; the intended logic likely operates on unlist(mdl_fit[n]) as the
# else-less branch below does -- verify before use.
if(is.na(mdl_fit[which(mdl_fit[,2] >= confidence)[1],1])){
threshold[length(threshold)+1] <- RoundTo(which(unlist(mdl_fit[n]) <= 0.1)[1]-1, multiple = 10, FUN = 'round')
}else{
threshold[length(threshold)+1] <- RoundTo(mdl_fit[which(mdl_fit[,2] >= confidence)[1],1], multiple = 10, FUN = round)
}
}
# Store results from binomial logistic regression model for plotting
light_glm_forPlot <- as.data.frame(matrix(unlist(mdl_fit), ncol = ncol(dat_lux), byrow = F))
colnames(light_glm_forPlot) <- colnames(dat_lux)
write.csv(light_glm_forPlot,"light_glm.csv", row.names = F)
# Bind together results from each room into a single dataframe
results <- rbind.data.frame(threshold,keypress_on)
rownames(results) <- c("Light Off","Keypresses")
colnames(results) <- colnames(dat_light)
# Output a .csv containing lighting thresholds for each office
write.csv(results, "OCC5.csv") |
4fd7bba50b0ea5850761e6d59e708871d88bbe92 | 2432e21f46897965e9feac59e0d05bd3964e7c03 | /classification.multiple.ho.R | 0512c9a7500069d991b39e40622a576bd26e2e7f | [] | no_license | xime377/UAV-classification | 89b4bcacef49863bc13e1d8a65cc89b5c633cb39 | e300fefd1c4fdca27340c713a14b70d5f691649b | refs/heads/master | 2020-05-18T12:59:01.799268 | 2020-03-04T16:30:39 | 2020-03-04T16:30:39 | 24,793,989 | 1 | 4 | null | null | null | null | UTF-8 | R | false | false | 3,293 | r | classification.multiple.ho.R | ######## Function to perform multiple classification algorithms using caret ######
# Train several classifiers with caret on a hold-out split, evaluate each on
# the held-out test set, optionally knit an HTML report, and return (and
# save) the models plus confusion matrices and an accuracy summary matrix.
#
# Assumes the response is the left-hand side of `fmla` (it becomes column 1
# of `data` after get_all_vars()) and that it is multi-class, so that
# confusionMatrix()$byClass is a matrix with one row per class.
classification.multiple.ho <- function(data,        # data.frame with response and predictors
                                       train.ind,   # row indices of the training split
                                       fmla,        # model formula (response ~ predictors)
                                       k,           # number of CV folds
                                       times,       # number of CV repeats
                                       methods,     # caret method names of the classifiers
                                       tuneLength,  # tuning-grid size passed to caret::train
                                       print.report, # TRUE to knit an HTML report and write CSVs
                                       output.name)  # subdirectory name for the output files
{
  library(caret)
  library(knitr)
  library(ProjectTemplate)
  # Keep only the variables referenced by the formula; response is column 1.
  data = get_all_vars(fmla, data)
  ####### CREATE SETS
  train = data[train.ind, ]
  test = data[-train.ind, ]
  ####### CREATE FOLDS FOR TRAINING THE CLASSIFIER
  MyFolds.ho <- createMultiFolds(train[, 1], k = k, times = times)
  ########### TRAIN MODELS
  MyControl.ho <- trainControl(method = "repeatedCV", index = MyFolds.ho)
  models.ho = list()
  for (i in seq_along(methods)) {
    models.ho[[i]] <- train(fmla, train, method = methods[i],
                            trControl = MyControl.ho, tuneLength = tuneLength) # with cross-validation
  }
  names(models.ho) = methods
  ########## OVERVIEW OF RESULTS
  ####### hold out
  newdata = predict(models.ho, test)
  conf.mat.ho = vector("list", length(models.ho))
  # Observed test-set classes (LHS of the formula).  Extracting with [[ ]]
  # replaces the original eval(parse(text = paste0("test$", ...))) idiom.
  var = test[[as.character(fmla[[2]])]]
  for (i in seq_along(models.ho)) {
    conf.mat.ho[[i]] = confusionMatrix(data = newdata[[i]], var)
  }
  names(conf.mat.ho) = names(newdata)
  ####### aggregate results in a matrix
  # Rows: overall accuracy, kappa, then per-class producer's and user's
  # accuracy; columns: one per classifier.
  results = matrix(NA, nrow = 2 + (2 * length(levels(data[, 1]))), ncol = length(models.ho))
  colnames(results) = names(models.ho)
  rownames(results) = c("Overall_Acc", "Kappa",
                        paste("Prod_Acc", gsub("Class: ", "", names(conf.mat.ho[[1]]$byClass[, 1])), sep = "."),
                        paste("User_Acc", gsub("Class: ", "", names(conf.mat.ho[[1]]$byClass[, 1])), sep = "."))
  for (z in seq_along(models.ho)) {
    results[1, z] = conf.mat.ho[[z]]$overall[1] # overall accuracy
    results[2, z] = conf.mat.ho[[z]]$overall[2] # Kappa
    results[3:(2 + length(levels(data[, 1]))), z] = conf.mat.ho[[z]]$byClass[, 1] # producer's accuracy (sensitivity)
    results[(3 + length(levels(data[, 1]))):dim(results)[1], z] = conf.mat.ho[[z]]$byClass[, 3] # user's accuracy (pos. pred. value)
  }
  # The output directory is needed both for the optional report and for the
  # unconditional save() below, so create it regardless of print.report.
  # (Previously mainDir/subDir were only defined inside the report branch,
  # so save() errored whenever print.report was FALSE.)
  mainDir <- "./6_Classification/reports"
  subDir <- output.name
  dir.create(file.path(mainDir, subDir))
  if (print.report == TRUE) {
    knit("../lib/report_classification_ho_multiple.Rmd",
         output = paste0(file.path(mainDir, subDir), "/report_ho"), envir = new.env())
    knit2html(paste0(file.path(mainDir, subDir), "/report_ho"))
    file.rename("report_ho.html", paste0(file.path(mainDir, subDir), "/report_ho.html"))
    file.rename("report_ho.txt", paste0(file.path(mainDir, subDir), "/report_ho.txt"))
    write.table(results, paste0(file.path(mainDir, subDir), "/results_ho.csv"), sep = ",")
  }
  results.ho = list(conf.mat = conf.mat.ho, models = models.ho, fmla = fmla, times = times, k = k,
                    data = data, results = results)
  save(results.ho, file = paste0(file.path(mainDir, subDir), "/results_ho.Rdata"))
  return(results.ho)
}
600fbd04a2374517e89605bf502cf0c2e204c78b | b3024e8047b5fc811fbb69ca0f651e1a0a43fd18 | /man/dnsaf.Rd | eb20b80be45c12bde9d1078dc4b4651d0f846e4f | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ed-lau/calcDNSAF | 83733539506546d517942a3a68ce9b247233d953 | 06d73662eb1d9cadf021ceb8667a2dccdb1d0a1d | refs/heads/master | 2020-03-30T02:42:42.045692 | 2019-09-07T17:09:28 | 2019-09-07T17:09:28 | 150,645,269 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 992 | rd | dnsaf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dnsaf_functions.R
\name{dnsaf}
\alias{dnsaf}
\title{Calculate dNSAF}
\usage{
dnsaf(percolatorFile, fasta, test_run = F, q_cutoff = 0.01,
nonUniqueDummyCount = 0.5)
}
\arguments{
\item{percolatorFile}{a tibble read from the percolator target psms file}
\item{fasta}{a Biostrings AA string set containing a proteomics database used in the search}
\item{test_run}{Whether to do a short run of only 100 search results? Defaults to FALSE}
\item{q_cutoff}{Percolator q value cutoff to consider a peptide as confident. Defaults to 0.01}
\item{nonUniqueDummyCount}{Unique counts to give to a protein without unique peptide. Defaults to 0.5}
}
\description{
This function calculates distributed normalized spectral abundance factor from a percolator target psms file
}
\examples{
calcDNSAF::dnsaf(percolatorFile = percolatorFile, fasta = Mm_UniProt, test_run=T, q_cutoff=0.01)
}
\keyword{dNSAF}
\keyword{proteomics}
|
24961dcfa80048d6fe67bfaf6cb2466d4077a0e3 | 3b0be5721a5478b1bac4e6b08cdcd1b88e3a4046 | /inst/snippets/Figure4.12.R | 3d934d2671db8fd62ad3ea9e47991cc312e9207a | [] | no_license | stacyderuiter/Lock5withR | b7d227e5687bc59164b9e14de1c8461cb7861b14 | 417db714078dc8eaf91c3c74001b88f56f09b562 | refs/heads/master | 2020-04-06T06:33:39.228231 | 2015-05-27T11:41:42 | 2015-05-27T11:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 157 | r | Figure4.12.R | RandomizationDist <- do(1000) * rflip(10, .5) # 10 because n=10
# Preview the first few rows of the 1000 simulated coin-flip results.
head(RandomizationDist)
# Randomization distribution of `prop` across the simulations
# (bar width 1/10 because each trial has n = 10 flips).
histogram(~prop, label=TRUE, width = 1/10, data=RandomizationDist)
|
3693510fab9e30dfaaa7912e83d5a4a935555829 | e2a5cdf2dcbd788ac7c091897b5a027a809c302a | /man/winterTemperatures.Rd | f8840f4ed012854d67337e1d905b8328be6d24bd | [] | no_license | lindbrook/cholera | 3d20a0b76f9f347d7df3eae158bc8a357639d607 | 71daf0de6bb3fbf7b5383ddd187d67e4916cdc51 | refs/heads/master | 2023-09-01T01:44:16.249497 | 2023-09-01T00:32:33 | 2023-09-01T00:32:33 | 67,840,885 | 138 | 13 | null | 2023-09-14T21:36:08 | 2016-09-10T00:19:31 | R | UTF-8 | R | false | true | 338 | rd | winterTemperatures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/winterTemperatures.R
\name{winterTemperatures}
\alias{winterTemperatures}
\title{Average Winter Temperatures.}
\usage{
winterTemperatures()
}
\description{
Gareth Stedman Jones Appendix 2, Table 12, p.384.
}
\examples{
plot(winterTemperatures(), "1859-6-1")
}
|
b78964096d36793d8113adb3809db8c7a1e01bce | fe6081add8edaa493c70d5128dc282e32a862f44 | /plot2.R | 390e1c068013f1bce97ca0658bbc93a1e8b287df | [] | no_license | robinmd/ExData_Plotting1 | 60606842da691305655237c3b2455be0a28139aa | 790e2132729483aeed8694d881ecce55cbf53182 | refs/heads/master | 2021-01-18T10:37:23.710404 | 2015-08-09T09:14:47 | 2015-08-09T09:14:47 | 40,286,561 | 0 | 0 | null | 2015-08-06T05:35:30 | 2015-08-06T05:35:28 | null | UTF-8 | R | false | false | 1,064 | r | plot2.R | # Generate Plot2
# Exploratory Data Analysis
# Project 1
#
# download data, unzip, load data and generate Plot2
#
# Create a local data directory on first run.
if(!file.exists("./data")){
dir.create("./data")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fileD <- "./data/household_power_consumption.zip"
# Download the archive only if it is not already cached (binary mode for zip).
if(!file.exists(fileD)){
download.file(fileUrl,destfile=fileD,mode="wb")
}
unzip(fileD, exdir="./data")
# Semicolon-separated file (read.csv2); "?" marks missing values.
hpc <- read.csv2("./data/household_power_consumption.txt",
na.strings = "?")
# Parse the power reading as numeric and combine Date + Time into a datetime.
hpc <- transform(hpc, Global_active_power = as.numeric(Global_active_power),
DateTime = strptime(paste(Date,Time), "%d/%m/%Y %H:%M:%S")
)
# gapkw = Global_active_power / 1000.
# NOTE(review): if Global_active_power is already in kilowatts in this
# dataset, dividing by 1000 may mislabel the y-axis -- confirm intended units.
hpc <- transform(hpc,gapkw = Global_active_power/1000 )
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
hpcFeb <- subset(hpc, DateTime >= strptime("01/02/2007","%d/%m/%Y") &
DateTime < strptime("03/02/2007","%d/%m/%Y"))
# Render the line plot of global active power over time to plot2.png.
png("plot2.png",width = 480, height = 480)
plot(x=hpcFeb$DateTime,y=hpcFeb$gapkw,
type='l',
xlab="",
ylab="Global Active Power (kilowatts)")
dev.off()
|
48604c8a16406db47c674f207b641816a7cbc31c | 56053ecb70a673c879d4c4293c21eb18380f508b | /R/LOB_meanDB.R | 5aac7e2dcb2a029ebc04b0e182d2b11bd9a1c98c | [] | no_license | hholm/LOB_tools | e47e945303fad328e834e91c0e623fed0036d5f6 | 785d79c80c83777c08eb8c19de96766459a50dc4 | refs/heads/master | 2023-06-22T05:16:28.332620 | 2023-06-08T19:02:02 | 2023-06-08T19:02:02 | 172,795,196 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,107 | r | LOB_meanDB.R |
### Double Bond Average Computation ###
LOB_meanDB <- function(LOBpeaklist, samples, class = NULL){
#Cut down to certian classes if desired
if(!is.null(class)){
run <- LOBpeaklist[which(LOBpeaklist$species %in% class),]
}else{
run <- LOBpeaklist
}
done <- data.frame()
for (j in 1:length(unique(run$species))){
# Get rows of certain class, Get sample columns that are samples
run_samps <- LOBpeaklist[which(LOBpeaklist$species==unique(run$species)[j]),samples]
# Make a matrix of percents
run_samps <- t(t(run_samps)/rowSums(t(run_samps)))
run_samps <- as.data.frame(run_samps)
### Add the db values back in.
run_samps$FA_total_no_DB <- LOBpeaklist[which(LOBpeaklist$species==unique(run$species)[j]),"FA_total_no_DB"]
### Now a for loop to sum our percents with same DBs
### First make a storage frame
storage<- data.frame(matrix(nrow=(ncol(run_samps)), ncol=16))
for (i in 0:15) {
summed<- as.data.frame(colSums(run_samps[which(run_samps[,"FA_total_no_DB"]==i),]))
summed["FA_total_no_DB",]<-i
colnames(summed)<- i
storage[,i+1] <-summed
colnames(storage)[i+1] <- i
row.names(storage) <- row.names(summed)
}
###add a column of 0s for the averages
storage$dbaverage <- rep(0)
###calculate the weighted average for each sample (applyed by row)
for(y in 1:nrow(storage)-1){
av<-(storage[y,"0"]*0)+(storage[y,"1"]*1)+(storage[y,"2"]*2)+(storage[y,"3"]*3)+(storage[y,"4"]*4)+(storage[y,"5"]*5)+(storage[y,"6"]*6)+(storage[y,"7"]*7)+(storage[y,"8"]*8)+(storage[y,"9"]*9)+(storage[y,"10"]*10)+(storage[y,"11"]*11)+(storage[y,"12"]*12)+(storage[y,"13"]*13)+(storage[y,"14"]*14)+(storage[y,"15"]*15)
###store the average
storage[["dbaverage"]][y] <- av
}
if(j==1){
done <- data.frame(storage$dbaverage)
colnames(done) <- paste(unique(run$species)[j],"_db_mean",sep = "")
}else{
out <- data.frame(storage$dbaverage)
colnames(out) <- paste(unique(run$species)[j],"_db_mean",sep = "")
done <- cbind(done,out)
}
}
rownames(done) <- rownames(storage)
return(done)
}
|
8e0d5498d41995af0ceba5ce8ba5dc6ef77fa3e3 | 42c520e9b518197bbfbf937551888501ef993329 | /9-2 구글핍즈.R | df6f0496564c7a41136b36a14f036ef9fca06f16 | [] | no_license | Lee-jun-young98/Data_visualization | b8d945c4a2a4d5b22471f9bb5aee4f7ccfbb2861 | 77fef70b88f36d75f32e91af4df1c86310d2837c | refs/heads/main | 2023-06-26T05:23:38.188186 | 2021-07-25T03:38:11 | 2021-07-25T03:38:11 | 389,252,584 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,373 | r | 9-2 구글핍즈.R | library(tidyverse)
library(googleVis)
# NOTE(review): hard-coded working directory; adjust for your machine.
setwd("C://R_data")
## Passenger counts for Seoul subway lines 1-4
line <- read.csv("1-4호선승하차승객수.csv", header=T, sep=",")
line
## Rename the columns to English
colnames(line) <- c("line_no", "time", "out", "in")
line
## Draw a Google motion chart (one trace per subway line, animated over time)
t1 <- gvisMotionChart(line,idvar="line_no",timevar="time",
options=list(width=1000, height=500))
plot(t1)
## Display a movement path on a Google map
data(Andrew)
## gvisMap(data, location column, text to display on the map)
storm1 <- gvisMap(Andrew, "LatLong", "Tip")
plot(storm1)
## Display figures with a Google gauge chart
CityPopularity
ex1 <- gvisGauge(CityPopularity)
plot(ex1)
## Google gauge chart, advanced options
ex2 <- gvisGauge(CityPopularity, options=list(min=0, max=800, # set min/max values
greenFrom=500, greenTo=800, # specify the colored ranges
yellowFrom=300, yellowTo=500,
redFrom=0, redTo=300,
width=400, height=300)) # set frame width and height
plot(ex2)
## Google pie chart
CityPopularity
pie1 <- gvisPieChart(CityPopularity, options=list(width=400, height=300))
plot(pie1)
|
c38b1f6c74211d78ef5cd9f145e4330eb98d411e | c723b14038ea8628ceb74ce04e291c281cc976d4 | /man/reg_coef.Rd | 14522920fd94957d20ab30deac2d660adb4ecff7 | [
"MIT"
] | permissive | hokerl/ltxtab | a14b12dc3951789aa4e7f6a6fddb28267f56a1c0 | 483d2678d533b78f8620f347859572ea257e9717 | refs/heads/master | 2021-10-19T08:29:44.423944 | 2019-02-19T14:48:27 | 2019-02-19T14:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 508 | rd | reg_coef.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reg_coef.R
\name{reg_coef}
\alias{reg_coef}
\title{Get regression coefficients and significance stars}
\usage{
reg_coef(reg, vcov = NULL, sig = c(0.1, 0.05, 0.01))
}
\arguments{
\item{reg}{regression object}
\item{vcov}{covariance matrix, defaults to OLS}
\item{sig}{cutoff values for significance stars}
}
\value{
data frame with coefficients and statistics
}
\description{
Get regression coefficients and significance stars
}
|
1067d7daf2d3ede064f38de8dc4247660999cd64 | 331b734a213e6d2b8038ed90797d1440de1b1094 | /src/multiple_covariate_correction.R | ecc382034ff9cf30f50fa4c0c52ea612b72e9ae8 | [] | no_license | leekgroup/networks_correction | b51d0fd36d91e3206a1b54008899561501f0d320 | ef24a718c86bd9c4ad5b9b15e2f64703bc3ffc51 | refs/heads/master | 2021-10-28T07:35:46.170680 | 2019-04-22T16:58:08 | 2019-04-22T16:58:08 | 106,853,810 | 15 | 4 | null | null | null | null | UTF-8 | R | false | false | 2,340 | r | multiple_covariate_correction.R | # load libraries
library(dplyr)
library(recount)
# load data: paths supplied on the command line --
# 1: expression RSE list (.Rdata), 2: tissue PVE results (.Rds), 3: output path
inputargs <- commandArgs(TRUE)
exp.fn <- inputargs[1]
# exp.fn <- "/work-zfs/abattle4/parsana/networks_correction/data/raw_subset.Rdata"
tiss.pve.fn <- inputargs[2]
# tiss.pve.fn <- "/work-zfs/abattle4/parsana/networks_correction/results/tissue_pve.Rds"
load(exp.fn)
tiss.pve.list <- readRDS(tiss.pve.fn)
save.fn <- inputargs[3]
# save.fn <- "/work-zfs/abattle4/parsana/networks_correction/data/mc_corrected.Rdata"
pve.plot <- tiss.pve.list$pve_plot
tss.rss <- tiss.pve.list$tss_rss
# Covariates to regress out of each tissue: those explaining >= 1% of
# variance (PVE) that were not flagged for removal in the TSS/RSS results.
# The tissue labels in pve.plot$.id and names(tss.rss) differ in
# capitalization (e.g. "Artery_tibial" vs "Artery_Tibial"), so both are
# passed explicitly below. Previously three tissues skipped the final
# select(variable); normalizing it here does not change the saved output,
# because downstream code only reads the $variable column.
pick_covariates <- function(pve_id, remove_vars, min_pve = 0.01) {
  pve.plot %>%
    filter(.id == pve_id) %>%
    filter(!variable %in% remove_vars) %>%
    filter(value >= min_pve) %>%
    select(variable)
}
var.regress <- list(
  Subcutaneous = pick_covariates("Subcutaneous", tss.rss$Subcutaneous$remove),
  Lung = pick_covariates("Lung", tss.rss$Lung$remove),
  Thyroid = pick_covariates("Thyroid", tss.rss$Thyroid$remove),
  Muscle = pick_covariates("Muscle", tss.rss$Muscle$remove),
  Blood = pick_covariates("Blood", tss.rss$Blood$remove),
  Artery_tibial = pick_covariates("Artery_tibial", tss.rss$Artery_Tibial$remove),
  Nerve_tibial = pick_covariates("Nerve_tibial", tss.rss$Nerve_Tibial$remove),
  Skin = pick_covariates("Skin", tss.rss$Skin$remove)
)
# regressed data: for every tissue, regress the selected covariates out of
# the expression matrix and keep the residuals.
# NOTE(review): mapply pairs dat.expr and var.regress positionally --
# confirm dat.expr is ordered Subcutaneous..Skin to match var.regress.
gtex.rse.multicorr <- mapply(function(x, y) {
  cov <- x@colData
  cov <- cov[, which(colnames(cov) %in% y$variable)]
  exp.dat <- SummarizedExperiment::assay(x, 1)
  dat.corrected <- lm(t(exp.dat) ~ ., data = cov)$residual
  SummarizedExperiment::assay(x, 1) <- t(dat.corrected)
  x
}, dat.expr, var.regress)
dat.expr <- gtex.rse.multicorr
save(dat.expr, file = save.fn)
|
b87147153b3fa71b92649140796dc28246b471a4 | b395c5da4cc380705d56ab636c7a778668474629 | /plot1.R | f053af1f9c23e5b5699b7b1af2c8993ce73dee12 | [] | no_license | Zhen-hao/ExpDataAn | 61b85b50a83b1dadf897a6424e0d6177a915b5af | c27cb4909ad0963d8eca8de89c7898b884432c07 | refs/heads/master | 2021-01-23T04:09:54.237537 | 2015-04-25T13:18:55 | 2015-04-25T13:18:55 | 34,568,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 834 | r | plot1.R |
# Reading files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##Reshaping data for plot1
library(reshape2)
# Melt to long form on Emissions, then sum total emissions per year.
meltData <- melt(NEI,id = "year", measure.vars="Emissions")
yearEmissions <- dcast(meltData, year ~ variable,sum)
# Converting the units from tons to million tons
yearEmissions$Emissions <- yearEmissions$Emissions/1000000
# Draw yearly totals as points, connect them with a line, and add
# per-year dashed guide lines plus a legend, writing to plot1.png.
png("plot1.png")
par(mar=c(5,5,4,2))
with(yearEmissions,
plot(year,Emissions,main="Yearly Total PM2.5 Emissions",type="p", pch = 17,col=year, xlab="year",ylab="Total PM2.5 Emissions of the year \n (million tons)")
)
with(yearEmissions,
lines(x=year , y=Emissions, type="l")
)
# Horizontal dashed line at each year's total, colored by year.
abline(h = yearEmissions$Emissions, lwd = 2, lty = 2,col=yearEmissions$year)
legend("topright", pch = 17, col = yearEmissions$year, legend = yearEmissions$year)
dev.off()
|
fe87817c809a1be8b8f96573cc0784666cfa8312 | 20d2719276549686b09793d0636919ce09f56312 | /data_cleaning/old/frequency_map.R | bd9ac712b13ab3b36ab7d79537682be7ac355c0e | [] | no_license | franciscothebrady/weather_forecasts | 288249844e66c5e6b955e936635f632e8fdfe525 | 9acb879461779e3a5f5dcc6ed9726ac7dd3cd21d | refs/heads/master | 2021-01-19T07:52:40.451718 | 2019-02-03T20:53:46 | 2019-02-03T20:53:46 | 87,580,631 | 1 | 2 | null | 2017-08-03T20:34:36 | 2017-04-07T19:34:57 | R | UTF-8 | R | false | false | 12,315 | r | frequency_map.R | # event frequencies
# combining 15 years of data on storm events to build a frequency measure for severe rain events in CONUS
# timeframe: 2000-2015
# NOTE(review): setwd() hard-codes the author's machine; run from the
# project root (or parameterize the path) when reusing this script.
setwd("~/weather_forecasts/")
library(stringr)
library(dplyr)
library(ggmap)
library(ggplot2)
library(maps)
# install.packages("mapdata")
library(mapdata)
#install.packages("choroplethr")
#install.packages("choroplethrMaps")
library(choroplethr)
library(choroplethrMaps)
# Read in each year of NOAA storm event details (2000-2015). The file names
# carry per-year processing-date suffixes, so they are listed explicitly;
# this table replaces sixteen copy-pasted read/filter blocks.
storm_suffix <- c(
  "2000" = "c20170717", "2001" = "c20170717", "2002" = "c20170717",
  "2003" = "c20170717", "2004" = "c20170717", "2005" = "c20170717",
  "2006" = "c20170717", "2007" = "c20170717", "2008" = "c20170718",
  "2009" = "c20170816", "2010" = "c20170726", "2011" = "c20170519",
  "2012" = "c20170519", "2013" = "c20170519", "2014" = "c20170718",
  "2015" = "c20170718"
)
# Load one year of events and keep only usable heavy-rain records.
read_year_precip <- function(year) {
  fn <- paste0("data/StormEvents_details-ftp_v1.0_d", year, "_",
               storm_suffix[[year]], ".csv.gz")
  dat <- as.data.frame(read.csv(fn, stringsAsFactors = FALSE))
  # filter out events outside CONUS, HI, and AK.
  dat <- dplyr::filter(dat, STATE_FIPS < 57 & STATE_FIPS > 0)
  # filter out episodes with missing damage reports
  dat <- dplyr::filter(dat, !(DAMAGE_PROPERTY == "") & !(DAMAGE_CROPS == ""))
  #-- filter for PRCP related events=="Heavy Rain"
  dplyr::filter(dat, EVENT_TYPE == "Heavy Rain")
}
# combine into one big df, in chronological order as before
freq_totals <- do.call(rbind, lapply(names(storm_suffix), read_year_precip))
# remove unnecessary cols
freq_totals <- select(freq_totals, "STATE","STATE_FIPS","YEAR","EVENT_TYPE","CZ_TYPE","CZ_FIPS",
"CZ_NAME","WFO","BEGIN_DATE_TIME","CZ_TIMEZONE","END_DATE_TIME","INJURIES_DIRECT",
"INJURIES_INDIRECT","DEATHS_DIRECT","DEATHS_INDIRECT","DAMAGE_PROPERTY","DAMAGE_CROPS",
"BEGIN_LAT","BEGIN_LON","END_LAT","END_LON")
# recode crop and property damage vars:
# the last character of each DAMAGE_* string is mapped to a power of ten
# ("K" -> 3, "M" -> 6, anything else -> 9).
damage_magnitude <- cbind(
strsplit(substr(freq_totals$DAMAGE_PROPERTY, nchar(freq_totals$DAMAGE_PROPERTY), nchar(freq_totals$DAMAGE_PROPERTY)), ""),
strsplit(substr(freq_totals$DAMAGE_CROPS, nchar(freq_totals$DAMAGE_CROPS), nchar(freq_totals$DAMAGE_CROPS)), ""))
damage_magnitude <- ifelse(damage_magnitude == "K", 3, ifelse(damage_magnitude == "M", 6, 9))
# Numeric part of the damage strings (text before the letter suffix).
# NOTE(review): as.numeric() on the list returned by strsplit() relies on
# implicit list coercion -- confirm every damage string parses as intended.
damage_numeric <- cbind(
as.numeric(strsplit(freq_totals$DAMAGE_PROPERTY, "[[:alpha:]]")),
as.numeric(strsplit(freq_totals$DAMAGE_CROPS, "[[:alpha:]]")))
# Total (property + crop) damage per event.
damage_value <- rowSums(damage_numeric * 10^damage_magnitude, na.rm = TRUE)
# Store the value in thousands, with magnitude/unit bookkeeping columns.
freq_totals$DAMAGE_VALUE <- damage_value / 1e3
freq_totals$DAMAGE_VALUE.magnitude <- rep(3, length(freq_totals$DAMAGE_VALUE))
freq_totals$DAMAGE_VALUE.unit <- rep("USD", length(freq_totals$DAMAGE_VALUE))
rm(damage_magnitude, damage_numeric, damage_value)
# Re-expand DAMAGE_VALUE to plain USD and drop the raw/bookkeeping columns.
freq_totals$DAMAGE_VALUE <- freq_totals$DAMAGE_VALUE*10^freq_totals$DAMAGE_VALUE.magnitude
freq_totals$DAMAGE_CROPS <- NULL
freq_totals$DAMAGE_PROPERTY <- NULL
freq_totals$DAMAGE_VALUE.magnitude <- NULL
# combine injuries/fatalities, could be interesting later
freq_totals$injuries.deaths <- freq_totals$INJURIES_DIRECT + freq_totals$INJURIES_INDIRECT +
freq_totals$DEATHS_DIRECT + freq_totals$DEATHS_INDIRECT
freq_totals$INJURIES_DIRECT <- NULL
freq_totals$INJURIES_INDIRECT <- NULL
freq_totals$DEATHS_DIRECT <- NULL
freq_totals$DEATHS_INDIRECT <- NULL
# group by NWS Region: attach each WFO's region from a lookup table on GitHub
wfo_reg <- read.csv(url("https://raw.githubusercontent.com/franciscothebrady/weather_forecasts/master/WFO-freq.csv"),
stringsAsFactors = FALSE)
# remove Alaska because it's outside the study area
wfo_reg <- filter(wfo_reg, region!="Alaska")
# match WFOs in freq totals (left join keeps events without a region match)
freq_totals <- merge(freq_totals, wfo_reg, by.x = c("WFO"), by.y = c("wfo"), all.x = TRUE)
rm(wfo_reg)
# Quick check of how many events are missing coordinates.
table(is.na(freq_totals$BEGIN_LAT))
# write the tidied event table to csv
write.csv(freq_totals, "~/weather_forecasts/data/frequency_totals_00-15.csv")
# MAPS!
# group by state and year
# state_year_freq <- group_by(freq_totals, STATE, YEAR)
yearly <- group_by(freq_totals, YEAR, STATE)
yearly_state <- summarize(yearly, count = n())
# map_data("state") uses lower-case state names as its join key
yearly_state$region <- tolower(yearly_state$STATE)
year_ten <- filter(yearly_state, YEAR==2010)
# example using ggmap
# https://rpubs.com/neilfws/229230
# follow this! esp the one mapping by magnitude!
# Choropleth of 2010 event counts by state.
states <- map_data("state")
states <- left_join(states, year_ten, by = "region")
ggplot(data = states) + geom_polygon(data = states, aes(x=long, y=lat, group = group, fill = count), color = "white")
# map of events 2000-2015
usa <- make_bbox(lon = BEGIN_LON, lat = BEGIN_LAT, data = freq_totals, f = .1)
usa
usa_map <- get_map(location = usa, zoom = 3, maptype = "satellite", source = "google")
ggmap(usa_map) + geom_point(data = freq_totals, mapping = aes(x=BEGIN_LON, y=BEGIN_LAT),
color="blue", size = 2)
|
c9e86a0e77a5f5572b5d0deba9e7ccff834fed03 | 14c500c215c3c539503cefae4b3400cdd974bd82 | /analysis_polyE_data.R | f7f2540516fde18720c257730aa67adeb7ff375a | [] | no_license | bryjalab/2022_Kravec_ScienceAdvances | c709846839b90c4c969f2ecf5f7251fca54bcf86 | f1a2530afe1d78be3f4f98464a2266b9f51bc4c1 | refs/heads/main | 2023-04-11T02:48:21.634402 | 2022-10-20T08:32:53 | 2022-10-20T08:32:53 | 554,207,944 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 592 | r | analysis_polyE_data.R | # Script for statistical analysis of DVL3 subcellular localization
library(lme4)
library(lmerTest)
library(reshape)
library(multcomp)
D <- read.delim(file = "path/polyE data.txt") # adjust path according to particular file
# Long format: one row per (exp, treat, variable) measurement.
D1 <- melt(D, id=c("exp", "treat"))
# Linear mixed model for the "puncta" readout: fixed treatment effect,
# random intercept per experiment.
fit4 <- lmer(value ~ treat + (1|exp), data = D1[which(D1$variable=="puncta"),])
fit4
anova(fit4)
summary(fit4)
summary(glht(fit4)) # adjusted p-values
# Confidence intervals for the fixed effects only ("beta_").
cc4 <- confint(fit4,parm="beta_")
ctab4 <- round((cbind(OR=fixef(fit4),cc4)),3)
ctab4 ### returns OR (95% CI)
summary(glht(fit4, mcp(treat = "Tukey"))) # post-hoc test (Tukey all-pairs comparison of treatments)
4e7185390f3da2b331fc54259ea4c4c81664d8be | 3b9736dc267fd02a4dd8b8f754d0f7f547a88114 | /categories/script/smartwhatch.r | 590870fd3eadb2d5bba8d57a7f8cf4662cedbe5d | [] | no_license | kasrasammak/AprioriTransactionAnalysis | cd4a15316407a4629c10f214c2a865fc0281c07d | 9a07dd7845c4218b081e324c73533cdb60fe015b | refs/heads/master | 2021-03-03T02:51:20.519494 | 2020-03-09T02:23:57 | 2020-03-09T02:23:57 | 245,924,405 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 950 | r | smartwhatch.r | #create df, csv & transactions file for SMARTWHATCH
# Export the order/SKU/name columns for the smartwatch category to CSV.
dfIdOrderAndSkuSmartWhatch <- smartwhatch[c('id_order', 'sku', 'name_en')]
write.csv(dfIdOrderAndSkuSmartWhatch, "/Users/owlthekasra/Documents/Code/RStudio/RecommenderSystem/categories/IdOrderAndSkuSmartWhatch.csv")
# Re-read the CSV as a transactions object: one transaction per order,
# items identified by the English product name.
idOrderAndSkuSmartWhatch <- read.transactions("/Users/owlthekasra/Documents/Code/RStudio/RecommenderSystem/categories/IdOrderAndSkuSmartWhatch.csv",
format = 'single',
sep = ',',
header = TRUE,
cols= c('id_order', 'name_en'))
#plot frequency and calculate apriori
itemFrequencyPlot(idOrderAndSkuSmartWhatch, top = 20, support = 0.005, cex.names = .7)
# Mine association rules requiring at least two items per rule.
rulesSmartWhatch <- apriori (idOrderAndSkuSmartWhatch, parameter = list(supp = 0.004, conf = .0005, minlen = 2))
inspect(rulesSmartWhatch)
ae4d2d6ef3c52ad0153797436b04b7744c5fc5a8 | 1267295b4dc6938181811b0d20f785fe2c87ba3c | /R/pgnorm-internal.R | 85e791241d169240799949bda220113fbfadf1de | [] | no_license | cran/pgnorm | c4f72b2adb80e9edd2856a9c6a2df73f5bfb373a | 93263927583b9d94d6f6d9875e331c6cff069106 | refs/heads/master | 2021-01-16T01:01:38.850565 | 2015-11-24T15:08:34 | 2015-11-24T15:08:34 | 17,698,486 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,398 | r | pgnorm-internal.R | .Random.seed <-
c(403L, 532L, -1134443871L, -768325319L, 454138273L, 1616487675L,
-332138857L, -1085509620L, 355632913L, -1665533238L, 1018959096L,
-1630893905L, -1421531101L, 1544116033L, -530062943L, -242239448L,
-1005309530L, 1502899014L, 693810032L, -1097625268L, 360675189L,
-1654198732L, 553821648L, -1843509725L, 1661012989L, 1807279909L,
823401160L, 854030175L, -1238737359L, -1485648473L, 101090635L,
-1012625015L, -1002854676L, 1049898948L, -1044391010L, 2980896L,
45303667L, -1851833962L, -1787992680L, 379230517L, -857602905L,
-999396182L, -407705349L, -2038995730L, -336698570L, -1226831367L,
2084582431L, -1675423721L, 1753012801L, 635709233L, -63858633L,
-1434632776L, 1391608831L, -441973006L, -1672996476L, -1261037356L,
596592806L, -512924504L, -1714646291L, -1131561858L, 1211909929L,
2059660476L, -1601628154L, 1110058123L, -1401070501L, -1992596790L,
-1137484100L, 181815828L, 1449801253L, 169702059L, -465573607L,
-1212959361L, -2135040948L, 1197137830L, -1754624657L, 1399748111L,
999458254L, 1410811578L, -1626435175L, -443528942L, -345802023L,
-515344352L, 1395834803L, 1247839878L, 1476105081L, -1508909032L,
-1386804011L, -84397058L, 202800497L, 1686187083L, 377459331L,
1387004049L, 1929827039L, 725141969L, -1502518454L, 730487794L,
-630221634L, -232467968L, -2109846809L, -1033104174L, 1406101668L,
-587254014L, 1321141971L, 1406630338L, -1345314042L, 1138581488L,
-829551207L, -1817363543L, 563644244L, 1910012904L, 181679254L,
1919281989L, 760905594L, 401891359L, -2056552787L, -856013682L,
1521347005L, -20665701L, 1662300234L, -630524994L, -1534755579L,
-891478667L, 2122562881L, 1552174196L, -79850849L, -1087136569L,
-1274976899L, 1455482985L, -1234381559L, 1601982088L, 1592831135L,
-1269234035L, -996245439L, -561721426L, -669242018L, -44017011L,
990371103L, -372982568L, -609921524L, -1209114511L, 443573373L,
-1865663379L, 2001273927L, -237140798L, 1637440360L, -984953903L,
-1589208344L, 668587400L, -2024911337L, 2108279680L, 645887037L,
1213596268L, 1143629360L, -1770688282L, -1960861361L, -369302690L,
-1639912219L, -1331180420L, 2083053272L, 1973883674L, -824291885L,
-889292182L, 501338209L, 1403746867L, -1799288200L, -1988854171L,
-1093134820L, 1113309458L, -913422042L, -1756685320L, 413162609L,
-1582615451L, 835440544L, -451354467L, 1228013002L, -419279647L,
1591754819L, 1433723765L, 487212082L, -1063842429L, -1955717512L,
-1691671189L, 1353879429L, 2062471928L, 1406337303L, -688898698L,
-924919834L, 729943647L, -1290454740L, 2050900500L, 1342047457L,
476478041L, 118815067L, 2012984949L, -2086886368L, -1684461210L,
1653322651L, 1810557379L, -1349428989L, 64217647L, 1280745291L,
1506820979L, -1809321448L, 1067701365L, -517254826L, 1585137734L,
-603961086L, 190626014L, 592514794L, -320406586L, -1697827121L,
-778986955L, 571387249L, 678640968L, -2012963846L, -1567989045L,
1054897054L, -1714610167L, -304589293L, 2031844356L, -797229220L,
695568607L, 1436811321L, -775295755L, 154889892L, -1006210771L,
910456031L, 844255205L, 1820695882L, -550143685L, 954714372L,
2011715134L, 1444688013L, 970538327L, 585385898L, -638221016L,
855486808L, 1740469112L, 168416493L, -1318551412L, 1018883668L,
222161863L, -305922938L, -1334716711L, -1842859926L, -1647620122L,
1429830647L, -925785878L, -289915062L, 1198937695L, 247625983L,
-1001236449L, 130168037L, 415099389L, 2029475105L, 924576034L,
492886922L, 204666743L, 387903417L, 1951965281L, 1582305868L,
-337186500L, 747774584L, 27282941L, -1802195610L, 278117594L,
-1295511223L, -1829625857L, 1544510625L, -1280732820L, -2004222745L,
-36030831L, 503453129L, -2097390039L, 150460472L, 1899742668L,
60724309L, 68170720L, 466841568L, -102501569L, 851629069L, -1447283065L,
679485540L, -1045432743L, -368443112L, 1256609600L, -1345323350L,
-117207481L, -553157075L, -430374271L, -641854011L, 1789694586L,
-614317710L, -1912063366L, -1398048673L, 584261027L, 1246982061L,
1980013692L, 179377157L, -1447808561L, 202666948L, -248905063L,
-1502680480L, 1088265651L, -155443047L, 1728423859L, -1744026972L,
-833477375L, 663109878L, 1514997962L, 872084941L, 2070911276L,
-1362449334L, 1043009894L, -87458053L, 2088704370L, -293985501L,
1985110927L, 614537709L, 2063105271L, -1567424643L, 1235486991L,
-1139293244L, -1020656992L, -2038070127L, 650977482L, 912247381L,
1831003259L, -383765838L, 2102777255L, -1874477070L, -259069629L,
1326572249L, 345086970L, -1080424245L, -640928861L, -1176542153L,
1193232132L, 1242211147L, 180724523L, 219824852L, -965106753L,
561348820L, 1311132934L, 1689245020L, 738864004L, 2058220347L,
1540554244L, 1220853798L, 1061028951L, -1854879556L, -2099287909L,
-157321653L, -505945817L, 179995049L, 1467593410L, -362548521L,
-743281618L, -1311573001L, 1324957949L, -1177314102L, -1149447610L,
808294299L, -1004920112L, -259690765L, 628477206L, 592430313L,
925980187L, 8918540L, -909400317L, -976975369L, 1263473457L,
-796271094L, -114710090L, -1567085400L, -2055236513L, -870773431L,
-1817436424L, 1483770274L, 422247265L, 899993676L, -549445487L,
-1582139467L, -690490066L, -1377422837L, -465425587L, -1038606241L,
-43238954L, -1654718380L, -870947221L, 1424667641L, -821399528L,
1636762819L, 1753580329L, 1512216433L, -334836567L, 2037713279L,
-1289128891L, 1982627858L, 1123325626L, 1138172168L, 428394004L,
968412372L, 431066480L, -169639636L, 32457623L, -1379466711L,
1392462191L, 197068119L, -1358109989L, -1830921211L, -670086868L,
-1683847103L, -1093784019L, 1769948950L, -111605749L, 1727139503L,
-1180411411L, -1484187100L, 2062949964L, -1972018252L, -75460568L,
1100930395L, -1027441435L, -1836334561L, 1022216187L, 1045343557L,
-402450651L, -1127173087L, 348678238L, 1376974182L, 652416284L,
1053590602L, -1816983299L, 447644950L, -2042117426L, -1354394749L,
-1113253599L, -803404283L, -38947816L, 799599501L, -462375944L,
932293706L, -477543242L, -830401314L, 1380067743L, -1434821351L,
-929274668L, -1864628120L, 1648770380L, -1231774357L, 567624155L,
-1282900267L, 1431793241L, 1567347042L, -1221329025L, 245269579L,
-41059926L, 439069780L, 772346313L, -862642924L, 1634776435L,
-200257318L, 581280905L, -2096157399L, 440945805L, -14963736L,
241496265L, 1097051779L, -534120160L, 1275834375L, -677862898L,
-1099549622L, 1675620261L, 445931753L, 899368892L, 1359408445L,
1945925942L, 868580802L, -1017479084L, -1235331475L, 184562740L,
-930257733L, -54645116L, 1235688336L, -2098648700L, 544727585L,
-2045595283L, 184167204L, -336357953L, -708541495L, -385560560L,
-887660646L, -712335140L, -694407767L, 607098455L, 275206631L,
-526640781L, -1118235860L, 1383358724L, 1873183663L, 130300914L,
-10745572L, 1472651115L, 1996498387L, 541866246L, -1168129199L,
841973735L, -889679948L, -746650937L, 1890032986L, -1520519376L,
-17813082L, 44974992L, 1420648837L, -124702999L, 889943590L,
-2017836375L, 155393523L, 521664441L, 215427951L, -440285449L,
-1244902268L, 562426847L, -1161225551L, -1664648160L, 864890789L,
-372740438L, -823708804L, 1922196387L, -704742196L, -470009168L,
-1317036929L, -1175403094L, -971848973L, -1876446650L, -1580456663L,
607547763L, 784511096L, 717098213L, -1359642917L, 529393607L,
-1265584657L, 287300575L, 1464628314L, -800380247L, -1242195968L,
962632857L, 617503811L, -1282575664L, 1033642731L, 1302603277L,
-138123900L, -400780467L, 546077614L, 86592813L, 168249744L,
-516404350L, -1982909378L, -1441680494L, -922077006L, -850504234L,
-1535217657L, 1286111937L, -728349207L, 259922116L, -1859479605L,
-1496297963L, -326702933L, 736519511L, -881621655L, -961146169L,
-2016583525L, -1343495184L, 1029719030L, -803270644L, -492876347L,
-312958010L, 1301923848L, -385910333L, 226666379L, 1542435802L,
2009920276L, 1167032394L, -1522819857L, -937385450L, -632488921L,
1965687836L, 1213182560L, -694675204L, -548891324L, -788296678L,
-1603478139L, -1911461182L, -1204535226L, -1376609983L, -98802652L,
-2109878754L, 99183821L, -237004687L, 1256827551L, 1693926679L,
1550181257L, 1841086890L, -1207239384L, -611746245L, 1760643327L,
-1558658524L, -865648244L, -833561235L, 958091182L, -1456383229L,
-1378858056L, -579912799L, 1561890704L, 2111311621L, 444984031L,
-1863021030L, -2063535073L, -1236245371L, -477126567L, 1438137548L,
1969667361L, 755356136L, -1268951546L, -1324035683L)
|
f9909a86fb0e83c5c8dc6e93fad864d9df702b5a | 22dc0e0e631061a2cdb471773ea3c35a418ec874 | /R/score_sapphire.R | f565a0c4ff8f258393e50b7e36ec91767db7ad03 | [] | no_license | clangi/CampaRi | b042e2556b379ff78292af67d3eab1994e2b0fee | 48daa261fd39fd12a7eee17ad42773b8ffa24ba5 | refs/heads/master | 2021-01-20T11:38:31.987264 | 2018-07-19T17:05:11 | 2018-07-19T17:05:11 | 101,677,109 | 0 | 1 | null | 2017-08-28T18:57:51 | 2017-08-28T18:57:51 | null | UTF-8 | R | false | false | 31,596 | r | score_sapphire.R | #' @title Scoring the SAPPHIRE-plot basin division
#' @description
#' \code{score_sapphire} uses the information provided by the SAPPHIRE basin recognition in order to score the identified free energy basins
#' against the original partition (annotation).
#'
#' @param the_sap Name of the PROGIDX_<...>.dat or REPIX_<...>.dat file or Data frame with three columns containing, in order, the (sorted) progress index,
#' the relative time indices and the cut function.
#' @param ann Annotation must be a single line with the true cluster labels.
#' @param scoring_method Precise scoring method (final comparison between cluster labels)
#' \itemize{
#' \item "\code{nmi}"
#' \item "\code{adjusted_rand_index}"
#' \item "\code{jaccard_index}"
#' \item "\code{purity}"
#' }
#' @param basin_obj Output of \code{\link{basins_recognition}}. If you used \code{\link{basin_optimization}} to optimize the clusters please
#' insert only the resulting bas object
#' @param manual_barriers If an integer vector is inserted, it is used as the barrier locations.
#' @param plot_pred_true_resume Defaults to \code{FALSE}. If set to \code{TRUE} it plots the predicted and true labels one alongside the other.
#' @param return_predicted If true return the predicted vector.
#' @param multi_cluster_policy This string decides how the clusters must be handled in the case of more clusters found in comparison to the annotation
#' inserted. The default is \code{'popup'}. The available values are:
#' \itemize{
#' \item "\code{popup}" Creates new clusters (ordered by shannon weight).
#' \item "\code{keep}" Keeps the duplications and so on. This is a bias feature.
#'    \item "\code{keep}" Keeps duplicated labels as they are. Note that this can bias the score.
#' }
#'
#' @param silent A logical value indicating whether the function has to remain silent or not. Default value is \code{FALSE}.
#' @param return_plot returns plots
#' @param ... \code{max_number_of_elements} for plotting can be supplied. It defaults to 20k.
#'
#' @return A list containing
#' \itemize{
#' \item "\code{score.out}" Resulting score between 0 and 1 (good).
#' \item "\code{label_freq_list}" Total representation of the clusters along with the single shannon entropies.
#' \item "\code{main_desc}" Description of the main clusters found.
#'      \item "\code{perc_miss}" Percentage of misassigned snapshots.
#' }
#'
#'
#' @examples
#' adjl <- mst_from_trj(trj = matrix(rnorm(1000), nrow = 100, ncol = 10))
#' ret<-gen_progindex(adjl = adjl)
#' gen_annotation(ret_data = ret, local_cut_width = 10)
#' \dontrun{
#' score_sapphire(the_sap = "PROGIDX_000000000001.dat", ann = rnorm(100))
#' }
#'
#' @details For details regarding the SAPPHIRE plot, please refer to the relative publications \url{http://www.nature.com/articles/srep06264}.
#' Main documentation of the original campari software \url{http://campari.sourceforge.net/documentation.html}.
#'
#' @importFrom ClusterR external_validation
#' @importFrom data.table fread
#' @import ggplot2
#' @export score_sapphire
score_sapphire <- function(the_sap, ann, manual_barriers = NULL, basin_obj = NULL, # fundamental inputs
                           scoring_method = 'adjusted_rand_index', multi_cluster_policy = 'popup', # scoring and merging details
                           plot_pred_true_resume = FALSE, silent = FALSE, return_plot = FALSE, # it refers to this function
                           return_predicted = FALSE, # return the prediction and true vector
                           ...){ # to add
  
  # ----------------------------------------------------------------------------------------------- general input checking
  input.args <- list(...)
  avail.extra.arg <- c('dbg_score_sapphire', 'max_number_of_elements')
  avail.policies <- c('popup', 'keep', 'merge_previous')
  if(!is.null(names(input.args)) && any(!(names(input.args) %in% avail.extra.arg))){
    if(!silent) warning('There is a probable misspelling in one of the inserted variables. Please check the available extra input arguments.')
    if(!silent) cat('!!!!!!!!! We found the following variables without a father (not between our extra input arguments) !!!!!!!!!!\n')
    if(!silent) cat(names(input.args)[!(names(input.args) %in% avail.extra.arg)], '\n')
  }
  if(!('dbg_score_sapphire' %in% names(input.args))) dbg_score_sapphire <- FALSE else dbg_score_sapphire <- input.args[['dbg_score_sapphire']]
  if(!('max_number_of_elements' %in% names(input.args))) max_number_of_elements <- NULL else max_number_of_elements <- input.args[['max_number_of_elements']]
  if(!is.logical(dbg_score_sapphire)) stop('dbg_score_sapphire must be a logical')
  if(is.null(max_number_of_elements)) max_number_of_elements <- 20000
  if(!.isSingleInteger(max_number_of_elements)) stop('max_number_of_elements must be a single integer')
  
  # methods input check
  scoring_method.opt <- c("adjusted_rand_index", "jaccard_index", "purity", "nmi")
  if(!(scoring_method[1] %in% scoring_method.opt)) stop("Scoring method option not valid")
  scoring_method <- scoring_method[1] # only the first element is validated, so only the first one is used downstream
  ###
  have_the_bas <- FALSE
  manual_mode <- FALSE
  
  # Fundamental inputs: the_sap and optimizations
  # - basin_obj = bas: uses bas object from basins_recognition() or basin_optimization()
  # - manual_barriers = c(1,2,3,4): insert manually the barriers [it creates a fake bas obj]
  if(!is.character(the_sap) && !is.data.frame(the_sap)) stop("the_sap must be a string or a data frame")
  if(is.character(the_sap) && (!grepl("PROGIDX", the_sap) && !grepl("REPIX", the_sap))) stop("Please provide a the_sap name starting with 'PROGIDX' or 'REPIX'" )
  if(!is.null(basin_obj) && !is.logical(basin_obj)){
    if(is.null(names(basin_obj)) || names(basin_obj)[1] != 'tab.st') 
      stop('Use the basin_optimization (optimal_bas) or the basins_recognition output for basin_optimization var. 
           It must be of the shape tab.st nbins seq.st etc.. (or put it TRUE/FALSE).')
    if(!is.null(manual_barriers)) stop('Use either manual mode or bas mode')
    have_the_bas <- TRUE
  }
  if(!is.null(manual_barriers)){
    if(!all(sapply(manual_barriers, .isSingleInteger))) stop('All the values in manual_barriers must be integers')
    if(any(manual_barriers < 1)) stop('the manual barriers should be above 0')
    manual_mode <- TRUE
  }
  
  # - basin_optimization = TRUE: runs the optimization of the basins
  # if(!is.null(basin_optimization) && is.logical(basin_optimization)) {
  #   if(!silent) cat('You are using basin_optimization as a logical. We see it. \n')
  #   do_optimization <- TRUE
  #   if(!is.null(manual_barriers) && basin_optimization) stop('use manual mode or basin optimization mode')
  # }
  # 
  # Other input checks
  if(!is.numeric(ann) && (!is.null(dim(ann)))) stop('Please provide an integer vector for ann')
  if(!is.logical(plot_pred_true_resume)) stop('plot_pred_true_resume must be a logical')
  if(!is.logical(return_plot)) stop('return_plot must be a logical')
  if(!is.logical(silent)) stop('silent must be a logical')
  if(!is.logical(return_predicted)) stop("return_predicted must be a logical value")
  
  # check over the merging or popping policy
  if(!.isSingleChar(multi_cluster_policy)) stop('multi_cluster_policy must be a single character string')
  if(!(multi_cluster_policy %in% avail.policies)) stop(paste0('multi_cluster_policy must be between the following: ', paste(avail.policies, collapse = " ")))
  
  # sapphire table loading
  if(!is.null(the_sap) && !is.data.frame(the_sap)){
    st <- as.data.frame(data.table::fread(the_sap, data.table = F))
  }else{
    st <- as.data.frame(the_sap)
  }
  
  # - basin_optimization = TRUE: runs the optimization of the basins
  # if(do_optimization){
  #   if(!silent) cat('It is advisable to use the basin_optimization function EXTERNALLY to this one and feed the output to basin_optimization.\n')
  #   optim_bas <- CampaRi::basin_optimization(the_sap = the_sap, silent = silent, ...)
  #   bas <- optim_bas$bas
  #   n_fin_cl <- nrow(bas$tab.st)
  
  # Main switcher for the final output
  # - basin_obj = bas: uses bas object from basins_recognition() or basin_optimization()
  # - manual_barriers = c(1,2,3,4): insert manually the barriers [it creates a fake bas obj]
  if(have_the_bas){
    if(!silent) cat('You inserted the bas object directly. No specific check of your wrong doing is applied. Therefore USE THE RIGHT ONE.\n')
    bas <- basin_obj
    n_fin_cl <- nrow(bas$tab.st)
    pifilename <- bas$filename
  }else if(manual_mode){
    if(!silent) cat('You inserted manual barriers for testing.\n')
    n_fin_cl <- .lt(manual_barriers) + 1
    if(is.null(the_sap)) stop('with manual_barriers it is needed to specify the SAPPHIRE table in the_sap')
  }else{
    stop('We did not understand what to do.')
  }
  
  # ---------------------------------------------------------------------------------------------- Annotation analysis
  # Using the prog. idx for reordering and lpi
  if(!silent) cat('Having inserted the_sap we reorder the ann using it.\n')
  if(.lt(ann) != nrow(st)) stop('Annotation and progress index must have same length. ')
  piann_true_v <- ann[c(st[,3])] # annotation reordered along the progress index (column 3 holds the time indices)
  lpiann <- .lt(piann_true_v)
  uni_ann <- unique(piann_true_v)
  if(anyNA(piann_true_v)) stop('We can not handle NAs in the annotation. Check it!')
  
  # min of ann correction - shift the labels so that the smallest one is 1
  if(min(uni_ann) < 1){
    if(!silent) cat('Found negative or 0 values in the annotation. Correcting by sum of the abs of the min +1 per each annotation value. \n')
    piann_true_v <- piann_true_v + abs(min(uni_ann)) + 1
    uni_ann <- unique(piann_true_v)
  }else if(min(uni_ann) > 1){
    if(!silent) cat('Found minimum values > 1 in the annotation. Correcting it automatically. \n')
    piann_true_v <- piann_true_v - min(uni_ann) + 1
    uni_ann <- unique(piann_true_v)
  }
  
  # check for gaps in the annotation (every integer 1..max must be present)
  if(!all(seq_len(max(uni_ann)) %in% uni_ann)) stop('Please provide an annotation without gaps.')
  
  # Checks for the number of labels VS number of clusters
  n_labels <- .lt(uni_ann)
  do_cl_spawn <- FALSE
  if(n_labels < n_fin_cl){
    if(!silent) cat('ATTENTION: found', n_fin_cl, 'clusters using basin recognition while the inserted annotation has only', n_labels, 'number of labels.',
                    '\nIt would be the case to reduce the number of barriers. To do so, please consider reducing the number of bins (nbinsxy).\n')
    if(!silent) cat('Spawning procedure (if merging is not TRUE) will be applied. This will create new cluster labels for the less populated or more heterogeneous clusters.\n')
    do_cl_spawn <- TRUE
  } else if(n_labels > n_fin_cl){
    if(!silent) cat('ATTENTION: found', n_fin_cl, 'clusters while in ann we have', n_labels, 'number of labels.',
                    '\nThis is not problematic but it will lead to a possible overestimation of the error. Please consider more bins (nbinsxy).\n')
  }
  
  # creating a fake bas (tab.st only) to fit the following analysis
  if(manual_mode){
    n_cl.b <- 1:n_fin_cl
    if(any(manual_barriers >= lpiann)) stop('one manual_barriers or more are higher than the number of snapshots!.')
    start.b <- c(1, sort(manual_barriers))
    end.b <- c(sort(manual_barriers), lpiann)
    lt.b <- diff(c(1, sort(manual_barriers), lpiann))
    lt.b[1] <- lt.b[1] + 1
    if(sum(lt.b) != lpiann) stop("The inserted barriers don't sum up to the length of the annotation.")
    bastbl <- cbind('n_cl' = n_cl.b, 'start' = start.b, 'end' = end.b, '.lt' = lt.b)
    bas <- list('tab.st' = bastbl)
  }
  
  # ---------------------------------------------------------------------------------------------- Creation of the Entropy levels
  # we choose the representative label
  label_freq_list <- list() # each element of this list is a cluster
  for(jk in 1:n_fin_cl){
    if(jk != 1) tarts <- bas$tab.st[jk,2] + 1
    else tarts <- bas$tab.st[jk,2]
    maj_selected <- sort(table(piann_true_v[tarts:bas$tab.st[jk,3]]), decreasing=TRUE) # count and sort for each cluster found.
    maj_sel_filtered <- maj_selected[1:n_labels] # select only first n_labels!
    label_freq_list[[jk]] <- (maj_sel_filtered*1.0)/bas$tab.st[jk,4] # calculating the density of major label
    label_freq_list[[jk]] <- rbind('d' = label_freq_list[[jk]], 'n' = as.integer(maj_sel_filtered))
    label_freq_list[[jk]][is.na(label_freq_list[[jk]])] <- 0
    
    # if a group does not contain some of the labels a <NA> will appear
    if(anyNA(colnames(label_freq_list[[jk]]))){
      # no scream will be done. This part should be merged with the other one. Lets say that there is a better entropy or something else
      clnms <- colnames(label_freq_list[[jk]])
      clnms[is.na(clnms)] <- setdiff(c(1:n_labels), c(colnames(label_freq_list[[jk]])))
      colnames(label_freq_list[[jk]]) <- clnms
    }
    label_freq_list[[jk]] <- rbind('lab' = as.integer(colnames(label_freq_list[[jk]])), 'clu' = rep(jk, n_labels), label_freq_list[[jk]])
  }
  # label_freq_list
  
  # calculating the entropy (per-label Shannon term -d*log(d); 0-density labels contribute 0)
  for(jk in 1:.lt(label_freq_list)){
    it <- c()
    for(kj in 1:ncol(label_freq_list[[jk]])) {
      ulm <- label_freq_list[[jk]]['d', kj]
      if(ulm != 0) kholn <- - ulm * log(ulm) else kholn <- 0
      it <- c(it, kholn)
    }
    label_freq_list[[jk]] <- rbind(label_freq_list[[jk]], 'sh_en' = it)
  }
  # label_freq_list
  
  # ---------------------------------------------------------------------------------------------- Creation of the Entropy levels
  # collecting the various elected labels - selection policy: unique labels!
  # looking for collisions and candidates
  top_label <- sapply(label_freq_list, function(x) x['lab', 1])
  top_lab_n <- sapply(label_freq_list, function(x) x['n', 1])
  top_lab_sh <- sapply(label_freq_list, function(x) x['sh_en', 1])
  n_labels_in_cl <- sapply(label_freq_list, function(x) sum(x['n',] != 0))
  tot_dim <- sapply(label_freq_list, function(x) sum(x['n',]))
  tot_shen <- sapply(label_freq_list, function(x) sum(x['sh_en',]))
  n_cl.b <- 1:n_fin_cl
  main_desc <- data.frame('pos' = n_cl.b, 
                          'top_lab' = top_label, 'tl_sh' = top_lab_sh, 'tl_n' = top_lab_n, 
                          'freq_pos' = rep(1, n_fin_cl),
                          'res_lab' = rep(NA, n_fin_cl), #'res_sh' = top_lab_sh, 'res_n' = top_lab_n, 
                          'n_lab_in_cl' = n_labels_in_cl, 
                          'tl_shen_o_tot_n' = (top_lab_sh + 1) / tot_dim, 
                          'tl_shen_o_tl_n' = (top_lab_sh + 1) / top_lab_n, 
                          'tot_d' = tot_dim, 'tot_shen' = tot_shen, 
                          'shen_frac_d' = (tot_shen + 1) / tot_dim, 'd_frac_shen' = tot_dim / (tot_shen + 1))
  
  # ordering policy - weighted SH EN
  # ordering_principle <- 'tl_shen_o_tot_n'
  # ordering_principle <- 'ori_ord'
  ordering_principle <- 'tl_shen_o_tl_n'
  # ordering_principle <- 'shen_frac_d'
  main_desc <- main_desc[order(main_desc[, ordering_principle]),]
  main_desc <- cbind(main_desc, 'ori_ord' = n_cl.b)
  # main_desc <- cbind(main_desc, 'problematic' = rep(FALSE, n_fin_cl))
  # main_desc[main_desc$res_label %in% c(6, 19, 22, 23, 25, 3), 'problematic'] <- TRUE
  
  # loop for choosing the label - using the number of clusters defined
  ncl.i <- 0
  extreme_search <- FALSE
  while(ncl.i < n_fin_cl){
    if(ncl.i == 0) n_cl_popped <- n_labels
    ncl.i <- ncl.i + 1
    
    # select first candidate
    candida <- main_desc[ncl.i, 'top_lab']
    cl_pos <- main_desc$pos[ncl.i]
    h_diply <- main_desc$freq_pos[ncl.i]
    
    # repeat until res_label is set
    while (TRUE) {
      h_diply <- h_diply + 1 # search level (it starts from two because first is in the init)
      
      # candidate not yet picked
      if(!(candida %in% main_desc$res_lab)){
        main_desc$res_lab[ncl.i] <- candida
        main_desc$freq_pos[ncl.i] <- h_diply - 1
        break
        
      # candidate already present
      } else {
        
        # redefining a candidate if the selection was not optimal of the first label
        if(h_diply <= main_desc$n_lab_in_cl[ncl.i]){
          candida <- label_freq_list[[cl_pos]]['lab', h_diply]
          if(extreme_search){
            tls <- label_freq_list[[cl_pos]]['sh_en', h_diply]
            tln <- label_freq_list[[cl_pos]]['n', h_diply]
            main_desc[ncl.i, 'top_lab'] <- candida
            main_desc[ncl.i, 'tl_sh'] <- tls
            main_desc[ncl.i, 'tl_n'] <- tln
            main_desc[ncl.i, 'tl_shen_o_tot_n'] <- (tls + 1) / main_desc[ncl.i, 'tot_d']
            main_desc[ncl.i, 'tl_shen_o_tl_n'] <- (tls + 1) / tln
            main_desc$freq_pos[ncl.i] <- h_diply
            main_desc <- main_desc[order(main_desc[, 'tl_shen_o_tot_n']),]
            main_desc$res_lab <- rep(NA, n_fin_cl)
            ncl.i <- 0
            break
          }
          
        # we finished the available labels and we have to define new clusters!
        }else{
          n_cl_popped <- n_cl_popped + 1 # it is valid also for the merge (just for the check)
          
          # define new clusters
          if(multi_cluster_policy == 'popup'){
            main_desc$res_lab[ncl.i] <- n_cl_popped
            main_desc$freq_pos[ncl.i] <- n_cl_popped
            # cat(n_cl_popped, '\n')
            
          # keep the most freq - bias
          }else if(multi_cluster_policy == 'keep'){
            main_desc$res_lab[ncl.i] <- main_desc[ncl.i, 'top_lab'] # candida is different here
            # main_desc$freq_pos[ncl.i] <- 1
            
          # merge previous on the ordered main_desc
          }else if(multi_cluster_policy == 'merge_previous'){
            main_desc$res_lab[ncl.i] <- main_desc[ncl.i-1, 'top_lab']
            main_desc$freq_pos[ncl.i] <- main_desc$freq_pos[ncl.i-1]
          }
          break # we must exit somehow
        }
      }
    }
  }
  # stopifnot(n_cl_popped == n_fin_cl) # crashing if n_labels > n_fin_cl
  main_desc <- main_desc[order(main_desc$pos),]
  res_bound <- bas$tab.st[,4]
  res_label <- main_desc$res_lab
  
  # DEPRECATED -------------------------------------------------------------------------------------------------------
  # ftab_for_coll <- data.frame('labs' = 1:n_labels, 'freq' = sapply(1:n_labels, function(x) sum(x == possible_coll)))
  # 
  # not_to_consider <- ftab_for_coll$labs[which(ftab_for_coll$freq == 1)]
  # ftab_for_coll <- ftab_for_coll[ftab_for_coll$labs[-not_to_consider],]
  # if(F){
  #   if(multi_cluster_policy == 'select_non_conflict' || multi_cluster_policy == 'select_non_conflict_spawn'){
  #     
  #     # init - broken
  #     lab <- list()
  #     slct <- array(1, dim = .lt(label_freq_list)) 
  #     
  #     for(jk in 1:.lt(label_freq_list)){
  #       lab[[jk]] <- as.integer(colnames(label_freq_list[[jk]])[slct[jk]])
  #       if(jk != 1){
  #         while(lab[[jk]] %in% c(lab[1:(jk-1)])){
  #           slct[jk] <- slct[jk] + 1
  #           # label_freq_list[r]; unlist(lab[r]); slct[r]
  #           if(slct[jk] > n_labels || label_freq_list[[jk]]['n', slct[jk]] == 0){
  #             if(!silent) cat('Unfortunately we found that all the labels in cluster', jk, 
  #                             'are colliding with others (we did not count empty buckets). Kept the duplication.\n')
  #             if(multi_cluster_policy == 'select_non_conflict') slct[jk] <- 1
  #             else if(multi_cluster_policy == 'select_non_conflict_spawn') slct[jk] <- 'pop'
  #             print_new_lab <- FALSE
  #             break
  #           }else{
  #             print_new_lab <- TRUE
  #             lab[[jk]] <- as.integer(colnames(label_freq_list[[jk]])[slct[jk]])
  #           }
  #           if(!silent && print_new_lab) cat('Label', lab[[jk]], 'has been found already in another cluster. We will automatically', 
  #                                            'select the second most present value in this cluster.\n')
  #         }
  #       }
  #     }
  #   } else if(multi_cluster_policy == 'merge_consecutive'){
  #     if(!silent) cat('Merging policy applied. For the moment only consecutive identically labeled clusters are merged. \n')
  #     slct <- array(1, dim = .lt(label_freq_list)) 
  #   }
  #   
  #   # main constructor loop for the selected (slct) labels
  #   lab <- list()
  #   size <- list()
  #   sh_en <- list()
  #   for(jk in 1:.lt(label_freq_list)){
  #     lab[[jk]] <- as.integer(colnames(label_freq_list[[jk]])[slct[jk]])
  #     size[[jk]] <- as.integer(label_freq_list[[jk]]['n', slct[jk]])
  #     sh_en[[jk]] <- label_freq_list[[jk]]['sh_en', slct[jk]]
  #   }
  #   # fin def
  #   res_bound <- bas$tab.st[,4]
  #   res_label <- unlist(lab)
  # }
  
  # ---------------------------------------------------------------------------------------------- Creation of Predicted vector
  # creating the predicted vector: each cluster label repeated for the length of its basin
  predicted_div <- rep(res_label, times = res_bound)
  # pred_test <- .vec_from_barriers(bar.vec = res_bound, label.vec = res_label)
  # piann_true_v <- ann[st[,3]] # defined before
  missass <- as.numeric(predicted_div != piann_true_v)
  miss_perc <- round(sum(missass)*100/lpiann, 2)
  
  # printing number of missass
  if(!silent) cat('We found roughly', miss_perc, '% missassignment.\n')
  
  # plotting
  if(plot_pred_true_resume || return_plot){
    if(lpiann > max_number_of_elements) {
      # downsample the progress index so the plot stays manageable
      s_pi <- round(seq(1, lpiann, length.out = max_number_of_elements))
      fac <- round(lpiann/max_number_of_elements)
      if(!silent) cat('Reducing plotting size by a factor of', fac, '\n')
    } else {
      s_pi <- 1:lpiann
    }
    plot_df <- data.frame('pi' = s_pi, 'predicted' = as.factor(predicted_div[s_pi]), 'true' = as.factor(piann_true_v[s_pi]), 
                          'misass' = as.factor(missass[s_pi]))
    if(dbg_score_sapphire) browser()
    gg <- ggplot(data = plot_df) + 
      geom_point(aes_string(y = 'true', x = 'pi', col = shQuote("grey")), size = 8, shape = 108) +
      geom_point(aes_string(y = 'predicted', x = 'pi', col = shQuote("black")), size = 2, shape = 108) +
      theme_minimal() + xlab('Progress Index') + ylab('State') + scale_y_discrete(limits = sort(unique(res_label)))
      # scale_color_manual(name = "", labels = c("Predicted", "True"), values = c("black", "lightblue")) +
      # guides(color = guide_legend(override.aes = list(size=5))) + 
      # theme(panel.grid = element_blank())
    for(gg.i in unique(res_label)) gg <- gg + geom_line(aes_string(y = gg.i, x = 'pi'), size = 0.1, alpha = 0.5)
    gg <- gg + geom_segment(aes_string(y = 0, yend = 0.5, x = 'pi', xend = 'pi', col = 'misass')) +
      scale_color_manual(name = "", labels = c("Correct", "Miss", "Predicted", "True"), values = c('green4', 'red3', "black", "grey")) +
      guides(color = guide_legend(override.aes = list(size=7))) + 
      theme(panel.grid = element_blank())
    gg <- gg + annotate('text', x = lpiann/7.5, y = 0.25, label = paste0('Misses: ', miss_perc, '%'), col = 'white')
    gg <- gg + geom_vline(xintercept = bas$tab.st[,2][-1], size = 0.1, linetype = 'dashed')
    # gg + geom_ribbon(aes_string(ymin = -0.1, ymax = 0.1, x = 'pi', fill = 'misass')) +
    #   scale_fill_manual(name = "", labels = c("Correct", "Miss"), values = c("darkgreen", "darkred"))
    # cp <- cowplot::plot_grid(gg_popup + theme(legend.position="none") + ggtitle('popup'), 
    #                          gg_merge + theme(legend.position="none") + ggtitle('merge'), gg_keep + ggtitle('keep'), nrow = 1)
    # ggsave('test_diff_policies.png', plot = cp, width = 25, height = 9)
  }
  if(.lt(piann_true_v) != .lt(predicted_div)) 
    stop('Something went wrong. The pred and true vec have different lengths and respectively ', .lt(piann_true_v),' and ', .lt(predicted_div))
  
  # ---------------------------------------------------------------------------------------------- Final scoring
  # scoring it with accuracy?
  score.out <- ClusterR::external_validation(true_labels = piann_true_v, clusters = predicted_div, method = scoring_method, summary_stats = FALSE)
  if(!silent) cat('Using', scoring_method,'we obtained a final score of', score.out, '\n')
  
  # final output
  return_list <- list('score.out' = score.out, 'label_freq_list' = label_freq_list, 'main_desc' = main_desc, 'perc_miss' = miss_perc)
  if(plot_pred_true_resume) print(gg)
  if(return_predicted) return_list[['predicted_div']] <- predicted_div
  if(return_plot) return_list[['plot']] <- gg
  invisible(return_list)
}
# deprecated
# ----------------------------------------------------------------------------------------
# calculating the entropy of the new selection
# label_freq_list <- list()
# res_b <- 0
# for(jk in 1:.lt(res_label)){
# label_freq_list[[jk]] <- sort(table(ann[c(st[,3])][(res_b+1):(res_b + res_bound[i])]), decreasing=TRUE)[1:4] * 1.0 / res_bound[i]
# # (res_bound[i]-res_b)
# label_freq_list[[jk]] <- rbind(label_freq_list[[jk]], sort(table(ann[c(st[,3])][(res_b+1):(res_b + res_bound[i])]), decreasing=TRUE)[1:4])
# label_freq_list[[jk]][is.na(label_freq_list[[jk]])] <- 0
# res_b <- res_b + res_bound[i]
# }
#
# # label_freq_list
# for(jk in 1:.lt(label_freq_list)){
# it <- c()
# for(kj in 1:ncol(label_freq_list[[jk]])) {
# ulm <- label_freq_list[[jk]][2, kj]
# if(!is.na(names(ulm))) kholn <- - ulm * log(ulm) else kholn <- 0
# it <- c(it, kholn)
# }
# label_freq_list[[jk]] <- cbind(label_freq_list[[jk]], sh_en = sum(it))
# }
# lab <- list()
# size <- list()
# sh_en <- list()
# for(jk in 1:.lt(label_freq_list)){ # check label_freq
# lab[[jk]] <- as.integer(colnames(label_freq_list[[jk]])[1])
# size[[jk]] <- as.integer(label_freq_list[[jk]][2, 1])
# sh_en[[jk]] <- label_freq_list[[jk]][1,ncol(label_freq_list[[jk]])]
# }
#
# max_freq_table <- data.frame(cbind(label = unlist(lab), size = unlist(size), sh_en = unlist(sh_en)))
# max_freq_table
# max_freq_table[order(max_freq_table$sh_en), ]
# external_validation(true_labels = ann[st[,3]], clusters = predicted_div, method = "adjusted_rand_index", summary_stats = FALSE)
# external_validation(true_labels = ann[st[,3]], clusters = predicted_div, method = "jaccard_index", summary_stats = FALSE)
# external_validation(true_labels = ann[st[,3]], clusters = predicted_div, method = "purity", summary_stats = FALSE)
#####################################
# ------------------------------------------------------- basin_opt - number of clusters
# This method consist only into finding the right number of clusters
# without splitting the possible subdivision (of the same number of clustering)
# and score them per their relevance (e.g. uniformity or minimal entropy).
#
# if(basin_optimization[1] == 'number_of_clusters') {
#
# # checks
# if(!silent) cat('Automatic optimization of basins based on number of cluster selected... \n')
# if(is.null(number_of_clusters)) stop('To use basin_optimization with number_of_clusters the correspective variable must be set to a single integer.')
# if(number_of_clusters < 2 && number_of_clusters >= nrow(st)) stop('Number of cluster too high or too low.')
# if(number_of_clusters > nbinsxy) stop('Please set the number of clusters lower than the number of initial nbins (nbinsxy).')
#
# # init
# how_fine <- 10
# lin_scale <- unique(round(seq(2, nbinsxy, length.out = how_fine)))
# if(!silent) cat('Selected a convergence step of', how_fine, 'subdivisions, ranging from', nbinsxy, 'to 2. \n')
# n_cl <- nbins_x
# whch <- how_fine
#
# # loop over the first coarse search of best partitioning
# while(n_cl != number_of_clusters){
#
# nbins_x <- lin_scale[whch] # linear scale from 2 to nbinsxy
# if(!silent) cat('Looking for right divisions using', nbins_x, ' nbins...\n')
# bas <- CampaRi::basins_recognition(st, nx = nbins_x, plot = F, match = force_matching, out.file = F, new.dev = F, silent = T)
# n_cl <- nrow(bas$tab.st) # take the number of clusters found
#
# # normal finer search - descent
# if(n_cl > number_of_clusters){
# old_whch <- whch
# whch <- round(whch/2)
# if(old_whch == whch) whch <- old_whch - 1
# if(whch < 1){
# n_fin_cl <- n_cl
# n_cl <- number_of_clusters
# }
# # if fine partitioning went wrong, we want to try a finer search
# }else if(n_cl < number_of_clusters){
# how_fine2 <- 10
# if(whch+1 > how_fine) stop('Initial guess of bins brought you directly to have less barriers found than needed. Please consider an increment of nbinsxy parameter.')
# # case in which it is simply to select the upper part!
#
# if(!silent) cat('Found that our split was too coarse. Trying to split it again in', how_fine2,
# 'parts in the found ', nbins_x, '-', lin_scale[whch+1] , 'range.\n')
# lin_scale <- unique(round(seq(nbins_x, lin_scale[whch+1], length.out = how_fine2)))
# if(.lt(lin_scale) == 1){
# if(!silent) cat('Perfect number of divisions not found. In particular, we finally found', n_cl, 'partitions. Probably a finer search could work.',
# 'Otherwise, it is necessary to suppose another number of clusters. In particular, we stopped before starting internal loop because',
# 'the span between the last two splits was not sufficiently large.\n')
# n_fin_cl <- n_cl
# n_cl <- number_of_clusters
# }
# whch <- how_fine2
# while(n_cl != number_of_clusters){
# nbins_x <- lin_scale[whch]
# bas <- CampaRi::basins_recognition(st, nx = nbins_x, plot = F, match = force_matching, out.file = F, new.dev = F, silent = T)
# n_cl <- max(bas$tab.st[,1])
#
# if(n_cl > number_of_clusters){
# whch <- whch - 1
# } else if(n_cl < number_of_clusters){
# if(!silent) cat('Perfect number of divisions not found. In particular, we finally found', n_cl, 'partitions. Probably a finer search could work.',
# 'Otherwise, it is necessary to suppose another number of clusters. \n')
# n_fin_cl <- n_cl
# n_cl <- number_of_clusters
# }else{
# if(!silent) cat('We found a perfect binning using', nbins_x, 'number of bins.\n')
# n_fin_cl <- n_cl
# }
# }
# }else{ # end of the finer part, i.e. n_cl == number of clusters without having to go inside the inner loop
# if(!silent) cat('We found a perfect binning using', nbins_x, 'number of bins.\n')
# n_fin_cl <- n_cl
# }
# }
# }
# # final call if you want to plot!
# bas <- CampaRi::basins_recognition(st, nx = nbins_x, dyn.check = 1,
# plot = plot_basin_identification, match = force_matching, out.file = F, new.dev = F, silent = silent)
#
##########################################
|
39c99143bc389767da45efb25fc62d62ad834a8c | 461674805253ca434b2a2a0a5151d60924327365 | /man/arithmeticSet.Rd | 79d6c81290bea577f0752a84fa1c018c828eef51 | [
"MIT"
] | permissive | porteous54/caRtesian | 59a9d542282d0efa718157ffe04a0659a90c4477 | 700336f20736257519b17954e55c260834ada898 | refs/heads/master | 2021-03-22T04:52:44.153310 | 2018-05-03T23:26:16 | 2018-05-03T23:26:16 | 119,096,074 | 0 | 1 | null | 2018-05-03T23:26:16 | 2018-01-26T19:47:55 | R | UTF-8 | R | false | true | 337 | rd | arithmeticSet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcSet.R
\name{arithmeticSet}
\alias{arithmeticSet}
\title{arithmeticSet}
\usage{
arithmeticSet()
}
\value{
the function set created
}
\description{
Creates a function set containing the basic arithmetic operators (+, -, *, /)
}
\examples{
arithmeticSet()
}
|
b41964d46a128079abc92d2786d38512d8ab0530 | db04dbad2522dc7c96ed65c897a5b7b7353d209d | /ProblemSets/PS5/PS5_Stinnett.R | f8c3e194e54cbe1c26578ed6e2cd51d0aa6cac3d | [
"MIT"
] | permissive | SloanStinnett/DScourseS19 | 56f5c507600a1ba33226b567b13752489331d71d | b7010590af3fe626f160f2a9e6a7f997743e3cbb | refs/heads/master | 2020-04-16T21:49:40.251323 | 2019-04-18T12:00:11 | 2019-04-18T12:00:11 | 165,941,650 | 0 | 0 | null | 2019-01-15T23:50:54 | 2019-01-15T23:50:54 | null | UTF-8 | R | false | false | 564 | r | PS5_Stinnett.R | library("rvest")
library("quantmod")
library("forecast")
# Scrape the Wikipedia table of Supreme Court cases decided by the
# Rehnquist Court (second table in the article body).
url <- "https://en.wikipedia.org/wiki/List_of_United_States_Supreme_Court_cases_by_the_Rehnquist_Court"
RehnquistCourt<- url %>%
  read_html() %>%
  html_nodes(xpath='//*[@id="mw-content-text"]/div/table[2]') %>%
  html_table()
RehnquistCourt<- RehnquistCourt[[1]]
head(RehnquistCourt)
# Download daily stock data for AMD and Nvidia from Yahoo Finance, plot
# each closing-price series (column 4), and report their correlation.
getSymbols('AMD',src='yahoo')
getSymbols('NVDA',src='yahoo')
autoplot(AMD[,4])
autoplot(NVDA[,4])
cor(AMD[,4],NVDA[,4])
|
9e57a409fb5532e5475c034fbe2d7d6c1f33c094 | 35f36246fc268af1337a5d486bdc2dbcb76ac863 | /man/getLibsizes3.Rd | f239480c05ccab73f1bf07b557b22518f602aa46 | [] | no_license | WaltersLab/doseR | 763786d502e9aa5e2c4f0a1891bbf553d2c8c72e | 12b96faee2c12bd81211611b51ce699d24302f82 | refs/heads/master | 2023-03-05T05:25:30.151052 | 2023-02-24T13:35:57 | 2023-02-24T13:35:57 | 92,343,309 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 796 | rd | getLibsizes3.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getLibsizes3.R
\name{getLibsizes3}
\alias{getLibsizes3}
\title{getLibsizes3 Get Lib Sizes.}
\usage{
getLibsizes3(se, subset = NULL,
estimationType = c("quantile", "total",
"edgeR"),quantile = 0.75, ...)
}
\arguments{
\item{se}{A SummarizedExperiment object.}
\item{subset}{Value}
\item{estimationType}{e.g. quantile, total, edgeR.}
\item{quantile}{A quantile, expressed as e.g. 0.75.}
\item{...}{Passthrough arguments.}
}
\value{
Libsize value
}
\description{
getLibsizes3 method for SummarizedExp class object, derived
from getLibsizes2
}
\examples{
data(hmel.se)
getLibsizes3(se)
}
\references{
The "doseR" package, 2018 (in press).
}
\author{
AJ Vaestermark, JR Walters.
}
|
fe7aeef0b35ae5b538be0b6b8e3651385093edd1 | e0dab6d9ace1a82537e89ef7012831c8209f981c | /Black and White.R | 24a66d847fb33719bf282eb6bf05147fb940debf | [] | no_license | Xizor1970/Black-vs-White | 06fe62d16e56dc3b9521668f323dd8f61a90c872 | 704f3bec004faf2df206a3fc2603a75004c1b676 | refs/heads/master | 2020-05-15T15:19:38.427074 | 2019-04-20T06:41:35 | 2019-04-20T06:41:35 | 182,370,445 | 0 | 1 | null | null | null | null | WINDOWS-1250 | R | false | false | 1,164 | r | Black and White.R | # Author: Hendrik A. Dreyer
# Independent-samples comparison of wheel diameters (inches) between white
# and black vehicles.  H0: mu_black = mu_white, HA: mu_black > mu_white.

# Sample data: wheel diameters of 20 vehicles per colour
white <- c(16, 17, 17, 19, 17, 15, 14, 19, 17, 16, 18, 17, 20, 15, 17, 16, 16, 18, 18, 16)
black <- c(17, 15, 15, 19, 17, 18, 18, 15, 17, 19, 20, 18, 16, 17, 17, 18, 16, 17, 19, 16)
table(white)
table(black)

# Exploratory plots: normal Q-Q, frequency histograms, and spread
par(mfrow=c(2,2))
qqnorm(white, main="White vehicles")
qqnorm(black, main="Black vehicles")
hist(white, breaks=seq(12,22,by=1))
hist(black, breaks=seq(12,22,by=1))
boxplot(white, black, names = c("White vehicles","Black vehicles"), ylab = "Wheel diameter (inches)")

# Basic summary statistics for each sample
summary(white)
summary(black)
white_mean <- mean(white)
white_sd <- sd(white)
white_var <- var(white)
black_mean <- mean(black)
black_sd <- sd(black)
black_var <- var(black)

# One-sided Welch t-test of HA: mu_black > mu_white at the 95% level
zt.test <- t.test(black, white, conf.level = 0.95, var.equal=F, alternative="greater")
zt.test
|
1bc5bef918f43a829336825860529028ef4a4605 | 3d3f11d6002a505483003a59e0a94264ddd86757 | /R/genet.R | 0e54c73debd1e5dc2fb19ba4f4695121f1505bf7 | [] | no_license | cran/lqa | 75a867677d54e2e1b84dbe90822628adf42e6238 | fb0b5a6a7e8ce9028ac5f48adee8ffa8b3434d0b | refs/heads/master | 2020-06-03T14:21:37.296678 | 2010-07-12T00:00:00 | 2010-07-12T00:00:00 | 17,697,200 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,025 | r | genet.R | genet <-
function (lambda = NULL, ...)
{
  ## Generalized elastic net (GENET) penalty constructor for the lqa package.
  ## lambda = c(lambda, alpha) builds the plain elastic net (gamma defaults
  ## to 2); lambda = c(lambda, alpha, gamma) builds the generalized variant.
  ## Returns an object of class "penalty" whose getpenmat() yields the
  ## diagonal penalty matrix used in the local quadratic approximation.
  lambda.check (lambda)

  if ((length (lambda) > 3) | (length (lambda) < 2))
    stop ("The genet penalty must consist on two (enet) or three parameters (genet)! \n")

  if (length (lambda) == 2)   # enet special case: exponent defaults to 2
    lambda[3] <- 2

  lambda1 <- lambda[1]   # overall penalty weight
  alpha <- lambda[2]     # mixing parameter between the two penalty terms
  gamma <- lambda[3]     # exponent of the bridge-type term

  if ((alpha < 0) | (alpha > 1))   # bug fix: also reject negative alpha, as the message promises
    stop ("'lambda[2]' (= alpha) must be between 0 and 1\n")

  if (gamma <= 1)
    stop ("'lambda[3]' (= gamma) must be greater than one \n")

  names (lambda) <- c ("lambda", "alpha", "gamma")

  getpenmat <- function (beta = NULL, c1 = lqa.control()$c1, ...)
  {
    ## Diagonal approximation of the penalty matrix at the current beta;
    ## c1 is a small positive constant keeping the denominator away from 0.
    if (is.null (beta))
      stop ("'beta' must be the current coefficient vector \n")

    if (c1 < 0)
      stop ("'c1' must be non-negative \n")

    penmat <- lambda1 * diag (((1 - alpha) * gamma * abs (beta)^(gamma - 1) + alpha) / (sqrt (beta^2 + c1)), length (beta))
    penmat
  }

  structure (list (penalty = "genet", lambda = lambda, getpenmat = getpenmat), class = "penalty")
}
|
e63384f2b692e64adce7df8efe95770cad9ba53d | 64d5df27325f07af9c602ddf85a81ff4f0aec189 | /inst/examples/report_mig_env-example.R | 6f3a211da9411c9ef91e867d1ddc8adbd593d02d | [] | no_license | cran/stacomiR | 0641860bef2f4b5c05490d06de2c58fe3fe30059 | 981c67ba5d18ee9a5c357192e4ba4f9e864ec039 | refs/heads/master | 2022-07-22T14:11:55.680662 | 2022-07-18T08:20:02 | 2022-07-18T08:20:02 | 95,473,811 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,330 | r | report_mig_env-example.R | require(stacomiR)
stacomi(
	database_expected=FALSE)
# the following will load the data provided the user has access to the database
# with data in the iav example scheme.
# prompt for user and password but you can set appropriate options for host, port and dbname
\dontrun{	
	stacomi(
			database_expected=TRUE)	
	if (interactive()){
		if (!exists("user")){
			user <- readline(prompt="Enter user: ")	
			password <- readline(prompt="Enter password: ")
		}
	}
	options(					
			stacomiR.dbname = "bd_contmig_nat",
			stacomiR.host ="localhost",
			stacomiR.port = "5432",
			stacomiR.user = user,
			# bug fix: this entry duplicated "stacomiR.user", so the password
			# option was never set; it now sets the password option instead.
			stacomiR.password = password
	)
	r_mig_env<-new("report_mig_env")
	r_mig_env<-choice_c(r_mig_env,
			dc=c(5,6,12),
			taxa=c("Anguilla anguilla"),
			stage=c("AGJ","AGG","CIV"),
			stationMesure=c("temp_gabion","coef_maree","phases_lune"),
			datedebut="2008-01-01",
			datefin="2008-12-31",
			silent=FALSE)
	r_mig_env<-charge(r_mig_env) # this is necessary to load operations, DF and DC
	r_mig_env<-connect(r_mig_env)
	r_mig_env<-calcule(r_mig_env,silent=TRUE)
}
data("r_mig_env")
# An example of plot with custom colors.
plot(r_mig_env,
		color_station=c("temp_gabion"="red","coef_maree"="blue","phases_lune"="pink"),
		color_dc=c("5"="yellow","6"="orange","12"="purple")
)
|
851d7f1bca12692f46ae9be8e80f749e1520d88e | 0de866765239b6fe2159f5da7b364093d43bbb33 | /data-raw/siteData.R | 6274cda34824c15133667519e6739688b2a20a97 | [] | no_license | yangxhcaf/functional_diversity | f0e38a5763d5bc318332e731ca74a48f82be63ca | c8ed2b3d9a0733fa9892f0294a63e4932b0b7c6e | refs/heads/master | 2022-04-08T15:12:38.123937 | 2020-03-04T18:24:20 | 2020-03-04T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,972 | r | siteData.R | ########################
### Dayment Climate#####
###### Data #########
########################
library(daymetr)
library(here)
library(tidyverse)
library(lubridate)
# Load cached Daymet climate pulls if present; otherwise download daily
# records for every BBS site (2007-2016) and cache the result on disk.
data_path <- here::here("data", "modis", "daymet_climate.rdata")
if (file.exists(data_path)){
  load(data_path)
  print("yes climate")
}else{
  climate <- download_daymet_batch(file_location = here("data", 'bbs_sites.csv'),
                                   start = 2007,
                                   end = 2016,
                                   internal = TRUE)
  # bug fix: save() requires the destination via its `file` argument;
  # save(climate, data_path) tried to save a second *object* named by
  # data_path and stopped with "'file' must be specified".
  save(climate, file = data_path)
}
# Drop sites whose download failed (they come back as try-error objects).
error_list <- which(map(climate, class) == 'try-error')
#climate_df <- as.data.frame(climate[-error_list])
extract_daymet <- function(daymet_object) {
  # Flatten one daymetr download into a data frame tagged with its site id.
  out <- as.data.frame(daymet_object$data)
  out$site <- daymet_object$site
  out
}
# Combine the per-site downloads into one long table; site becomes a factor.
climate_df <- map_df(climate[-error_list], extract_daymet) %>%
  mutate(site = as.factor(site))
# Per-site means over the BBS sampling window (ydays 122-180, i.e. roughly
# May 2 - Jun 29 given the filter below), one row per site-year.
# NOTE(review): `avg_cols` is not defined anywhere in this file; it is
# presumably a character vector of the Daymet climate column names --
# confirm where it is created before running this script.
samp_period_avg <- climate_df %>%
  filter(yday > 121, yday < 181) %>%
  select(-yday) %>%
  group_by(year, site) %>%
  summarise_at(.vars = vars(avg_cols), .funs = mean) %>%
  rename_all(paste0, "samp") %>%
  rename(year = yearsamp, site = sitesamp)
# Whole-year per-site means of every remaining column.
year_avg <- climate_df %>%
  select(-yday) %>%
  group_by(year, site) %>%
  summarise_all(mean) %>%
  rename_all(paste0, "yearly") %>%
  rename(year = yearyearly, site = siteyearly)
# One row per site-year holding both sets of climate summaries.
climate_means <- left_join(samp_period_avg, year_avg, by = c("year", "site"))
#######################
###### MODIS ##########
#######################
library(MODISTools)
# Site coordinates for the MODIS subsets; MODISTools expects a plain
# data.frame with a `lon` column.
sites <- read_csv(here("data", "bbs_sites.csv")) %>%
  mutate(site_id = as.factor(site_id)) %>%
  rename(lon = long) %>%
  as.data.frame()
extract_MODIS <- function(site_name, modis_object){
  # Convert one MODISTools result into a data frame labelled with its site id.
  band_df <- as.data.frame(modis_object$data)
  band_df$site_id <- site_name
  band_df
}
#### YEARLY DATA #####
clean_yearly_df <- function(data, data_name){
  # Tidy a yearly MODIS extraction: normalise the mixed calendar-date
  # formats, keep rows with a valid band, and return a table of
  # site_id / year / <data_name>.
  #get all dates in the same format
  data$date <- gsub("[^[:alnum:][:blank:]?&/\\-]", "", data$calendar_date)
  # Dates arrive either month-first or year-first: rows that fail mdy()
  # are re-parsed with ydm() and the two subsets are recombined below.
  data$month_first <- mdy(data$date)
  data_clean <- data %>% subset(is.na(month_first)) %>%
    select(-month_first) %>%
    mutate(month_first = ydm(date)) %>%
    bind_rows(subset(data, !is.na(month_first))) %>%
    subset(!is.na(band)) %>%
    mutate(year = year(month_first), site_id = as.factor(site_id)) %>%
    # dynamic column name: the measurement column is named after data_name
    select(site_id, year, !!quo_name(data_name) := data)
  return(data_clean)
}
#FAO land cover type
# Yearly MODIS land-cover classification (MCD12Q1, FAO LC_Type4 scheme)
# for every site, cached to CSV.
lc <- mt_batch_subset(df = sites,
                      product = "MCD12Q1",
                      band = "LC_Type4",
                      internal = TRUE,
                      start = "2007-01-01",
                      end = "2016-12-31")
lc_df <- map2(sites$site_id, lc, extract_MODIS) %>%
  bind_rows()
write_csv(lc_df, "data/modis/land_cover.csv")
#NPP
# Yearly net primary productivity (MYD17A3H), cached the same way.
npp <- mt_batch_subset(df = sites,
                       product = "MYD17A3H",
                       band = "Npp_500m",
                       internal = TRUE,
                       start = "2007-01-01",
                       end = "2016-12-31")
npp_df <- map2(sites$site_id, npp, extract_MODIS) %>%
  bind_rows()
write_csv(npp_df, "data/modis/npp.csv")
### MONTHLY DATA
get_month_subset <- function(year, site_df = sites, product_name, band_name, folder){
  # Download one May-June batch of a MODIS product for every site in
  # `site_df`, cache it as data/modis/<folder>/<folder>_<year>.csv, and
  # return the combined data frame.
  modis_data <- mt_batch_subset(df = site_df,
                                product = product_name,
                                band = band_name,
                                internal = TRUE,
                                start = paste0(year, "-05-01"),
                                end = paste0(year, "-06-30"))
  modis_df <- map2(site_df$site_id, modis_data, extract_MODIS) %>% bind_rows()
  write_csv(modis_df, paste0("data/modis/", folder, "/", folder, "_", year, ".csv"))
  return(modis_df)
}
#Leaf area index
dir.create("data/modis/lai")
# NOTE(review): `site_sub` is not defined in this file -- the NDVI call
# below relies on get_month_subset()'s default `site_df = sites`, so this
# was probably meant to be `sites` (or a subset created elsewhere); confirm.
system.time(lai <- map_df(2007:2016, get_month_subset,
                          site_df = site_sub,
                          product_name = "MCD15A2H",
                          band_name = "Lai_500m",
                          folder = "lai"))
#NDVI
dir.create("data/modis/ndvi")
system.time(ndvi <- map_df(2012:2016, get_month_subset,
                           product_name = "MOD13Q1",
                           band_name = "250m_16_days_NDVI",
                           folder = "ndvi"))
# Mean May-June NDVI per site-year, read back from the cached CSVs.
ndvi_df <- map_df(paste0("data/modis/ndvi/", dir(path = "data/modis/ndvi", pattern = "*.csv")), read_csv) %>%
  select(site_id, calendar_date, data) %>%
  mutate(year = year(calendar_date)) %>%
  group_by(site_id, year) %>%
  summarise(ndvi = mean(data)) %>%
  ungroup() %>%
  mutate(site_id = as.factor(site_id))
# Assemble the final site-by-year covariate table and write it out.
site_data <- climate_means %>%
  mutate(site_id = site) %>%
  left_join(clean_yearly_df(lc_df, "land_cover"), by = c("site_id", "year")) %>%
  left_join(clean_yearly_df(npp_df, "npp"), by = c("site_id", "year")) %>%
  left_join(ndvi_df, by = c("site_id", "year"))
write_tsv(site_data, "data/site_variables.tsv.bz2")
|
dc4c3ccd4344f758f3c518600f426b59cda15682 | 579d3c9ef6f34d04e88f2cd6e3163d0c1fb78eda | /tests/testthat/files/router.R | 0d6476340dba046c0324b577545c8826cad63f8a | [
"MIT"
] | permissive | vnijs/rapier | 44098190f13849555860611f65241b0184183d84 | 4e195a82ee0da8ded252ed10113ecb64cc60c9c7 | refs/heads/master | 2020-12-11T03:22:31.550630 | 2015-06-13T05:05:37 | 2015-06-13T05:05:37 | 37,435,991 | 1 | 1 | null | 2015-06-15T00:55:24 | 2015-06-15T00:55:24 | null | UTF-8 | R | false | false | 405 | r | router.R |
# Test fixture for the router: endpoints, filters and @preempt annotations
# exercised by the routing tests.  The #' annotation blocks below are parsed
# by the router and must stay exactly as written.

#' @get /
#' @preempt __first__
function(){
  "first"
}

#' @get /abc
function(){
  "abc get"
}

#' @post /abc
function(){
  "abc post"
}

#' @filter filt1
function(req, res){
  forward()
}

#' @filter filt2
function(req, res){
  forward()
}

#' @use /dog
#' @preempt filt2
function(){
  "dog use"
}

#' @get /dog
#' @preempt filt1
function(){
  "dog get"
}

#' @get /error
function(){
  stop("ERROR")
}
|
619faed0cfe72be3cead2e40ce1d59a42d627f03 | f58228708c7ac302d1970c7daaa58c545c5f59b1 | /rscripts/myutils.R | 337eaa824b8092364cfd9f509f6492bc09c2ee46 | [] | no_license | escott55/me_variome | 50b59a169ba737eccbed6d59b5ef10108d508281 | 01ca8a601ebecc6226bf30bf024183e0fe4e967d | refs/heads/master | 2016-09-15T23:18:56.211076 | 2014-09-25T16:58:39 | 2014-09-25T16:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,256 | r | myutils.R |
library(tools)
######################################################################
# getBasename
######################################################################
getBasename <- function( filename ){
suffix <- file_ext(filename)
path <- dirname(filename)
#basename <- basename(gsub( "(.*)\\.(.*)", "\\1", filename))
basename <- basename( file_path_sans_ext(filename) )
lst <- c(path, basename, suffix)
return(lst)
}# END getBasename
######################################################################
# readMultiTable
######################################################################
readMultiTable <- function( filename ){
x <- readLines( filename )
x <- x[1:15] # This is a hack! delete if you can!
start <- grep("^[/]+$", x)
mark <- vector('integer', length(x))
mark[start] <- 1
# determine limits of each table
mark <- cumsum(mark)
# split the data for reading
df <- lapply(split(x, mark), function(.data){
.input <- read.table(textConnection(.data), skip=2, header=FALSE, sep="\t")
attr(.input, 'name') <- .data[2] # save the name
.input
})
# rename the list
names(df) <- sapply(df, attr, 'name')
return(df)
} #END readMultiTable
|
62f9d40244543c7c9ae74f2cfabdc18dcb066ce6 | e1c34fb3226673721ff7038c2ea5bb63adb84931 | /dtw_svm_lvmlight.r | 3a836a8a98ba40f190f9dbca12e61290bbc2c1de | [] | no_license | root2891/Kaggle-EEG | ca6700f658e62165c6d28d5c1dc4f1722009820b | a4860f16e751c053dad563996ff345afe4e45f10 | refs/heads/master | 2016-09-05T14:16:41.215677 | 2014-07-21T10:48:28 | 2014-07-21T10:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,263 | r | dtw_svm_lvmlight.r | library("fNonlinear")
library("R.matlab")
library("tseriesChaos")
library("fractal")
library("fractaldim")
library("tseriesChaos")
library("fields")
library("FNN")
library("caret")
library("RSNNS")
library("e1071")
library("wavelets")
library("scatterplot3d")
library("combinat")
library("rasclass")
library("dtw")
path <- "D:/Work Related/Kaggle EEG/Volumes/Seagate/seizure_detection/competition_data/clips/"
patients <- c("Dog_1","Dog_2","Dog_3","Dog_4", "Patient_1","Patient_2","Patient_3","Patient_4","Patient_5","Patient_6","Patient_7","Patient_8")
fileTypes_list <- c("ictal", "interictal", "test")
noOfSeconds <- 1
get_no_of_files <- function(path, patient, fileTypes_list)
{
  # Count the clip files of each segment type (e.g. "ictal", "interictal",
  # "test") inside <path>/<patient>/ and return a named list of counts.
  # bug fix: the accumulator was initialised as list(NULL), which left a
  # spurious unnamed NULL as the first element of the returned list; start
  # from an empty list instead.
  noOfFiles_list <- list()
  for(filetype in fileTypes_list)
  {
    # "_<type>" matches only that exact type: "_ictal" does not occur
    # inside "..._interictal_..." file names.
    fileList <- list.files(paste(path, patient,"/", sep = ""), pattern =paste("_",filetype,sep = ""))
    noOfFiles_list[filetype] <- length(fileList)
  }
  return (noOfFiles_list)
}
calc_splv_spec <- function(wav_transform1, wav_transform2)
{
  # Phase-locking value for one wavelet level: accumulates
  # exp(i * (coef1 - coef2)) over the rows of the two coefficient tables
  # and returns the modulus of the sum divided by the number of rows.
  # NOTE(review): each `wav_transform[i,]` is a one-row data frame, so the
  # accumulator is vector-valued (one entry per column) rather than a single
  # complex sum; confirm that per-column output is intended by the caller.
  sum <- complex(imaginary = 0, real = 0)
  for(i in 1:nrow(wav_transform1))
  {
    # e^(i * difference) for row i, accumulated part by part
    tempVal <- exp(complex(imaginary=(wav_transform1[i,] - wav_transform2[i,])))
    realPart <- Re(sum)+Re(tempVal)
    imaginaryPart <- Im(sum)+Im(tempVal)
    sum <- complex(real = realPart, imaginary = imaginaryPart)
  }
  val <- abs(sum)
  return (val/nrow(wav_transform1))
}
calc_splv <- function(wav_transform1, wav_transform2)
{
  # Phase-locking value per wavelet decomposition level: applies
  # calc_splv_spec() to each level of the two DWT objects (slot @W from the
  # `wavelets` package) and stacks the per-level results row-wise.
  splv <- data.frame(NULL)
  for(i in 1:length(wav_transform1@W))
  {
    tempVal <- calc_splv_spec(data.frame(wav_transform1@W[i]), data.frame(wav_transform2@W[i]))
    splv <- rbind(splv, tempVal)
  }
  return (splv)
}
calculation_per_combination <- function(inputData.data, noOfRods)
{
  # DTW distance between every unordered pair of the noOfRods time series.
  # Returns a one-row table holding the strict lower triangle of the
  # dtwDist() matrix, traversed row by row: (2,1), (3,1), (3,2), (4,1), ...
  result <- data.frame(NULL)
  tempResult <- dtwDist(inputData.data)
  for(i in 1:noOfRods)
  {
    for(j in 1:noOfRods)
    {
      if(i > j)
      {
        # growing with rbind() is O(n^2), but noOfRods (electrodes) is small
        result <- rbind(result, tempResult[i,j])
      }
    }
  }
  return (t(result))
}
get_data <- function(patient, segment_type, noOfSeconds, k)
{
  # Read consecutive one-second clip files for a patient, starting at
  # segment k, from the directory in the global `path`.  Returns a list of
  # parsed .mat structures (one per file, via R.matlab::readMat).
  if(noOfSeconds >1)
  {
    # NOTE(review): k:(k+noOfSeconds) spans noOfSeconds + 1 segments; the
    # callers only use elements 1..noOfSeconds, so the last file read here
    # looks like an off-by-one and is wasted -- confirm.
    fileName<- paste(path,patient,"/",patient,"_",segment_type,"_segment_",k:(k+noOfSeconds), sep = "")
  }else
  {
    fileName<- paste(path, patient,"/",patient,"_",segment_type,"_segment_",k, sep = "")
  }
  fileName <- paste(fileName, ".mat", sep = "")
  inputData <-lapply(fileName,readMat)
}
write_to_file_train <- function(dtw_result, targetDataSet, path = "D:/Work Related/Kaggle EEG/svm/")
{
  # Append one line per training sample to <path>/trainfile.dat in
  # SVM-light / svm_multiclass format: "<class> 1:<d1> 2:<d2> ...".
  # Class 2 encodes interictal (target 0); class 1 encodes ictal.
  # `path` is a new argument, but it defaults to the previously hard-coded
  # location, so existing calls behave exactly as before.
  fileLocation <- paste(path, "trainfile.dat", sep = "")
  # bug fix: the loops indexed the undefined global `trainInput`; iterate
  # over the data that is actually written, dtw_result.
  for(j in seq_len(nrow(dtw_result)))
  {
    if(targetDataSet[j,] == 0)
      outputval <- 2
    else
      outputval <- 1
    output <- NULL
    for(i in seq_len(ncol(dtw_result)))
    {
      output <- paste(output," ",i, ":", dtw_result[j,i], sep = "")
    }
    val <- paste(outputval, output)
    write(val, fileLocation, append = TRUE)
  }
}
write_to_file_predict <- function(result, path = "D:/Work Related/Kaggle EEG/svm/")
{
  # Append one unlabeled line per test sample to <path>/testfile.dat in
  # SVM-light feature:value format.  `path` is a new argument defaulting to
  # the previously hard-coded location, so existing calls are unaffected.
  # bug fix: paste(..., spe = "") misspelled `sep`, so "" was pasted as an
  # extra element and the file name came out as "<path> testfile.dat ".
  fileLocation <- paste(path, "testfile.dat", sep = "")
  # bug fix: the loops indexed the undefined global `trainInput`; iterate
  # over the data that is actually written, `result`.
  for(j in seq_len(nrow(result)))
  {
    output <- NULL
    for(i in seq_len(ncol(result)))
    {
      output <- paste(output," ",i, ":", result[j,i], sep = "")
    }
    write(output, fileLocation, append = TRUE)
  }
}
# ---------------- main driver ----------------
# For each patient: build DTW pairwise-distance features from the first few
# ictal/interictal training clips, write them in SVM-light format, train an
# svm_multiclass model via shell, then score the test clips.
results <- data.frame(NULL)
for(patient in patients)
{
  noOfFiles_list <- get_no_of_files(path, patient, fileTypes_list)
  noOfIctalFiles <- noOfFiles_list$ictal
  noOfInterictalFiles <-noOfFiles_list$interictal
  noOfTestFiles <- noOfFiles_list$test
  # development cap: only the first 10 clips of each type are processed
  noOfFiles_list$ictal <- 10
  noOfFiles_list$interictal <-10
  noOfFiles_list$test <- 10
  noOfFiles <- noOfIctalFiles + noOfInterictalFiles
  #output data set containing if file is interictal, earlyictal or ictal
  targetDataSet <- data.frame(NULL)
  dtw_result <- data.frame(NULL)
  for(filetype in fileTypes_list[1:2])
  {
    k<-1
    while(k <= noOfFiles_list[filetype] )
    {
      # NOTE(review): `spe` is a misspelled `sep`, so "" is pasted into the
      # progress message as an extra element.
      print(paste("patient:",patient, "filetype:", filetype, "K:", k, spe = ""))
      #get the data in inputData.data
      inputData <- get_data(patient, filetype, noOfSeconds,k)
      #create input data set for processing
      inputData.data <- inputData[[1]]$data
      frequency <- ncol(inputData.data)
      noOfRods <- nrow(inputData.data)
      if(noOfSeconds >1)
      {
        # concatenate the extra seconds column-wise onto the first clip
        for(i in 2:noOfSeconds)
        {
          inputData.data <- cbind(inputData.data,inputData[[i]]$data)
        }
      }
      #create the output data set only in case of ictal or interictal files
      # class labels: 1 = early ictal (latency < 15 s), 2 = later ictal,
      # 0 = interictal
      if(filetype == "ictal")
      {
        inputData.latency <- inputData[[1]]$latency
        if(inputData.latency < 15)
        {
          targetDataSet <- rbind(targetDataSet, 1)
        }
        else
        {
          targetDataSet <- rbind(targetDataSet, 2)
        }
      }else
      {
        targetDataSet <- rbind(targetDataSet, 0)
      }
      inputData.data <- as.ts(inputData.data)
      dtw_result<-rbind(dtw_result,calculation_per_combination(inputData.data, noOfRods))
      k<- k+noOfSeconds
    }
  }
  frequency <- ncol(inputData.data)
  noOfRods <- nrow(inputData.data)
  noOfCombinations <- ncol(combn(noOfRods, 2))
  write_to_file_train(dtw_result, targetDataSet)
  trainCommand <- paste("\"D:/Work Related/Kaggle EEG/svm/svm_multiclass_learn.exe\" -c 1.0 trainfile.dat model.dat", sep = "")
  shell(trainCommand, intern = F)
  k<- 1
  result <- data.frame(NULL)
  # NOTE(review): this loop never increments k, so it cannot terminate when
  # noOfTestFiles > 0 -- the `k<- k+noOfSeconds` at the bottom of the patient
  # loop was probably meant to live inside it.  Also `rbind(dtw_result, ...)`
  # looks like it should be `rbind(result, ...)`: as written the training
  # features are copied in front of every test row.  Confirm before running.
  while(k<=noOfTestFiles)
  {
    inputData <- get_data(patient, "test", noOfSeconds,k)
    inputData.data <- inputData[[1]]$data
    inputData.data <- as.ts(inputData.data)
    result <- rbind(dtw_result,calculation_per_combination(inputData.data, noOfRods))
  }
  write_to_file_predict(result)
  learnCommand <- paste("\"D:/Work\ Related/Kaggle\ EEG/svm/svm_multiclass_classify.exe\" trainfile.dat model.dat, predictfile_",patient,"_", k , sep = "")
  # NOTE(review): `model_list` is never defined in this script, and
  # learnCommand is built but never executed; predict() here cannot work as
  # written -- presumably the classify command's output file was meant to be
  # read back instead.
  prediction <- predict(model_list, result)
  value <- prediction
  # map the 3-class prediction onto the (seizure, early) submission flags
  if(value == 0)
  {
    seizure <- 0
    early <- 0
  }else if(value== 1)
  {
    seizure <- 1
    early <- 1
  }else if(value == 2)
  {
    seizure <- 1
    early<- 0
  }
  print(paste(patient, "seizure:", seizure,"early:",early))
  k<- k+noOfSeconds
  save.image(paste("C:/Shubham/", patient,".RData", sep = ""))
}
|
77a629d4ba45a312c3cfdf75b008f6a9021fb7c2 | 0c3ad13ceb982ddff3c51818ce8e5d8807de9313 | /R/day06/r2.R | 8efda69fa73fe6b35b68e49424c78bc283a6c716 | [] | no_license | HanHyunwoo/TIL | befc9ecae77d227832179b49a37248780558697e | cd3c49270ca20f3cb9f841b929490d2e9193bafd | refs/heads/master | 2021-05-25T12:06:30.157595 | 2018-12-09T13:00:46 | 2018-12-09T13:00:46 | 127,377,631 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,555 | r | r2.R | #213p 성별에 따른 월급 차이 - "성별에 따라 월급이 다를까?"
# Does monthly income differ by gender?  (textbook p.213)
welfare
class(welfare$sex)
table(welfare$sex)
# Treat the invalid code 9 as missing
welfare$sex <- ifelse(welfare$sex == 9, NA, welfare$sex)
# Check how many values are missing
table(is.na(welfare$sex)) # is.na(): TRUE for NA, FALSE otherwise; table() counts each
welfare$sex <- ifelse(welfare$sex ==1, "male", "female")
# Count each level of welfare$sex
table(welfare$sex)
# Show the distribution as a chart
qplot(welfare$sex)
#--------------------------------------------
# Examining the income variable (p.216)
class(welfare$income)
# Inspect summary statistics (too many distinct values for table()).
# Min 0, max 2400, 12030 missing; a minimum of 0 suggests outliers are
# present, so incomes of 0 must be treated as missing.
# Values of 0 or 9999 will be recoded as missing.
summary(welfare$income)
# Restrict the x-axis of the plot to the 0-1000 range
qplot(welfare$income) + xlim(0,1000)
# Recode income values of 0 or 9999 as NA (excluded from analysis)
welfare$income <- ifelse(welfare$income %in% c(0,9999), NA, welfare$income)
# is.na(): TRUE for NA, FALSE otherwise; table() counts each
table(is.na(welfare$income)) # FALSE: 4620, TRUE: 12044 -> 12044 missing values
# Analyse the income difference by gender: mean income per sex
sex_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(sex) %>%
  summarise (mean_income = mean(income))
sex_income
ggplot(data = sex_income, aes(x=sex, y=mean_income)) + geom_col()
|
f6ca6a70a2f0fdbb5f21ae8afe0a7b865774101b | 5f8da4d4d01c6947759af8db517cf295980bfc11 | /stppResid/R/tess.info.R | f559324b3ef73a069a81c5b51b344f801979b50b | [] | no_license | r-clements/stppResid | 8f3042a12c0189ccd28764f9ad6fba29a00c79ea | 471286070dc4a4866860a4e82da656d39ce8ce01 | refs/heads/master | 2021-01-23T05:44:32.818058 | 2018-06-06T03:36:09 | 2018-06-06T03:36:09 | 5,900,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,712 | r | tess.info.R | tess.info <- function(X, cifunction, theta = NULL, areas, tl, n.def = 100)
{
	# Monte Carlo integration of a conditional intensity over the tiles of a
	# spatial tessellation.  For each tile in `tl`, random space-time points
	# are drawn in batches of n.def and the intensity from `cifunction`
	# (optionally parameterised by `theta`) is averaged; a tile is finished
	# once its standard error drops below 1% of its estimated mean.
	# Returns a "tess.info" list: per-tile sample sizes, integral estimates,
	# and the mean and sd of the sampled intensities.
	data <- data.frame(cbind(X$x, X$y, X$t))
	names(data) <- c("x", "y", "t")
	# fun2 evaluates the conditional intensity at one new point z, using the
	# observed points occurring no later than z's time coordinate.
	if(is.null(theta)) {
		fun2 <- function(z)
		{
			Y <- rbind(data, z)
			Y <- Y[which(Y$t <= z[3]),]
			Y <- stpp(Y$x, Y$y, Y$t, stwin(X$xcoord, X$ycoord, X$tcoord))
			tail(cifunction(Y),1)
		}
	}
	if(!is.null(theta)) {
		fun2 <- function(z)
		{
			Y <- rbind(data, z)
			Y <- Y[which(Y$t <= z[3]),]
			Y <- stpp(Y$x, Y$y, Y$t, stwin(X$xcoord, X$ycoord, X$tcoord))
			tail(cifunction(Y, theta),1)
		}
	}
	# all() unpacks one column of stacked (x, y, t) draws, sorts the points
	# by time, and evaluates the intensity at each of them.
	all <- function(x)
	{
		div <- length(x)/3
		xt <- x[1:div]
		yt <- x[(div+1):(2*div)]
		tt <- x[(2*div+1):(3*div)]
		new.pts <- data.frame(cbind(xt, yt, tt))
		new.pts <- new.pts[order(new.pts[,3]),]
		lamb2 <- apply(new.pts, 1, fun2)
		return(lamb2)
	}
	# new.points() draws n.def uniform spatial locations inside one tile's
	# polygon (complete spatial randomness via csr).
	new.points <- function(tl) {
		poly.temp <- as.points(tl$x, tl$y)
		pts <- csr(poly.temp, n.def)
		pts2 <- c(pts[,1], pts[,2])
		return(pts2)
	}
	# space-time volume of each tile: polygon area times the time span
	vol <- areas * diff(X$tcoord)
	n <- 0
	lamb2 <- c()
	temp.tl <- tl
	place <- 1:length(temp.tl)
	lamb.ave.final <- rep(0, length(place))
	lamb.sd.final <- rep(0, length(place))
	n.final <- rep(0, length(place))
	# first batch of n.def points per tile, evaluated outside the loop so
	# that a mean and sd exist before the convergence test
	n = n + n.def
	new.pts <- lapply(temp.tl, new.points)
	xyp <- data.frame(new.pts)
	xyp <- data.matrix(xyp)
	tp.t <- runif(n.def*length(temp.tl), X$tcoord[1], X$tcoord[2])
	tp <- matrix(tp.t, ncol = length(temp.tl))
	new.p <- rbind(xyp, tp)
	lamb2 <- rbind(lamb2, apply(new.p, 2, all))
	lamb2.ave <- apply(lamb2, 2, mean)
	lamb2.sd <- apply(lamb2, 2, sd)
	# keep adding batches until every tile meets the 1%-of-mean error bound;
	# converged tiles are moved to the *.final vectors and dropped
	while(length(place) > 0) {
		n = n + n.def
		cat("number of bins left: ", length(temp.tl), "\n")
		new.pts <- lapply(temp.tl, new.points)
		xyp <- data.frame(new.pts)
		xyp <- data.matrix(xyp)
		tp.t <- runif(n.def*length(temp.tl), X$tcoord[1], X$tcoord[2])
		tp <- matrix(tp.t, ncol = length(temp.tl))
		new.p <- rbind(xyp, tp)
		# with a single tile left, lamb2 collapses to a vector, so the
		# summaries are computed without apply()
		if(length(temp.tl) > 1) {
			lamb2 <- rbind(lamb2, apply(new.p, 2, all))
			lamb2.ave <- apply(lamb2, 2, mean)
			lamb2.sd <- apply(lamb2, 2, sd)
		} else {
			lamb2 <- c(lamb2, apply(new.p, 2, all))
			lamb2.ave <- mean(lamb2)
			lamb2.sd <- sd(lamb2)
		}
		error <- lamb2.ave/100
		std.error <- lamb2.sd/sqrt(n)
		if(any(std.error < error)) {
			lose <- which(std.error < error)
			replace <- place[lose]
			lamb.ave.final[replace] <- lamb2.ave[lose]
			lamb.sd.final[replace] <- lamb2.sd[lose]
			n.final[replace] <- n
			temp.tl <- temp.tl[-lose]
			if(sum(std.error < error) < length(error)) {
				lamb2 <- lamb2[ , -lose]
			}
			place <- place[-lose]
		}
	}
	# integral over each tile: mean intensity times space-time volume
	int.approx <- lamb.ave.final * vol
	tess <- list(n = n.final, integral = int.approx, mean.lambda = lamb.ave.final, sd.lambda = lamb.sd.final)
	class(tess) <- "tess.info"
	return(tess)
}
06da03b761e76646f0e131894898eb1e8124c9ef | d5326e11bad5910a13fb5bb24db0a240126b875b | /bachelors_course/sochynskyi_lab_6_2.R | 7f47029d0e0a3f12936564ea06d273bd01e88f1a | [] | no_license | SochynskyiStas/Data-Analysis | 6798867886c475a3cee173620b6d7f95f2643cdf | d705815a3ef8a83d8699ac4d7a4b6afb1fdd3a01 | refs/heads/master | 2022-03-31T11:16:04.970608 | 2019-12-08T23:08:36 | 2019-12-08T23:08:36 | 79,068,943 | 0 | 0 | null | null | null | null | WINDOWS-1251 | R | false | false | 1,957 | r | sochynskyi_lab_6_2.R | library('ggplot2')
library('forecast')
library('tseries')
library(xlsx)
# Daily bike-sharing counts; axis labels below are in Ukrainian and are
# left untouched (they are user-facing strings)
daily_data = read.csv('day.csv', header=TRUE, stringsAsFactors=FALSE)
daily_data$Date = as.Date(daily_data$dteday)
#daily_data$Date = as.Date(daily_data$dteday)
# Raw daily rental counts over time
ggplot(daily_data, aes(Date, cnt)) + geom_line() + scale_x_date('Місяці') + ylab("К-ть велосипедів") +
  xlab("")
# tsclean() smooths out statistical outliers
count_ts = ts(daily_data[, c('cnt')])
daily_data$clean_cnt = tsclean(count_ts)
ggplot() +
  geom_line(data = daily_data, aes(x = Date, y = clean_cnt)) + ylab('Очищені дані')
# Weekly (order 7) and monthly (order 30) moving averages of the cleaned series
daily_data$cnt_ma = ma(daily_data$clean_cnt, order=7)
daily_data$cnt_ma30 = ma(daily_data$clean_cnt, order=30)
ggplot() +
  geom_line(data = daily_data, aes(x = Date, y = clean_cnt, colour = "Значення")) +
  geom_line(data = daily_data, aes(x = Date, y = cnt_ma, colour = "Тижнева ковзка середня")) +
  geom_line(data = daily_data, aes(x = Date, y = cnt_ma30, colour = "Ковзка середня за місяць")) +
  ylab('Кількість велосипедів')+ xlab('Час')
# STL seasonal decomposition of the weekly moving average, then the
# seasonally adjusted series
count_ma = ts(na.omit(daily_data$cnt_ma), frequency=30)
#allow.multiplicative.trend=TRUE
decomp = stl(count_ma, s.window="periodic")
deseasonal_cnt <- seasadj(decomp)
plot(decomp)
# Augmented Dickey-Fuller stationarity test, before and after first-order
# differencing; ACF/PACF guide the ARIMA orders
adf.test(count_ma, alternative = "stationary")
count_d1 = diff(deseasonal_cnt, differences = 1)
plot(count_d1)
adf.test(count_d1, alternative = "stationary")
Acf(count_d1, main='ACF for Differenced Series')
Pacf(count_d1, main='PACF for Differenced Series')
# Fit a seasonal ARIMA automatically and forecast 30 days ahead
fit_w_seasonality = auto.arima(deseasonal_cnt, seasonal=TRUE)
seas_fcast <- forecast(fit_w_seasonality, h=30)
plot(seas_fcast)
tsdisplay(residuals(fit_w_seasonality), lag.max=45, main='(2,1,2) Model Residuals')
#the model incorporates differencing of degree 1, and uses an autoregressive term of first lag and a moving average model of order 1:
f097e39dd2fdb222d0e3de5b12b1ea13b59d0586 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Rnumerai/examples/download_data.Rd.R | 99f9bee96acdc2db32aecbbaeaf5568b335c675e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 512 | r | download_data.Rd.R | library(Rnumerai)
### Name: download_data
### Title: Function to download the Numerai Tournament data
### Aliases: download_data
### ** Examples
## Not run:
##D ## Directory where data files and prediction files to be saved
##D ## Put custom directory path or use the current working directory
##D data_dir <- tempdir()
##D
##D ## Download data set for current competition
##D data <- download_data(data_dir)
##D data_train <- data$data_train
##D data_tournament <- data$data_tournament
## End(Not run)
|
0a49af198b22fb5f00b0be345dbe840571c16503 | 26c16d345565ecadc5985cfa047694d342181e67 | /Post_test/03_DESSA_post_test_Raw_Scores.R | 07f7756119d749da9ae0463ced3f2e2e4b5673a6 | [] | no_license | aejb22122/Youth-Social-and-emotional-learning | abdcb24d03c55f53c46aa25a3c63e230a3e15d4f | 92f247f20294dd6a7367b88457269e7c5a5ba06b | refs/heads/master | 2020-05-31T21:22:25.282935 | 2019-12-05T19:52:23 | 2019-12-05T19:52:23 | 190,496,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,333 | r | 03_DESSA_post_test_Raw_Scores.R | # Title: Devereux Student Strengths Assessment (DESSA) - Spring 2019
# Author: Annick Eudes JB Research Manager
# Date: July 23, 2019
# Version: 1.0
# This revision of the stript replaces the original cleaned dataset with the
# sel_data_imputed data that incoporates the missing values.
# ---- Preliminaries ----
# Loading the packages that we might need :
# library(tidyverse)
# library(ggplot2)
# library(plyr)
# library(dplyr)
# ---- Step 2: Calculating the DESSA Scale Raw Scores ----
# The Scale Raw Scores for the eight scales (Personal
# Responsibility, Optimistic Thinking, Goal-Directed Behavior,
# Social-Awareness, Decision Making, Relationship Skills, Self-
# Awareness, and Self-Management) are obtained by adding the raw
# scores for all of the items that comprise each scale."
# ---- 2.1. Computing the PR, OT, GB, and SO scores for each child ----
# attach(sel_data) # so objects in the database can be accessed
# by simply giving their names...
# i.e Q1 in lieu of sel_data$Q1 ...
attach(sel_data_imputed)
# We will be using the mutate() function from the dplyr package to compute
# The Raw Scrores for each child.
# and add the new column "Personal_Responsibility" to the dataset:
sel_data_imputed <- mutate(sel_data_imputed, Personal_Responsibility = Q1+Q4+Q6+Q20+Q21+Q23+Q24+Q28+Q32+Q35)
# We will do the same for :
# Optimistic Thinking scale
sel_data_imputed <- mutate(sel_data_imputed, Optimistic_Thinking = Q2+Q5+Q7+Q10+Q16+Q30+Q36)
# Goal-Directed Behavior :
sel_data_imputed <- mutate(sel_data_imputed, Goal_Directed_Behavior = Q3+Q9+Q12+Q13+Q14+Q15+Q18+Q26+Q29+Q33)
# Social Awareness :
sel_data_imputed <- mutate(sel_data_imputed, Social_Awareness = Q8+Q11+Q17+Q19+Q22+Q25+Q27+Q31+Q34)
# Decision Making :
sel_data_imputed <- mutate(sel_data_imputed, Decision_Making = Q37+Q39+Q42+Q52+Q65+Q66+Q68+Q69)
# Relationship Skills :
sel_data_imputed <- mutate(sel_data_imputed, Relationship_Skills = Q38+Q40+Q45+Q47+Q50+Q55+Q61+Q64+Q70+Q71)
# Self- Awareness :
sel_data_imputed <- mutate(sel_data_imputed, Self_Awareness = Q41+Q49+Q57+Q58+Q59+Q62+Q63)
# Self-Management :
sel_data_imputed <- mutate(sel_data_imputed, Self_Management = Q43+Q44+Q46+Q48+Q51+Q53+Q54+Q56+Q60+Q67+Q72)
# The new columns should have been added at the end of the dataset :
View(sel_data_imputed)
# ---- 2.3 Calculations of the DESSA Raw Scores of each Child ID ----
table_PR_Child <- select(sel_data_imputed, Child_ID, Personal_Responsibility)
table_OT_Child <- select(sel_data_imputed, Child_ID, Optimistic_Thinking)
table_GDB_Child <- select(sel_data_imputed, Child_ID, Goal_Directed_Behavior)
table_SA_Child <- select(sel_data_imputed, Child_ID, Social_Awareness)
table_DM_Child <- select(sel_data_imputed, Child_ID, Decision_Making)
table_RS_Child <- select(sel_data_imputed, Child_ID, Relationship_Skills)
table_SelfAwar_Child <- select(sel_data_imputed, Child_ID, Self_Awareness)
table_SM_Child <- select(sel_data_imputed, Child_ID, Self_Management)
# You can view these table if necessary or export them to excell
# View(table_PR_Child)
# View(table_OT_Child)
# View(table_SA_Child)
# View(table_GDB_Child)
# View(table_DM_Child)
# View(table_RS_Child)
# View(table_SelfAwar_Child)
# View(table_SM_Child)
|
bb62f2de1aa11dfc026498bc8964a200583ae63f | 616e8ba5e7356a3b493062cd8095fa98455d12f1 | /Archive/exploring multiplicative factor for tilde c1.R | 533c789da991ac3745769d2a41941bad1fe1ab29 | [] | no_license | Breakend/RIBSS_tax_evasion_ABM | 0813ecc2ac964d0d68a12fb5b4559f26d25b502d | f64c4b8ab1e02a95fa3e032fbcb3b37647eeb017 | refs/heads/master | 2022-02-21T14:40:09.271146 | 2019-09-03T20:39:18 | 2019-09-03T20:39:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,619 | r | exploring multiplicative factor for tilde c1.R | remove(list = ls(all = TRUE))
# ---- Session set-up for the tax-evasion ABM exploration ----
gc()
##Set Seed
# NOTE(review): the second set.seed() call overrides the first, so the
# effective seed for this run is 55333.
set.seed(55279) ### first seed given by Chris Marcum
set.seed(55333)
#Setting options
# warn=1 prints warnings as they occur; the show*Calls options attach the
# originating call to error/warning messages, which helps debugging.
options(warn=1)
options(showErrorCalls=TRUE)
options(showWarnCalls=TRUE)
### Find on which platform and machine we are running this on
# NOTE(review): both branches of the inner if/else below assign the same
# OSdir path, and the Windows branch only sets OSdir on an empty nodename
# (leaving it as an empty string) -- this looks machine-specific; verify
# before reusing on another machine.
if(.Platform$OS.type == "windows"){
if (Sys.info()["nodename"]=="" ){
OSdir <- paste("" ,sep="")}
memory.limit(4000)
}else{
if (Sys.info()["nodename"]=="vardavas-r.rand.org" |
Sys.info()["nodename"]=="vardavas-r.local"){
OSdir <- paste("/Users/rvardava/Documents/Projects_2015/Tax_Evasion/R code/" ,sep="")}
else {
OSdir <- paste("/Users/rvardava/Documents/Projects_2015/Tax_Evasion/R code/" ,sep="")
}
}
library(ggplot2)
library(Hmisc)
library.dir <- "Library/"
# NOTE(review): this assignment masks base::library() with a character
# string. Later library(pkg) calls still resolve the function (R skips
# non-function objects when resolving a call), but the name is best avoided.
library <- file.path(library.dir, "library.R")
source(library)
### set Working Directory
# NOTE(review): setwd() mutates global state; discouraged in scripts.
setwd(OSdir)
# Gauss error function, expressed through the standard normal CDF:
# erf(x) = 2 * Phi(x * sqrt(2)) - 1. Vectorised over x.
erf <- function(x) {
  phi <- pnorm(x * sqrt(2))
  2 * phi - 1
}
# Inverse of the Gauss error function, via the normal quantile function:
# erf.inv(x) = qnorm((x + 1) / 2) / sqrt(2). Vectorised over x in (-1, 1).
erf.inv <- function(x) {
  z <- qnorm((x + 1)/2)
  z / sqrt(2)
}
# Logit (log-odds) transform: log(x / (1 - x)) for x in (0, 1).
# Vectorised over x.
logit <- function(x) {
  odds <- x/(1-x)
  log(odds)
}
# CDF of the logit-normal distribution: 0.5 * (1 + erf((logit(x) - mu) /
# sqrt(2 * sigma^2))). Relies on erf() and logit() defined above.
cdf.logitnormal <- function(x,mu=0,sigma=1) {
  z <- (logit(x) - mu)/sqrt(2*sigma^2)
  0.5 * (1 + erf(z))
}
# CDF of the lognormal distribution: 0.5 * (1 + erf((log(x) - mu) /
# (sqrt(2) * sigma))). Relies on erf() defined above. Note the median of
# this distribution is exp(mu).
cdf.lognormal <- function(x,mu=0,sigma=1) {
  z <- (log(x) - mu)/(sqrt(2)*sigma)
  0.5 * (1 + erf(z))
}
#median e^mu
# NOTE(review): c1 and c2 are set here but not referenced by the plots
# below -- presumably parameters of the tilde-c1 equation being explored.
c1<-0.1
c2<-0.7
# Grid of probabilities qP in [0, 1] on which the candidate multiplicative
# factors are evaluated.
qP <- seq(0,1,0.01)
# Candidate 1: lognormal CDF with mu = log(0.06) (median 0.06), sigma = 1.
tmp <- as.data.frame(cbind(x=qP,y=cdf.lognormal(x=qP, log(0.06),1)))
plot(tmp$x,tmp$y,type="l",xlab="qP",ylab="Phi",lwd = 2, col = "dark red",main = "multiplicative factor in the tilde c1 equation")
# Candidate 2: sqrt(qP). NOTE(review): this second plot replaces the first
# on the active graphics device (same title and labels).
tmp <- as.data.frame(cbind(x=qP,y=sqrt(qP)))
plot(tmp$x,tmp$y,type="l",xlab="qP",ylab="Phi",lwd = 2, col = "dark red",main = "multiplicative factor in the tilde c1 equation")
|
66ee9b2c6f7418e70ac3b753df737e9a5a9c99de | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831494-test.R | 56d1eb686b5ce2f223ee42ca894e0cfe7fe56d63 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 767 | r | 1615831494-test.R | testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(4.11667183447263e-311, 4.39109691573039e-265, -3.24725805263896e+53, 9.19320775670991e-307, 1.18965321338936e-150, -3.24725792070966e+53, 9.19320775670991e-307, 2.91390127620259e+235, 3.06007463551262e+163, 3.16743122292222e-161, 1.71875486715696e+93, -1.99202616888714e-137, -2.58497449382399e+45, -5.18731820469628e+222, 3.02609778442457e-306, 7.23459960146345e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the package-internal Thornthwaite-Wilmott ET0 routine on the fuzz
# inputs assembled in `testlist` above, then print the result's structure.
# NOTE(review): this appears to be an auto-generated fuzz-test harness.
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
cde1a63a55568d8daf10503c3d327f6ab54fe78d | a96e59fef9ec17190f1f32e678d8ba8c1c05dba8 | /experiments/plotting/plot_experiment_results.r | 51fbe43afee8d83f653b591a83aa7c70375d9333 | [] | no_license | stassa/mlj_2021 | b569b25c22fc9bef10eabd190e18130afbab2c83 | de8b822e3df88ef1b6da70950b8c2e21665b253b | refs/heads/master | 2023-04-01T02:31:52.762815 | 2021-04-07T16:21:47 | 2021-04-07T16:21:47 | 350,858,786 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,760 | r | plot_experiment_results.r | library(Hmisc)
# Plot mean curves (accuracy or time) for the three systems -- Sorts,
# Matrices, Punches -- with standard-error bars and a baseline.
# experiment_data.r is expected to define: metric, pos, neg, and the
# *.means / *.sds vectors used below. TODO confirm against that file.
source('experiment_data.r', local=T)
# Line types and colours
# 1: sorts, 2: matrices, 3: punches, 4: baseline.
# The baseline is drawn without points but needs a point type in pnt.typs
# otherwise the legend function will raise an error.
plot.type <- 'b'
lin.typs <- 1:4
pnt.typs <- 1:4
systems.cols <- c('red','blue','magenta','brown')
# Plot title and labels
# Uncomment for plot identification.
# title <- paste('Learning target:',target)
title <- ''
x.lab <- 'Sort metarules'
# Adjust y-label to metric.
if (metric == 'Time'){
y.lab <- 'Time (sec.)'
} else {
y.lab <- metric
}
# Legend
leg.pos <- 'topleft'
# Last legend item is the empty hypothesis. Can't find how to render \U2205,
# "Empty Set" correctly so using \U00D8 "Latin capital letter O with stroke" as
# an alternative.
leg.text <- c('Sorts', 'Matrices', 'Punches','H = \U00D8')
leg.lin.cols <- systems.cols
leg.lin.typs <- lin.typs
# 26 is the very-small-dot point type for the baseline that shouldn't have any
# point type really.
leg.pnt.typs <- c(1,2,3,26)
leg.lwd <- 4.0
# Increased legend text size
leg.cex <- 3
# Error bar line and point types
bar.type = 'o'
sort.bar.col = 'gray48'
matrix.bar.col = 'brown4'
punch.bar.col = 'darkgreen'
# Large axis, ticks, label, line and point sizes
# Better for papers. Tweak for other uses.
cex.axis <- 2.70
cex.lab <- 2.8
cex <- 2.5
lwd.ticks=3.0
lwd <- 3.0
# Increased errorbar sizes.
cap <- 0.025
# The x-axis enumerates results.
results.size <- length(sort.means)
x.axis <- 1:results.size
# Calculate standard errors.
# Standard error of the mean: sd / sqrt(number of results).
sort.ses <- sort.sds / sqrt(results.size)
matrix.ses <- matrix.sds / sqrt(results.size)
punch.ses <- punch.sds / sqrt(results.size)
# Calculate the accuracy of the empty hypothesis as the baseline of
# predictive accuracy: the proportion of negative examples, i.e. the
# accuracy of a hypothesis that rejects every example. When an experiment
# times out we report this baseline accuracy.
#
# pos, neg: counts of positive and negative testing examples.
# Returns neg / (pos + neg); NaN when both counts are zero.
baseline <- function(pos, neg) {
  # Return the expression directly: the previous dead local assignment
  # also made the return value invisible at top level.
  neg / (pos + neg)
}
base.line <- baseline(pos,neg)
# Calculate plot limits.
# BUGFIX(review): the limits previously used sort.ses with matrix.means
# (copy-paste); each system now uses its own standard error.
y.lim.max <- max(sort.means+sort.ses, matrix.means+matrix.ses, punch.means+punch.ses)
y.lim.min <- min(base.line, sort.means-sort.ses, matrix.means-matrix.ses, punch.means-punch.ses)
# Must call before getting legend size (legend(plot=FALSE) needs an open device).
plot.new()
leg.size <- legend(leg.pos, inset=0.02, legend=leg.text, lty=leg.lin.typs, pch=leg.pnt.typs, cex=leg.cex, lwd=leg.lwd, plot=FALSE)
# Note legend height added to y-axis limit so the legend never covers data.
y.lim <- c(y.lim.min, y.lim.max + leg.size$rect$h + 0.01)
# NOTE(review): x.lim is computed but never passed to plot() below.
x.lim <- c(1, results.size + 0.5)
# Save graphical parameters, then widen margins for the large labels.
p <- par()
par(mar=c(6,6.1,1.0,0.8), mgp=c(4,1,0) )
# Main plot lines: one curve per system.
plot(x.axis, main=title, sort.means, ylim=y.lim, type=plot.type, lty=lin.typs[1], pch=pnt.typs[1], col=systems.cols[1], xlab=x.lab, ylab=y.lab, xaxt='n', cex.axis=cex.axis, cex=cex, cex.main=cex, lwd=lwd, cex.lab=cex.lab, lwd.ticks=lwd.ticks)
lines(x.axis, matrix.means, ylim=y.lim, type=plot.type, lty=lin.typs[2], pch=pnt.typs[2], col=systems.cols[2], xlab=x.lab, ylab=y.lab, xaxt='n', cex.axis=cex.axis, cex=cex, lwd=lwd, cex.lab=cex.lab, lwd.ticks=lwd.ticks)
lines(x.axis, punch.means, ylim=y.lim, type=plot.type, lty=lin.typs[3], pch=pnt.typs[3], col=systems.cols[3], xlab=x.lab, ylab=y.lab, xaxt='n', cex.axis=cex.axis, cex=cex, lwd=lwd, cex.lab=cex.lab, lwd.ticks=lwd.ticks)
# Baseline - omitted when the metric is running time because time doesn't
# have a baseline. I'm sure there's an existential joke in there somewhere.
if(metric != 'Time') {
abline(h=base.line, type=plot.type, lty=lin.typs[4], pch=pnt.typs[4], col=systems.cols[4], xlab=x.lab, ylab=y.lab, xaxt='n', cex.axis=cex.axis, cex=cex, lwd=lwd, cex.lab=cex.lab, lwd.ticks=lwd.ticks)
}
# Error bars: one standard error above and below each mean.
errbar(x.axis, sort.means, yplus=sort.means+sort.ses, yminus=sort.means-sort.ses, col=0, pch=1, type=bar.type, errbar.col=sort.bar.col, add=TRUE, cap=cap, lwd=lwd)
errbar(x.axis, matrix.means, yplus=matrix.means+matrix.ses, yminus=matrix.means-matrix.ses, col=0, pch=1, type=bar.type, errbar.col=matrix.bar.col, add=TRUE, cap=cap, lwd=lwd)
errbar(x.axis, punch.means, yplus=punch.means+punch.ses, yminus=punch.means-punch.ses, col=0, pch=1, type=bar.type, errbar.col=punch.bar.col, add=TRUE, cap=cap, lwd=lwd)
# Plot axis, adjusted to contents (x tick labels are reversed).
axis(1, at=x.axis, labels=rev(x.axis), cex.axis=cex.axis, cex.lab=cex.lab, padj=0.5, lwd.ticks=lwd.ticks)
# There's no baseline for time, so we drop its entry from the legend.
if (metric == 'Time') {
leg.text <- leg.text[1:3]
leg.lin.typs <- leg.lin.typs[1:3]
leg.pnt.typs <- leg.pnt.typs[1:3]
leg.lin.cols <- leg.lin.cols[1:3]
legend(leg.pos, inset=0.02, legend=leg.text, lty=leg.lin.typs, pch=leg.pnt.typs, col=leg.lin.cols, cex=leg.cex, lwd=leg.lwd)
} else {
legend(leg.pos, inset=0.02, legend=leg.text, lty=leg.lin.typs, pch=leg.pnt.typs, col=leg.lin.cols, cex=leg.cex, lwd=leg.lwd)
}
# Restore the saved graphical parameters.
par(p)
|
5a9ddc5a0a03fa2df18b81bee53ba1902e37ba9b | daa633ef9085cc1567cb040553741386cae76890 | /man/plot.burningEmbers.Rd | bfae330e953a41492e58eb232d7f26a53b9c10d3 | [] | no_license | rkrug/burningEmbers | 6c322ee81e5bcd054e69193dd2c73e4c337093e3 | b8bbf0fd21e962f4aaba1c42b3f024b086cd3a7c | refs/heads/master | 2020-12-30T14:34:55.605088 | 2019-07-04T07:35:38 | 2019-07-04T07:35:38 | 91,074,086 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,774 | rd | plot.burningEmbers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.burningEmbers.R
\name{plot.burningEmbers}
\alias{plot.burningEmbers}
\title{plot.burningEmbers}
\usage{
\method{plot}{burningEmbers}(
x,
ylim,
labels,
ny = 1000,
relSpace = 7,
add0line = TRUE,
col = rev(heat.colors(1000)),
borderB = "black",
lwdB = 3,
xlab = "",
ylab = "",
...)
}
\arguments{
\item{x}{\code{burningEmbers} object to be plotted}
\item{ylim}{see \link{plot}}
\item{labels}{labels for the bars. If not specified, will be taken from the row names of the \code{x$colRange} parameter}
\item{ny}{default 1000; number of y blocks - 1000 should be sufficient}
\item{relSpace}{relative width of the gap between bars compared to the width of the bars}
\item{add0line}{default \code{TRUE}; if a line at \code{y = 0} should be drawn}
\item{col}{default \code{rev(heat.colors(1000))}; the colour to be used for the graph. The more values, the more continuous the gradient. 1000 should be fine.}
\item{borderB}{default "black"; the colour of the borders around the bars}
\item{lwdB}{default 3; the width of the borders around the bars}
\item{xlab}{x label}
\item{ylab}{y label}
\item{...}{additional parameter for the \code{image()} function. Examples are \code{main}, ...}
}
\value{
the data structure that was plotted
}
\description{
The \code{plot} method for the \code{burningEmbers} objects
}
\details{
Plot a Burning Amber Diagram
}
\examples{
x <- burningEmbers()
plot(x)
x <- burningEmbers(
bot = c(1, -2, 3, -3),
top = c(2, 5, 4, -2)
)
plot(x, main = "This is a header")
plot(x, main = "This is a header", ylim = c(-1, 3))
}
\author{
Rainer M. Krug
}
|
dfcb666935aea70bb85106c8af4eb3f23be8fb68 | 34fae523eccd13bfdc4fcd529e39840280c6c0a3 | /R/object.R | 19f7d4a60e6664e2fa9ea2baec143662c2e29ed2 | [
"MIT"
] | permissive | Mochi2805/nimue | 161970b5ad7dc24728e70651be93d322646e85f4 | 81c6acf1cce96804b96faa256d587a0295352cc5 | refs/heads/master | 2023-06-19T19:35:17.766158 | 2021-07-19T13:50:17 | 2021-07-19T13:50:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,897 | r | object.R | #' nimue simulation plot
#'
#' @param x An squire_simulation object
#' @param replicates Plot replicates
#' @param summarise Logical, add summary line
#' @param ci logical add confidence interval ribbon
#' @param q Quantiles for upper and lower of interval ribbon
#' @param var_select Vector of variable names to plot (default is all)
#' @param summary_f Function used to summarise each compartment across
#' replicates (applied within \code{dplyr::summarise})
#' @param x_var X variable to use for plotting (default is \code{"t"},
#' but can be set to, \code{"date"}, if \code{date_0} provided), which will
#' cause the date to be plotted rather than time.
#' @param particle_fit If the squire_simulation provided is the result of
#' running the particle filter, do we want to just plot the fit. Default =
#' FALSE
#' @param date_0 Date of time 0 (e.g. "2020-03-01"), if specified a date column
#' will be added
#' @param ... additional arguments affecting the plot produced.
#'
#' @export
#'
plot.nimue_simulation <- function(x,
                                  var_select = NULL,
                                  replicates = FALSE,
                                  summarise = TRUE,
                                  ci = TRUE,
                                  q = c(0.025, 0.975),
                                  summary_f = mean,
                                  x_var = "t",
                                  date_0 = NULL,
                                  particle_fit = FALSE,
                                  ...) {

  # If only the model fit is wanted, delegate to the squire plotting
  # helpers. (&& replaces `&`: both operands are scalar and && makes
  # the scalar intent explicit and short-circuits.)
  if (particle_fit && !is.null(x$pmcmc_results)) {
    return(squire:::plot_pmcmc_sample(x, ...))
  } else if (particle_fit && !is.null(x$scan_results)) {
    return(squire:::plot_sample_grid_search(x, ...))
  }

  # Known compartment and summary variable names; var_select is split
  # against these two sets.
  compartments <- c("S", "E",
                    "IMild", "ICase", "IICU", "IHospital",
                    "IRec", "R", "D")
  summaries <- c("N",
                 "hospitalisations",
                 "hospital_demand","hospital_occupancy",
                 "ICU_demand", "ICU_occupancy",
                 "vaccines", "unvaccinated", "vaccinated", "priorvaccinated",
                 "infections", "deaths")

  comps <- var_select[var_select %in% compartments]
  summs <- var_select[var_select %in% summaries]

  # Get the requested variables for every replicate and stack them into
  # one long data frame, renaming `value` to `y` for plotting.
  pd <- do.call(rbind, lapply(seq_len(dim(x$output)[3]), function(i) {
    format(x, compartments = comps, summaries = summs, replicate = i)
  })) %>%
    dplyr::rename(y = .data$value)

  # Replacing time with date if date_0 is provided.
  if(!is.null(date_0)){
    assert_date(date_0)
    pd$date <- as.Date(pd$t + as.Date(date_0),
                       format = "%Y-%m-%d")
  }

  # Make the x variable be the axis requested ("t" or "date").
  pd <- pd %>% dplyr::mutate(x = .data[[x_var]])

  # Remove any NA rows (due to different start dates).
  if(any(is.na(pd$t) | is.na(pd$y))) {
    pd <- pd[-which(is.na(pd$t) | is.na(pd$y)),]
  }

  # Summarise across replicates: central tendency via summary_f plus the
  # q[1]/q[2] quantile band, per x value and compartment.
  pds <- pd %>%
    dplyr::group_by(.data$x, .data$compartment) %>%
    dplyr::summarise(ymin = stats::quantile(.data$y, q[1]),
                     ymax = stats::quantile(.data$y, q[2]),
                     y = summary_f(.data$y))

  # Plot
  p <- ggplot2::ggplot()

  # Add lines for individual draws, faded by the number of replicates.
  if(replicates){
    p <- p + ggplot2::geom_line(data = pd,
                                ggplot2::aes(x = .data$x,
                                             y = .data$y,
                                             col = .data$compartment,
                                             group = interaction(.data$compartment, .data$replicate)),
                                alpha = max(0.2, 1 / x$parameters$replicates))
  }

  # Summary line across replicates.
  if(summarise){
    if(x$parameters$replicates < 10){
      warning("Summary statistic estimated from <10 replicates")
    }
    p <- p + ggplot2::geom_line(data = pds,
                                ggplot2::aes(x = .data$x, y = .data$y,
                                             col = .data$compartment))
  }

  # Quantile ribbon.
  if(ci){
    if(x$parameters$replicates < 10){
      warning("Confidence bounds estimated from <10 replicates")
    }
    p <- p + ggplot2::geom_ribbon(data = pds,
                                  ggplot2::aes(x = .data$x,
                                               ymin = .data$ymin,
                                               ymax = .data$ymax,
                                               fill = .data$compartment),
                                  alpha = 0.25, col = NA)
  }

  # Add remaining formatting. guide = "none" replaces the deprecated
  # guide = FALSE (deprecated since ggplot2 3.3.4).
  p <- p +
    ggplot2::scale_color_discrete(name = "") +
    ggplot2::scale_fill_discrete(guide = "none") +
    ggplot2::xlab("Time") +
    ggplot2::ylab("N") +
    ggplot2::theme_bw() +
    ggplot2::guides(col = ggplot2::guide_legend(ncol = 2))

  return(p)

}
|
755cdbeb42e535c0eb849fd189ed58d59398bffb | 591771c6a3972cab8c680696771fd4b4aa0c3f20 | /R/setLastImportDate.R | 3b7b0608d094a490a65df8d121885939b2f63e98 | [] | no_license | Sumpfohreule/S4Level2 | a36dfc014dde47763009dcc4420a198ce11a9a5d | 9034cddbd04efed8cea8c5b90cb2e4fbf16209e7 | refs/heads/main | 2023-08-19T08:58:05.616624 | 2021-09-29T14:47:03 | 2021-09-29T14:47:03 | 304,371,990 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 246 | r | setLastImportDate.R | ########################################################################################################################
# Generic: record the date of the most recent data import on an object.
# Concrete classes provide implementations via
# setMethod("setLastImportDate", ...).
#
# .Object      -- the object whose import date should be updated.
# posixct_date -- the import timestamp (expected to be POSIXct).
setGeneric(
    "setLastImportDate",
    def = function(.Object, posixct_date) standardGeneric("setLastImportDate")
)
|
04538b7687fc92b66f09016ce80945089d21ad28 | 2e940271c21be18f391ebaeab2079e03728eecb9 | /man/branchsim-package.Rd | af8ddb6cb0580df9505b44950f1c9800468a1393 | [] | no_license | pspc-data-science/branchsim | 3fe1a98b4f219dd6258f82868f36718c2e344ef5 | d49ab68e071e91ac8f46b074ff23cb728a147c64 | refs/heads/master | 2023-02-13T18:56:06.240842 | 2021-01-12T17:48:36 | 2021-01-12T17:48:36 | 273,470,739 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,080 | rd | branchsim-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/branchsim-package.R
\docType{package}
\name{branchsim-package}
\alias{branchsim}
\alias{branchsim-package}
\title{branchsim: Gamma negative binomial branching process simulation of epidemics}
\description{
A tool suite, including a simulation engine, for a Crump-Mode-Jagers continuous-time branching process of epidemic propagation based on a gamma-subordinated negative binomial process (GNBBP). The simulation engine builds continuous-time sample paths with the inclusion of propagation interruption mechanisms. Expectations over random characteristics are made possible by numerical integration of the renewal equation. Malthusian parameters and asymptotic analysis are also included. See Levesque, Maybury, and Shaw https://www.medrxiv.org/content/10.1101/2020.07.08.20149039v3 for model details.
}
\author{
\strong{Maintainer}: Jérôme Levesque \email{jerome.levesque@tpsgc-pwgsc.gc.ca}
Authors:
\itemize{
\item David Maybury \email{david.maybury@tpsgc-pwgsc.gc.ca}
}
}
\keyword{internal}
|
202f6437cb0100f8b91f7b2adf94271b022d283c | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.analytics/R/kafkaconnect_operations.R | 6c8785f990af8dad6d6283d746bf2ce4b08d8f44 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 17,909 | r | kafkaconnect_operations.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include kafkaconnect_service.R
NULL
#' Creates a connector using the specified properties
#'
#' @description
#' Creates a connector using the specified properties.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_create_connector/](https://www.paws-r-sdk.com/docs/kafkaconnect_create_connector/) for full documentation.
#'
#' @param capacity [required] Information about the capacity allocated to the connector. Exactly one
#' of the two properties must be specified.
#' @param connectorConfiguration [required] A map of keys to values that represent the configuration for the
#' connector.
#' @param connectorDescription A summary description of the connector.
#' @param connectorName [required] The name of the connector.
#' @param kafkaCluster [required] Specifies which Apache Kafka cluster to connect to.
#' @param kafkaClusterClientAuthentication [required] Details of the client authentication used by the Apache Kafka cluster.
#' @param kafkaClusterEncryptionInTransit [required] Details of encryption in transit to the Apache Kafka cluster.
#' @param kafkaConnectVersion [required] The version of Kafka Connect. It has to be compatible with both the
#' Apache Kafka cluster's version and the plugins.
#' @param logDelivery Details about log delivery.
#' @param plugins [required] Specifies which plugins to use for the connector.
#' @param serviceExecutionRoleArn [required] The Amazon Resource Name (ARN) of the IAM role used by the connector to
#' access the Amazon Web Services resources that it needs. The types of
#' resources depends on the logic of the connector. For example, a
#' connector that has Amazon S3 as a destination must have permissions that
#' allow it to write to the S3 destination bucket.
#' @param workerConfiguration Specifies which worker configuration to use with the connector.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_create_connector
kafkaconnect_create_connector <- function(capacity, connectorConfiguration, connectorDescription = NULL, connectorName, kafkaCluster, kafkaClusterClientAuthentication, kafkaClusterEncryptionInTransit, kafkaConnectVersion, logDelivery = NULL, plugins, serviceExecutionRoleArn, workerConfiguration = NULL) {
  # Operation descriptor: API name, HTTP verb and endpoint path.
  op <- new_operation(
    name = "CreateConnector",
    http_method = "POST",
    http_path = "/v1/connectors",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; build the empty
  # response shape the reply will be deserialized into.
  input <- .kafkaconnect$create_connector_input(capacity = capacity, connectorConfiguration = connectorConfiguration, connectorDescription = connectorDescription, connectorName = connectorName, kafkaCluster = kafkaCluster, kafkaClusterClientAuthentication = kafkaClusterClientAuthentication, kafkaClusterEncryptionInTransit = kafkaClusterEncryptionInTransit, kafkaConnectVersion = kafkaConnectVersion, logDelivery = logDelivery, plugins = plugins, serviceExecutionRoleArn = serviceExecutionRoleArn, workerConfiguration = workerConfiguration)
  output <- .kafkaconnect$create_connector_output()
  # Resolve client configuration, build the service handle, then sign
  # and dispatch the request.
  config <- get_config()
  svc <- .kafkaconnect$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the package-internal service object.
.kafkaconnect$operations$create_connector <- kafkaconnect_create_connector
#' Creates a custom plugin using the specified properties
#'
#' @description
#' Creates a custom plugin using the specified properties.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_create_custom_plugin/](https://www.paws-r-sdk.com/docs/kafkaconnect_create_custom_plugin/) for full documentation.
#'
#' @param contentType [required] The type of the plugin file.
#' @param description A summary description of the custom plugin.
#' @param location [required] Information about the location of a custom plugin.
#' @param name [required] The name of the custom plugin.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_create_custom_plugin
kafkaconnect_create_custom_plugin <- function(contentType, description = NULL, location, name) {
  # Operation descriptor: API name, HTTP verb and endpoint path.
  op <- new_operation(
    name = "CreateCustomPlugin",
    http_method = "POST",
    http_path = "/v1/custom-plugins",
    paginator = list()
  )
  # Marshal caller arguments into the request/response shapes.
  input <- .kafkaconnect$create_custom_plugin_input(contentType = contentType, description = description, location = location, name = name)
  output <- .kafkaconnect$create_custom_plugin_output()
  # Resolve client configuration, build the service handle, sign and send.
  config <- get_config()
  svc <- .kafkaconnect$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the package-internal service object.
.kafkaconnect$operations$create_custom_plugin <- kafkaconnect_create_custom_plugin
#' Creates a worker configuration using the specified properties
#'
#' @description
#' Creates a worker configuration using the specified properties.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_create_worker_configuration/](https://www.paws-r-sdk.com/docs/kafkaconnect_create_worker_configuration/) for full documentation.
#'
#' @param description A summary description of the worker configuration.
#' @param name [required] The name of the worker configuration.
#' @param propertiesFileContent [required] Base64 encoded contents of connect-distributed.properties file.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_create_worker_configuration
kafkaconnect_create_worker_configuration <- function(description = NULL, name, propertiesFileContent) {
  # Operation descriptor: API name, HTTP verb and endpoint path.
  op <- new_operation(
    name = "CreateWorkerConfiguration",
    http_method = "POST",
    http_path = "/v1/worker-configurations",
    paginator = list()
  )
  # Marshal caller arguments into the request/response shapes.
  input <- .kafkaconnect$create_worker_configuration_input(description = description, name = name, propertiesFileContent = propertiesFileContent)
  output <- .kafkaconnect$create_worker_configuration_output()
  # Resolve client configuration, build the service handle, sign and send.
  config <- get_config()
  svc <- .kafkaconnect$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the package-internal service object.
.kafkaconnect$operations$create_worker_configuration <- kafkaconnect_create_worker_configuration
#' Deletes the specified connector
#'
#' @description
#' Deletes the specified connector.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_delete_connector/](https://www.paws-r-sdk.com/docs/kafkaconnect_delete_connector/) for full documentation.
#'
#' @param connectorArn [required] The Amazon Resource Name (ARN) of the connector that you want to delete.
#' @param currentVersion The current version of the connector that you want to delete.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_delete_connector
kafkaconnect_delete_connector <- function(connectorArn, currentVersion = NULL) {
  # Operation descriptor: API name, HTTP verb and endpoint path
  # (connectorArn is interpolated into the path).
  op <- new_operation(
    name = "DeleteConnector",
    http_method = "DELETE",
    http_path = "/v1/connectors/{connectorArn}",
    paginator = list()
  )
  # Marshal caller arguments into the request/response shapes.
  input <- .kafkaconnect$delete_connector_input(connectorArn = connectorArn, currentVersion = currentVersion)
  output <- .kafkaconnect$delete_connector_output()
  # Resolve client configuration, build the service handle, sign and send.
  config <- get_config()
  svc <- .kafkaconnect$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the package-internal service object.
.kafkaconnect$operations$delete_connector <- kafkaconnect_delete_connector
#' Deletes a custom plugin
#'
#' @description
#' Deletes a custom plugin.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_delete_custom_plugin/](https://www.paws-r-sdk.com/docs/kafkaconnect_delete_custom_plugin/) for full documentation.
#'
#' @param customPluginArn [required] The Amazon Resource Name (ARN) of the custom plugin that you want to
#' delete.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_delete_custom_plugin
kafkaconnect_delete_custom_plugin <- function(customPluginArn) {
  # Operation descriptor: API name, HTTP verb and endpoint path
  # (customPluginArn is interpolated into the path).
  op <- new_operation(
    name = "DeleteCustomPlugin",
    http_method = "DELETE",
    http_path = "/v1/custom-plugins/{customPluginArn}",
    paginator = list()
  )
  # Marshal caller arguments into the request/response shapes.
  input <- .kafkaconnect$delete_custom_plugin_input(customPluginArn = customPluginArn)
  output <- .kafkaconnect$delete_custom_plugin_output()
  # Resolve client configuration, build the service handle, sign and send.
  config <- get_config()
  svc <- .kafkaconnect$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the package-internal service object.
.kafkaconnect$operations$delete_custom_plugin <- kafkaconnect_delete_custom_plugin
#' Returns summary information about the connector
#'
#' @description
#' Returns summary information about the connector.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_describe_connector/](https://www.paws-r-sdk.com/docs/kafkaconnect_describe_connector/) for full documentation.
#'
#' @param connectorArn [required] The Amazon Resource Name (ARN) of the connector that you want to
#' describe.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_describe_connector
kafkaconnect_describe_connector <- function(connectorArn) {
  # Wire-level description of the call: verb, path template, no pagination.
  operation <- new_operation(
    name = "DescribeConnector",
    http_method = "GET",
    http_path = "/v1/connectors/{connectorArn}",
    paginator = list()
  )
  # Marshal the argument into this operation's request/response shapes.
  req_input <- .kafkaconnect$describe_connector_input(connectorArn = connectorArn)
  req_output <- .kafkaconnect$describe_connector_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$describe_connector <- kafkaconnect_describe_connector
#' A summary description of the custom plugin
#'
#' @description
#' A summary description of the custom plugin.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_describe_custom_plugin/](https://www.paws-r-sdk.com/docs/kafkaconnect_describe_custom_plugin/) for full documentation.
#'
#' @param customPluginArn [required] Returns information about a custom plugin.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_describe_custom_plugin
kafkaconnect_describe_custom_plugin <- function(customPluginArn) {
  # Wire-level description of the call: verb, path template, no pagination.
  operation <- new_operation(
    name = "DescribeCustomPlugin",
    http_method = "GET",
    http_path = "/v1/custom-plugins/{customPluginArn}",
    paginator = list()
  )
  # Marshal the argument into this operation's request/response shapes.
  req_input <- .kafkaconnect$describe_custom_plugin_input(customPluginArn = customPluginArn)
  req_output <- .kafkaconnect$describe_custom_plugin_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$describe_custom_plugin <- kafkaconnect_describe_custom_plugin
#' Returns information about a worker configuration
#'
#' @description
#' Returns information about a worker configuration.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_describe_worker_configuration/](https://www.paws-r-sdk.com/docs/kafkaconnect_describe_worker_configuration/) for full documentation.
#'
#' @param workerConfigurationArn [required] The Amazon Resource Name (ARN) of the worker configuration that you want
#' to get information about.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_describe_worker_configuration
kafkaconnect_describe_worker_configuration <- function(workerConfigurationArn) {
  # Wire-level description of the call: verb, path template, no pagination.
  operation <- new_operation(
    name = "DescribeWorkerConfiguration",
    http_method = "GET",
    http_path = "/v1/worker-configurations/{workerConfigurationArn}",
    paginator = list()
  )
  # Marshal the argument into this operation's request/response shapes.
  req_input <- .kafkaconnect$describe_worker_configuration_input(
    workerConfigurationArn = workerConfigurationArn
  )
  req_output <- .kafkaconnect$describe_worker_configuration_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$describe_worker_configuration <- kafkaconnect_describe_worker_configuration
#' Returns a list of all the connectors in this account and Region
#'
#' @description
#' Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_list_connectors/](https://www.paws-r-sdk.com/docs/kafkaconnect_list_connectors/) for full documentation.
#'
#' @param connectorNamePrefix The name prefix that you want to use to search for and list connectors.
#' @param maxResults The maximum number of connectors to list in one response.
#' @param nextToken If the response of a ListConnectors operation is truncated, it will
#' include a NextToken. Send this NextToken in a subsequent request to
#' continue listing from where the previous operation left off.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_list_connectors
kafkaconnect_list_connectors <- function(connectorNamePrefix = NULL, maxResults = NULL, nextToken = NULL) {
  # Wire-level description of the call; this operation is paginated on
  # nextToken/maxResults with results under "connectors".
  operation <- new_operation(
    name = "ListConnectors",
    http_method = "GET",
    http_path = "/v1/connectors",
    paginator = list(input_token = "nextToken", output_token = "nextToken", limit_key = "maxResults", result_key = "connectors")
  )
  # Marshal the arguments into this operation's request/response shapes.
  req_input <- .kafkaconnect$list_connectors_input(
    connectorNamePrefix = connectorNamePrefix,
    maxResults = maxResults,
    nextToken = nextToken
  )
  req_output <- .kafkaconnect$list_connectors_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$list_connectors <- kafkaconnect_list_connectors
#' Returns a list of all of the custom plugins in this account and Region
#'
#' @description
#' Returns a list of all of the custom plugins in this account and Region.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_list_custom_plugins/](https://www.paws-r-sdk.com/docs/kafkaconnect_list_custom_plugins/) for full documentation.
#'
#' @param maxResults The maximum number of custom plugins to list in one response.
#' @param nextToken If the response of a ListCustomPlugins operation is truncated, it will
#' include a NextToken. Send this NextToken in a subsequent request to
#' continue listing from where the previous operation left off.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_list_custom_plugins
kafkaconnect_list_custom_plugins <- function(maxResults = NULL, nextToken = NULL) {
  # Wire-level description of the call; paginated on nextToken/maxResults
  # with results under "customPlugins".
  operation <- new_operation(
    name = "ListCustomPlugins",
    http_method = "GET",
    http_path = "/v1/custom-plugins",
    paginator = list(input_token = "nextToken", output_token = "nextToken", limit_key = "maxResults", result_key = "customPlugins")
  )
  # Marshal the arguments into this operation's request/response shapes.
  req_input <- .kafkaconnect$list_custom_plugins_input(
    maxResults = maxResults,
    nextToken = nextToken
  )
  req_output <- .kafkaconnect$list_custom_plugins_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$list_custom_plugins <- kafkaconnect_list_custom_plugins
#' Returns a list of all of the worker configurations in this account and
#' Region
#'
#' @description
#' Returns a list of all of the worker configurations in this account and Region.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_list_worker_configurations/](https://www.paws-r-sdk.com/docs/kafkaconnect_list_worker_configurations/) for full documentation.
#'
#' @param maxResults The maximum number of worker configurations to list in one response.
#' @param nextToken If the response of a ListWorkerConfigurations operation is truncated, it
#' will include a NextToken. Send this NextToken in a subsequent request to
#' continue listing from where the previous operation left off.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_list_worker_configurations
kafkaconnect_list_worker_configurations <- function(maxResults = NULL, nextToken = NULL) {
  # Wire-level description of the call; paginated on nextToken/maxResults
  # with results under "workerConfigurations".
  operation <- new_operation(
    name = "ListWorkerConfigurations",
    http_method = "GET",
    http_path = "/v1/worker-configurations",
    paginator = list(input_token = "nextToken", output_token = "nextToken", limit_key = "maxResults", result_key = "workerConfigurations")
  )
  # Marshal the arguments into this operation's request/response shapes.
  req_input <- .kafkaconnect$list_worker_configurations_input(
    maxResults = maxResults,
    nextToken = nextToken
  )
  req_output <- .kafkaconnect$list_worker_configurations_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$list_worker_configurations <- kafkaconnect_list_worker_configurations
#' Updates the specified connector
#'
#' @description
#' Updates the specified connector.
#'
#' See [https://www.paws-r-sdk.com/docs/kafkaconnect_update_connector/](https://www.paws-r-sdk.com/docs/kafkaconnect_update_connector/) for full documentation.
#'
#' @param capacity [required] The target capacity.
#' @param connectorArn [required] The Amazon Resource Name (ARN) of the connector that you want to update.
#' @param currentVersion [required] The current version of the connector that you want to update.
#'
#' @keywords internal
#'
#' @rdname kafkaconnect_update_connector
kafkaconnect_update_connector <- function(capacity, connectorArn, currentVersion) {
  # Wire-level description of the call: verb, path template, no pagination.
  operation <- new_operation(
    name = "UpdateConnector",
    http_method = "PUT",
    http_path = "/v1/connectors/{connectorArn}",
    paginator = list()
  )
  # Marshal the arguments into this operation's request/response shapes.
  req_input <- .kafkaconnect$update_connector_input(
    capacity = capacity,
    connectorArn = connectorArn,
    currentVersion = currentVersion
  )
  req_output <- .kafkaconnect$update_connector_output()
  # Build a service handle from the active config and dispatch the request.
  svc <- .kafkaconnect$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.kafkaconnect$operations$update_connector <- kafkaconnect_update_connector
|
08906c573612ce65e09b5fd59d8df52c8115ec9b | f65d6b7b175b2d9eff34de4a3c7dfe4f09ca9a36 | /R/cpmmc_main.R | 1f8a2619cbb3be7fc8bdd956c2284779c356133d | [] | no_license | OxWaSP2018/cpmmc | a6c21dc13f4f44f0a525f8f6b1f61954ad5f6f22 | c33e7a69907cbfc46af44193febf4a1940ba43cb | refs/heads/master | 2023-08-16T09:02:11.782375 | 2018-11-07T17:22:33 | 2018-11-07T17:22:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,193 | r | cpmmc_main.R |
#' Instantiate cpmmc object and set initial parameters
#'
#' This is an S3 object constructor helper function for class cpmmc which inherit from metropolis_hastings class and markov_chain class
#'
#' @param data, observed data points
#' @param theta_0, initialisation of the vector of parameters of interest
#' @param u_0, initialisation of the auxiliary random variables
#' @param rho, correlation parameter used to sample auxiliary random variables
#' @param log_marginal_estimator_func, log likelihood estimator function
#' @param log_theta_prior_density, log density function for the prior distribution of theta
#' @param log_theta_proposal_density, log density function for theta proposals
#' @param theta_proposal_sampler, distribution function to sample new theta proposals
#' @return cpmmc object
#' @examples
#' cpmmc_obj <- cpmmc(data = rnorm(10, 1, 1),
#' theta_0 = 0,
#' u_0 = array(rnorm(5*10), dim = c(5,1,10)),
#' rho = 0.9,
#' log_marginal_estimator_func = function(x) {dnorm(x, 1, log=T)},
#' log_theta_prior_density = function(x) {dnorm(x, log = T)},
#' log_theta_proposal_density = function(old_theta, new_theta) {dnorm(new_theta-old_theta, log = T)},
#' theta_proposal_sampler = function(theta) {rnorm(1, mean = theta)}
#' )
#' @export
cpmmc <- function(data,
                  theta_0,
                  u_0,
                  rho,
                  log_marginal_estimator_func,
                  log_theta_prior_density,
                  log_theta_proposal_density,
                  theta_proposal_sampler) {
  # The dimensions of the auxiliary array encode the sampler's bookkeeping:
  # [importance samples, parameters per observation, data points].
  u_dims <- dim(u_0)

  obj <- list(
    data = data,
    N_ = u_dims[1],          # number of importance-sampling draws
    T_ = u_dims[3],          # number of data points
    p_ = u_dims[2],          # parameters per observation
    chain_length = 1,        # grows as transitions are applied
    chain = list(theta_0),   # accepted theta values, seeded with theta_0
    latest_state = list(theta = theta_0, u = u_0),
    proposed_chain = list(), # proposed theta values, filled by transitions
    rho = rho,               # correlation of successive auxiliary draws
    # Importance-sampling estimator of the marginal likelihood (phat).
    log_marginal_estimator_func = log_marginal_estimator_func,
    # Proposal density q(new | old), typically a random walk.
    log_theta_proposal_density = log_theta_proposal_density,
    # Sampler matching the proposal density above.
    theta_proposal_sampler = theta_proposal_sampler,
    # Prior density p(theta).
    log_theta_prior_density = log_theta_prior_density
  )

  # Inherit from the generic Metropolis-Hastings / Markov chain classes.
  class(obj) <- c("cpmmc", "metropolis_hastings", "markov_chain")
  obj
}
#' S3 Implementation of single_transition generic method for cpmmc
#'
#' Runs correlated pseudo-marginal algorithm once to generate a new proposal and accepted state from
#' the latest state of markov chain for cpmmc object, using intrinsic cpmmc proposals
#'
#' @param object, cpmmc object
#' @return cpmmc object, with chains of parameters updated by one step of the correlated pseudo-marginal algorithm
#' @export
single_transition.cpmmc <- function(object) {
  # Current position of the chain.
  state_now <- get_state(object)
  theta_now <- state_now$theta
  u_now <- state_now$u

  # Draw a candidate theta from the user-supplied proposal sampler.
  theta_cand <- object$theta_proposal_sampler(theta_now)

  # Crank the auxiliary variables: the rho-correlated update keeps the new
  # u close to the old one, which is the point of the correlated
  # pseudo-marginal scheme.
  rho <- object$rho
  innovations <- array(
    data = rnorm(object$T_ * object$N_ * object$p_),
    dim = c(object$N_, object$p_, object$T_)
  )
  u_cand <- rho * u_now + sqrt(1 - rho^2) * innovations
  candidate <- list(theta = theta_cand, u = u_cand)

  # Log acceptance ratio built from the estimated marginal likelihoods,
  # the prior, and the (possibly asymmetric) proposal densities.
  loglik_cand <- object$log_marginal_estimator_func(object$data, theta_cand, u_cand)
  loglik_now <- object$log_marginal_estimator_func(object$data, theta_now, u_now)
  log_num <- loglik_cand +
    object$log_theta_prior_density(theta_cand) +
    object$log_theta_proposal_density(theta_cand, theta_now)
  log_den <- loglik_now +
    object$log_theta_prior_density(theta_now) +
    object$log_theta_proposal_density(theta_now, theta_cand)

  # Metropolis-Hastings accept/reject step.
  new_state <- if (log(runif(1)) < log_num - log_den) candidate else state_now

  # Record both the accepted and the proposed state in the object's chains.
  object$latest_state <- new_state
  object$chain_length <- object$chain_length + 1
  object$chain[[object$chain_length]] <- new_state$theta
  object$proposed_chain[[object$chain_length - 1]] <- candidate$theta
  # TODO(original author): remove this hack -- exposes the raw estimates.
  object$log_new_marginal_estimator <- loglik_cand
  object$log_old_marginal_estimator <- loglik_now

  object
}
|
e661e0bb69b7504b20e2e1dd98b6a9c6f2e238dd | 69630a75fb71b75a1abd21f74b8a811533f7cab4 | /man/energy_scotland.Rd | 84560935b71cdf4f99f59fd289d8a5f968f6f598 | [] | no_license | codeclan/CodeClanData | aa0cd21aea3390d3629ab7ebbd53543623d92941 | e3965c596a0439e125b4643412bd434a54266da8 | refs/heads/master | 2023-03-16T06:52:38.769843 | 2023-03-10T09:02:43 | 2023-03-10T09:02:43 | 190,196,169 | 5 | 8 | null | 2019-08-03T10:46:43 | 2019-06-04T12:24:19 | R | UTF-8 | R | false | true | 281 | rd | energy_scotland.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{energy_scotland}
\alias{energy_scotland}
\title{energy_scotland}
\format{
A data frame
}
\usage{
energy_scotland
}
\description{
energy_scotland
}
\keyword{datasets}
|
8f40b2c68d2644c908cb63398171575f7c03138e | e553d1a55742c8c8be3d0e7c656583d2cfecb08e | /run_analysis.R | 8947b04c5cd752c8ad24a440a76d9c2f7dc96fca | [] | no_license | AndyGerrity/Getting-and-Cleaning-Data-Project | 074c9595bd6c49943d0503d3b840633353b172a4 | 81fd23dcd3d6fd4399e70e939efdc2872f5330b9 | refs/heads/master | 2021-01-12T17:51:51.873842 | 2016-10-22T17:42:54 | 2016-10-22T17:42:54 | 71,654,606 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,201 | r | run_analysis.R | ###Get the data
# Download and unpack the UCI HAR dataset into ./data.
if (!file.exists("./data")) {
  dir.create("./data")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/Dataset.zip")
unzip(zipfile = "./data/Dataset.zip", exdir = "./data")
# Read the raw feature, activity and subject files for both cohorts.
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
activitylabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
# Stack the test and train cohorts. All three tables MUST stack in the same
# cohort order so rows stay aligned when they are cbind-ed below.
# (Bug fix: features were previously stacked train-first while subjects and
# activities were stacked test-first, misaligning every row.)
data_subject <- rbind(subject_test, subject_train)
data_activity <- rbind(y_test, y_train)
data_features <- rbind(x_test, x_train)
# Label the columns; feature names come from features.txt.
names(data_subject) <- "subject"
names(data_activity) <- "activity"
names(data_features) <- features$V2
colnames(activitylabels) <- c("activity", "activityType")
# Combine activity, subject and feature columns into one data set.
data_combined <- cbind(data_activity, data_sub_
ject <- data_subject)
data <- cbind(data_combined, data_features)
# Keep only mean/std measurements (plus the subject/activity id columns).
data_meanstd <- data[, grepl("mean|std|subject|activity", names(data))]
# Expand the terse variable name prefixes/abbreviations.
names(data_meanstd) <- gsub("^t", "time", names(data_meanstd))
names(data_meanstd) <- gsub("^f", "frequency", names(data_meanstd))
names(data_meanstd) <- gsub("Acc", "Accelerometer", names(data_meanstd))
names(data_meanstd) <- gsub("Gyro", "Gyroscope", names(data_meanstd))
names(data_meanstd) <- gsub("Mag", "Magnitude", names(data_meanstd))
names(data_meanstd) <- gsub("BodyBody", "Body", names(data_meanstd))
# Attach descriptive activity names.
data_tidynames <- merge(data_meanstd, activitylabels, by = "activity", all.x = TRUE)
# Average each variable per subject/activity pair, order, and write out the
# tidy data set ("row.names", not the partially-matched "row.name").
data_tidyset <- aggregate(. ~ subject + activity, data_tidynames, mean)
data_tidyset <- data_tidyset[order(data_tidyset$subject, data_tidyset$activity), ]
write.table(data_tidyset, "data_tidyset.txt", row.names = FALSE)
|
803d7f4321f97ec7dae3709ad3fcfa20ce5a647e | 3db768da9b99873ec55cdf449feaf64654da1bdf | /R/readGff.R | b18b7c3357fd566ed6c76ce95ea51fcbdc935810 | [] | no_license | zhushijia/STAR2bSMRT | ca8cf30efaca8bed2fda287779b5f705875a4c0d | a3081be9455f6073e6ce1be4a229983aab7c6aad | refs/heads/master | 2021-03-27T08:49:39.151251 | 2019-12-12T23:13:13 | 2019-12-12T23:13:13 | 119,112,032 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 910 | r | readGff.R | #' readGff
#' for reading isoform gff file
#'
#' @param file character value indicating the name of the gff file
#' @param chrom character value indicating the chromosome of interest. By default,
#' STAR2bSMRT works on the whole genome.
#' @param s integer value indicating the start position of the transcript of
#' interest. This is useful for targeted Iso-Seq sequencing.
#' @param e integer value indicating the end position of the transcript of
#' interest. This is useful for targeted Iso-Seq sequencing.
#'
#' @return a list of isoforms
#' @export
#'
#' @examples
readGff <- function(file, chrom = NULL, s = 0, e = Inf) {
  # Parse the tab-separated GFF (read.table's default comment.char = "#"
  # already skips "#" header lines) and name the columns we use below.
  gff_tab <- read.table(file, sep = "\t")
  colnames(gff_tab)[c(1, 3:5, 7)] <- c("chr", "type", "start", "end", "strand")

  # Keep only exon records whose coordinates fall inside [s, e].
  keep <- which(gff_tab$type == "exon" & gff_tab$start >= s & gff_tab$end <= e)
  exons <- gff_tab[keep, ]

  # Optionally restrict to a single chromosome.
  if (!is.null(chrom)) {
    exons <- exons[which(exons$chr == chrom), ]
  }

  # Return one (chr, start, end) data frame per isoform, keyed by the GFF
  # attribute field (column 9).
  split(exons[, c(1, 4:5)], exons[, 9])
}
|
91242cb1159d2d5a8248905145b17529376fc4d2 | 474b9c0dd798418894fd47cea3251da1239cc027 | /readData.R | fc483e6d9cb3ffc99e624641d5bf507bd2e323b4 | [] | no_license | abessudnov/dataanalysis3 | d80af34487aca7685787fa8cf7a79a789e8d2402 | b9689b7442526d96ca6f1d9ce1a874cac900c8b2 | refs/heads/master | 2021-05-12T00:09:24.271244 | 2019-01-17T21:19:35 | 2019-01-17T21:19:35 | 117,527,612 | 3 | 35 | null | 2018-08-21T16:34:34 | 2018-01-15T09:53:42 | R | UTF-8 | R | false | false | 2,510 | r | readData.R | # Reading the data from the Understanding Society survey
# 29 January 2017
# All the data (about 4Gb) must be downloaded from the UK Data Service website and stored
# in the data directory as a subdirectory called UKDA-6614-tab.
# Do not change the original data.
# For our purposes we mostly need the individual level data from adult questionnaires.
# These are the indresp files.
# Let's read into R the file from wave 1 (us_w1 directory).
# There are several ways of doing this in R.
# 1) Base R
? read.table
UndSoc1 <- read.table("data/UKDA-6614-tab/tab/us_w1/a_indresp.tab",
                      header = TRUE,
                      stringsAsFactors = FALSE)
# Question: What is the difference between read.table, read.csv, read.csv2,
# read.delim and read.delim2?
# 2) We can also use the functions from the readr package. The main advantage
# is that it works faster which is helpful with large files.
library(readr)
? read_table
? read_tsv
UndSoc1 <- read_tsv("data/UKDA-6614-tab/tab/us_w1/a_indresp.tab")
# Exercises for reading the data in R.
# 1. There are several files in the exData folder. Read them into R in the following order:
# Table0.txt, Table3.txt, Table4.txt. states2.csv, tableExcel.xlsx.
#
# Note that the last file is an Excel file and you cannot read it with read.table. You'll
# need to find out how to read Excel files into R. The file has two sheets that
# need to be read into R as separate data frames.
# 2. This link has the full text of Shakespeare's Hamlet:
# http://www.gutenberg.org/cache/epub/2265/pg2265.txt
# Read it into R using readr (you need to find a function for this in the documentation).
# The result must be a character vector that starts with the actual text of the play.
# Next: talk about Git and Github
# Next: data manipulation in R and dplyr.
#
# Take this DataCamp module: https://www.datacamp.com/courses/dplyr-data-manipulation-r-tutorial
# and/or read ch. 5 from Data Science:
# http://r4ds.had.co.nz/transform.html (and do the exercises)
# By Friday you need to know the following:
# 1) the pipe operator
# 2) filter() and select()
# 3) arrange()
# 4) mutate()
# 5) group_by()
# 6) summarize()
# If you have a bit more time also read ch.21 from Data Science on iteration:
# http://r4ds.had.co.nz/iteration.html
# Preparing and saving a short abstract from the data.
# Bug fix: select() and %>% come from dplyr, which was never attached above
# (only readr was). Without this the pipeline below fails with
# "could not find function" errors.
library(dplyr)
W1 <- UndSoc1 %>%
  select(pidp, a_sex, a_dvage, a_ukborn, a_racel, a_hlht:a_hlwtk, a_vote1:a_vote6)
write_csv(W1, "exData/W1.csv")
|
61551680b8c2edc090464eada8f1e7a3d6b2f85c | 05023be47e53613170dc2c5a52c602fa6e34276b | /R/ZI.plot.R | dfcd3f34d55abaffaf6073a0946f8cf2754dc6a7 | [] | no_license | NathanDeFruyt/WormTools | 087f7558dbf098ec965b0369840ef8ddb356ece0 | 5822d14e79816905c3b9166bec31c27bd82def45 | refs/heads/master | 2022-11-05T01:13:51.282285 | 2020-06-25T13:48:58 | 2020-06-25T13:48:58 | 274,924,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,215 | r | ZI.plot.R | #' Plot in Nathan's favourite style (hint: There are options!).
#'
#' Builds a ggplot in a house style: a boxplot or violin plot with the raw
#' data points jittered on top. Be sure ggplot2, ggthemes and crayon are
#' installed (they are attached inside the function).
#'
#' @param data Here goes your data as a data-frame.
#' @param x x-values (format: either data$column or column)
#' @param y y-values (format: idem)
#' @param group How you'd like to group your variables.
#' @param facet_wrap (T or F) Would you like to have several panels?
#'   NOTE(review): this argument is accepted but never used in the body.
#' @param facet Optional: If you chose for several panels, here you indicate
#'   what the panels should include. NOTE(review): also currently unused.
#' @param ylab Labels the y-axis
#' @param xlab Labels the x-axis
#' @param fill How you'd like to fill the plots. Different from colour.
#' @param colour Default == fill. How you'd like to colour the outlines of
#'   your plotted elements (e.g. outline of boxes in the boxplot)
#' @param filling 'manual' or 'automatic' (Default: automatic) How would you
#'   like to fill the plots? Manually or automatically?
#' @param colours If you opted filling = 'manual', provide a vector of the
#'   colours you'd like. This should have the same length as the number of
#'   objects you graph.
#' @param type 'Boxplot' or 'violin' for now.
#' @return A ggplot object. If `type` matches neither branch, `plot` is never
#'   assigned and the final line errors.
#' @export
#' @examples
#' ZI.plot
ZI.plot <- function(data, x, y, group, fill, colour=fill, facet_wrap = F, facet = '', ylab = 'y', xlab = 'x', filling = 'automatic', colours, type = 'Boxplot'){
  # Attach dependencies at call time; crayon only supplies red() for the
  # reminder message printed in the 'manual' branches.
  library(crayon)
  library(ggthemes)
  library(ggplot2)

  # NOTE(review): attach() is never paired with detach(), so every call
  # leaves another copy of `data` on the search path. The aes() mappings
  # below appear to use the function arguments rather than attached columns,
  # so this attach may be removable -- confirm before changing.
  attach(data)
  # Boxplot flavour: box per group plus jittered points.
  if (type == 'Boxplot'){
    # Automatic filling: let ggplot pick the palette.
    if (filling == 'automatic'){
      plot <- ggplot(NULL)+
        geom_boxplot(data = data, aes(x=x, y=y, group=group, fill = fill, colour = colour), alpha= 0.7, width = 0.7) +
        geom_jitter(data = data, stat="identity", width = 0.2, aes(x=x, y=y, fill=fill, group=group, colour = colour), alpha = 0.5, cex=1, pch=16)+
        theme_few(base_size=14) +
        theme(legend.position="none") +
        labs(y = ylab, x = xlab)+
        theme(axis.text.x = element_text(angle=45, hjust = 1))
    } # if automatic
    # Manual filling: user supplies one colour per plotted object.
    if (filling == 'manual'){
      # Remind the user that the `colours` vector must be supplied.
      cat(red(paste0('Do not forget to add a vector of your ', length(levels(as.factor(fill))), ' favourite colours')))
      plot <- ggplot(NULL)+
        geom_boxplot(data = data, aes(x=x, y=y, group=group, fill = fill, colour = colour), alpha= 0.7, width = 0.7) +
        geom_jitter(data = data, stat="identity", width = 0.2, aes(x=x, y=y, fill=fill, group=group, colour = colour), alpha = 0.5, cex=1, pch=16)+
        theme_few(base_size=14) +
        scale_fill_manual(values=colours)+
        scale_colour_manual(values = colours)+
        theme(legend.position="none") +
        labs(y = ylab, x = xlab)+
        theme(axis.text.x = element_text(angle=45, hjust = 1))
    } # if manual
  } # if boxplot
  # Violin flavour: density shape per group plus jittered points.
  if (type == 'violin'){
    if (filling == 'automatic'){
      # NOTE(review): geom_violin's draw_quantiles normally takes numeric
      # quantiles such as c(0.25, 0.5, 0.75); TRUE may not be a valid value
      # in current ggplot2 -- confirm.
      plot <- ggplot(NULL)+
        geom_violin(data = data, aes(x=x, y=y, fill = fill , group = group, colour = colour), draw_quantiles= TRUE, alpha=0.5)+
        geom_jitter(data = data, width = 0.2, stat="identity", aes(x=x, y=y, fill=fill, group=group, colour = colour), alpha = 0.8, cex=1.5, pch=21)+
        theme_few(base_size=14) +
        theme(legend.position="none") +
        labs(y = ylab, x = xlab)+
        theme(axis.text.x = element_text(angle=45, hjust = 1))
    } # if fill == automatic
    if (filling == 'manual'){
      # Remind the user that the `colours` vector must be supplied.
      cat(red(paste0('Do not forget to add a vector of your ', length(levels(as.factor(fill))), ' colours')))
      # NOTE(review): same draw_quantiles= TRUE concern as above.
      plot <- ggplot(NULL)+
        geom_violin(data = data, aes(x=x, y=y, fill = fill , group = group, colour = colour), draw_quantiles= TRUE, alpha=0.7)+
        geom_jitter(data = data, stat="identity", width = 0.2, aes(x=x, y=y, fill=fill, group=group, colour = colour), alpha = 0.5, cex=1.5, pch=21)+
        theme_few(base_size=14) +
        theme(legend.position="none") +
        scale_fill_manual(values=colours)+
        scale_colour_manual(values = colours)+
        labs(y = ylab, x = xlab)+
        theme(axis.text.x = element_text(angle=45, hjust = 1))
    } # if fill == manual
  } # if type == violin
  # Hand the assembled plot back to the caller.
  return(plot)
}
d576fc8acc74f447dae2582c42d59aaeaf8b20c8 | 58e8b007f00ff8868b53772844f3b0888b6f2876 | /man/cyclize.Rd | 70ec6f2d779b7c88010e1b1d952c1404cb4625dd | [] | no_license | ckenaley/trackter | 650989f740faefa715c4d322082b07eac9b70f99 | a1e8d02bd921814ea71d1f6257fe0bfc3da619cc | refs/heads/master | 2022-02-08T16:51:52.706637 | 2022-02-07T21:52:21 | 2022-02-07T21:52:21 | 120,318,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,247 | rd | cyclize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kin.2d.R
\name{cyclize}
\alias{cyclize}
\title{Calculate waveform cycles}
\usage{
cyclize(x, y, start = "peak", ...)
}
\arguments{
\item{x}{numeric; x position}
\item{y}{numeric; y position}
\item{start}{character; where the cycles should start either 'peak' or `trough'. See Details.}
\item{...}{additional arguments to be passed to \code{\link{features}}}
}
\value{
A a data table including the following:
\itemize{
\item \code{cyc}: The cycle number.
\item \code{pos}: the position/index values of the x data (not the x values themselves).
}
}
\description{
Finds discrete cycles of wave-like data using critical points (peaks and troughs).
}
\details{
If "'start='peak'", the cycles start at critical points with high values, If "'start='trough'", cycles start at critical points with low values.
}
\examples{
library(ggplot2)
n <- 200
d <- data.frame(
x <- sort(runif(n)),
y <- exp(-0.2 * sin(10*pi*x)) + rnorm(n, sd=0.05)
)
#start at peaks
c <- cyclize(d$x,d$y)
d$cyc <- c$cyc
qplot(d=d,x,y,col=as.factor(cyc))
#start at troughs
c <- cyclize(d$x,d$y,start="trough")
d$cyc <- c$cyc
qplot(data=d,x,y,col=as.factor(cyc))
}
\seealso{
\code{\link{features}}
}
|
67f5371ebc78adcaadb65663070eed32843c104e | a50d24ab0913759f2ac5f4ec8c19f64801ea6b21 | /man/split_fill.Rd | 233dbdaf0ac6386f1b6c82127e5d5a65ece54212 | [] | no_license | slin30/wzMisc | 960eb9105ddc2306e9bd3d70a567bfb37e8b703c | 5f24fd47ce6e4b61ba8fbb28943acf03101ce009 | refs/heads/master | 2023-02-08T03:51:39.517767 | 2023-01-26T22:21:37 | 2023-01-26T22:21:37 | 68,938,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,445 | rd | split_fill.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split_fill.R
\name{split_fill}
\alias{split_fill}
\title{Split and fill a chr vector}
\usage{
split_fill(dat, targ, split_on, IDcol, rebind = FALSE, keep.targ = FALSE, ...)
}
\arguments{
\item{dat}{a data.table}
\item{targ}{chr; vector of length 1 denoting column that contains data to be split}
\item{split_on}{chr; what pattern should be used to perform the split?}
\item{IDcol}{chr; vector of length 1 denoting the column in \emph{dat} containing the ID to be used for melting}
\item{rebind}{logical; should the original columns be appended back to the output? Defaults to \code{FALSE}}
\item{keep.targ}{logical; only used if \emph{rebind} = \code{TRUE}; should the column that was split on be kept?}

\item{...}{Other (preferably named) arguments to pass on to \code{strsplit} aside from \emph{split}}
}
\value{
A melted data.table using \emph{IDcol} as \code{id.var} for \code{melt.data.table},
with \emph{targ} split by \emph{split_on}.
If \code{rebind == TRUE}, will also return the original columns, with a single \emph{IDcol} as denoted
in input. This is performed via a \code{data.table} ad-hoc join, using \emph{IDcol} in \code{j}. The input
\emph{targ} column will be returned as well, if \emph{keep.targ} is \code{TRUE}.
}
\description{
Split a chr vector based on sep, return melted DT by ID
}
\details{
This is a convenience-convenience (not a typo) wrapper around \code{data.table::tstrsplit}, taking advantage
of the performance of \code{data.table::Ctranspose}, and adding faculties to melt and rejoin selectively.
}
\note{
\emph{targ} currently is limited to a vector of length 1, as is \emph{IDcol}. This is likely to change in the future, to
make this function more flexible and consistent with the capabilities of \code{melt.data.table}.
Use \code{...} to pass e.g. \code{fixed = TRUE} or \code{perl = TRUE} to \code{strsplit}. See documentation
for \code{\link[data.table]{tstrsplit}}.
}
\examples{
library(data.table)
dt <- data.table(
ID = 1:10,
targ = sapply(1:10, function(f)
paste0(
LETTERS[1:5],
f,
collapse = "|"
)
)
)
head(split_fill(dat = dt, targ = "targ", split_on = "\\\\|", IDcol = "ID"))
#Demonstrating rebind
dt[, targ_additional := targ]
head(split_fill(dat = dt, targ = "targ", split_on = "\\\\|", IDcol = "ID", rebind = TRUE))
}
|
271c2a095bb1ea9d35ce4a381cfc9e016b2e5fd0 | 17de79e08b7d6836c96d717e5086c58223d8741f | /WGCNA_analysis/WGCNA_1.R | 5f9b44f9bdd735dc3bd1070b819a3e1a502cf328 | [] | no_license | shwatkins/PhD | fcd884a09cf3ae43ab51a361a4344251d75dfe17 | 473e9c3a19544eaeb609cf7a0bba6a80c53666fd | refs/heads/master | 2020-08-12T17:01:42.684693 | 2020-01-24T12:33:49 | 2020-01-24T12:33:49 | 214,805,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,409 | r | WGCNA_1.R | ### WGCNA code for AIRES
## F7 timepoint
# Standalone network construction
# adapted from https://horvath.genetics.ucla.edu/html/CoexpressionNetwork/Rpackages/WGCNA/
## DNAm betas have been normalised, filtered, and adjusted for age, sex and cell counts
# Loading data
# install.packages(WGCNA)
library(WGCNA);
# The following setting is important, do not omit.
options(stringsAsFactors = FALSE);
## DATA LOADING
# Load the AIRES F7 methylation dataset
# load() restores the objects saved in the .Rdata file; `lnames` records
# which object names were loaded (printed on the next line).
lnames = load("/path/to/F7_450k/F7readyfor450kcor.Rdata")
lnames
# Quick eyeball check of the loaded matrix (see the colnames/rownames
# checks below: columns are CpGs, rows are samples)
F7Data.sort[1:10,1:10]
## 2. DATA CLEANING
# If there are columns etc that are not needed (eg probe quality)
# then remove here. Only methyl values in the matrix.
F7data1 = as.data.frame(F7Data.sort)
colnames(F7data1)[1:5]
# CpGs
rownames(F7data1)[1:5]
# samples
F7data1[1:10,1:10]
# We first check for genes and samples with too many missing values.
# If the last statement returns TRUE, all genes have passed the cuts.
gsg = goodSamplesGenes(F7data1, verbose = 3);
gsg$allOK
# If not, we remove the offending genes and samples from the data:
if (!gsg$allOK)
{
# Optionally, print the gene and sample names that were removed:
if (sum(!gsg$goodGenes)>0)
printFlush(paste("Removing genes:", paste(names(F7data1)[!gsg$goodGenes], collapse = ", ")));
if (sum(!gsg$goodSamples)>0)
printFlush(paste("Removing samples:", paste(rownames(F7data1)[!gsg$goodSamples], collapse = ", ")));
# Remove the offending genes and samples from the data:
F7data1 = F7data1[gsg$goodSamples, gsg$goodGenes]
}
# How many CpGs are left?
dim(F7data1)
#======================================================
# Code chunk 7
# We now read in the trait data and match the samples
#=====================================================================================
traitData = read.csv("/path/to/F7NetworkTraits.txt", header = T);
dim(traitData)
names(traitData)
# Form a data frame (F7Samples) analogous to expression data that will hold the clinical traits.
# first create a vector of the sample IDs in the methylation data:
F7Samples = rownames(F7data1);
# make a vector traitRows that identifies sample IDs that are present in both the
# methylation and trait datasets:
traitRows = match(F7Samples, traitData$ID);
length(traitRows)
#Remove the 4 NAs which pop up where the siblings were:
traitRows <- na.omit(traitRows)
length(traitRows)
# makes a vector datTraits which is the trait data read in from the csv (traitData),
# the samples are restricted to those who also have methylation data,
# and the sample IDs are removed as a column and made into the rownames:
datTraits = traitData[traitRows,]
head(datTraits)
dim(datTraits)
# rebuild without the ID column (column 1) ...
datTraits = traitData[traitRows, -1];
head(datTraits)
dim(datTraits)
# ... and use the IDs as rownames instead
rownames(datTraits) = traitData[traitRows, 1];
head(datTraits)
dim(datTraits)
collectGarbage();
#=====================================================================================
# finally, make F7data1 the same length as datTraits (as F7 Data currently has more
# samples)
#=====================================================================================
dim(F7data1)
traitSamples = rownames(datTraits);
length(traitSamples)
# subset the methylation matrix to the samples that also have trait data
F7data1 <- F7data1[traitSamples,]
dim(F7data1)
head(datTraits)
save(F7data1, datTraits, file = "/path/to/F7_WGCNA/F7_full450k_WGCNA.RData")
#=====================================================================================
# Sample clustering
#=====================================================================================
# Next we cluster the samples (in contrast to clustering genes that will
# come later) to see if there are any obvious outliers.
sampleTree = hclust(dist(F7data1), method = "average");
# Plot the sample tree: Open a graphic output window of size 12 by 9 inches
# The user should change the dimensions if the window is too large or too small.
sizeGrWindow(12,9)
pdf(file = "/path/to/F7_WGCNA/sampleClusteringF7_WGCNA_full450k.pdf", width = 12, height = 9);
par(cex = 0.6);
par(mar = c(0,4,4,0))
plot(sampleTree, main = "Sample clustering to detect outliers, F7,\n outliers removed, and adjusted for covariates in linear model", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
dev.off()
#================================================
# Code chunk 6
# Are there outlier samples?
# Choose a height cut that will remove the offending sample, say [15] (the red
# line in the plot), and use a branch cut at that height.
#=====================================================================================
# Plot a line to show the cut
sizeGrWindow(12,9)
pdf(file = "/path/to/F7_WGCNA/sampleClusteringF7WithProposedHeightCut-full450k_WGCNA.pdf", width = 12, height = 9);
par(cex = 0.6);
par(mar = c(0,4,2,0))
plot(sampleTree, main = "Sample clustering to detect outliers, F7, with height cut", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2);
abline(h = 40, col = "red")
dev.off()
# Determine cluster under the line (static tree cut at the same height
# as the red line above)
clust = cutreeStatic(sampleTree, cutHeight = 40, minSize = 10)
table(clust)
# clust 1 contains the samples we want to keep.
keepSamples = (clust==1)
F7DataExperimental = F7data1[keepSamples, ]
nGenes = ncol(F7DataExperimental)
nSamples = nrow(F7DataExperimental)
F7DataExperimental[1:10,1:10]
# re-cluster after removing the outlier branch and re-plot
sampleTree = hclust(dist(F7DataExperimental), method = "average");
sizeGrWindow(12,9)
pdf(file = "/path/to/F7_WGCNA/sampleClusteringF7WithHeightCut-full450k_WGCNA.pdf", width = 12, height = 9);
par(cex = 0.6);
par(mar = c(0,4,2,0))
plot(sampleTree, main = "Sample clustering to detect outliers, F7, with height cut", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2);
# NOTE(review): this red line is at 42.5 but the actual branch cut above
# used cutHeight = 40 -- confirm which height is intended.
abline(h = 42.5, col = "red")
dev.off()
F7data1 <- F7DataExperimental
names <- rownames(F7data1)
length(names)
# realign the trait rows to the samples that survived the height cut
datTraits <- datTraits[match(names, rownames(datTraits)),]
save(F7data1, datTraits, file = "/path/to/F7_WGCNA/F7_heightcut_WGCNA_full450k.RData")
# F7WGCNAdata now contains the expression data ready for network analysis.
#**************************************
#**************************************
## PHASE 2 - CONSTRUCTING THE NETWORK
#**************************************
#**************************************
Sys.time()
#workingDir = ".";
#setwd(workingDir);
# Load the WGCNA package
library(WGCNA)
# The following setting is important, do not omit.
options(stringsAsFactors = FALSE);
# Allow multi-threading within WGCNA. This helps speed up certain calculations.
# At present this call is necessary.
# Any error here may be ignored but you may want to update WGCNA if you see one.
# Caution: skip this line if you run RStudio or other third-party R environments.
# See note above.
enableWGCNAThreads()
# Load the data saved in the first part
lnames = load(file = "/path/to/F7_WGCNA/F7_heightcut_WGCNA_full450k.RData");
#The variable lnames contains the names of loaded variables.
lnames
#=====================================================================================
#
# Code chunk 2
# Choosing the soft threshold power
#=====================================================================================
# Choose a set of soft-thresholding powers
powers = c(c(1:20))
# Call the network topology analysis function
# (signed network; returns fit indices for every candidate power)
sft = pickSoftThreshold(F7data1, powerVector = powers, networkType = "signed", verbose = 5)
# Plot the results:
sizeGrWindow(9, 5)
pdf(file = "/path/to/F7_WGCNA/F7_BWScaleFreeTopologyPlot_full450k.pdf", width = 12, height = 9);
# NOTE(review): mfrow = c(1,2) reserves two panels but only one plot is
# drawn before dev.off() -- the second panel of this PDF will be blank.
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence, F7 full 450k"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
dev.off()
# Mean connectivity as a function of the soft-thresholding power
sizeGrWindow(9, 5)
pdf(file = "/path/to/F7_WGCNA/F7_BWConnectivityPlot_full450k.pdf", width = 12, height = 9);
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity, F7 full 450k"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
|
8da415203c2659a8c131bccc8406b48c31f99f87 | 412cc2a42a1e62749cdc836dd66259432db252a3 | /run_analysis.R | 8f0d5f4937d14d9a1916fe41a1b0c615b3bd5d2a | [] | no_license | angelacai01/Getting-and-Cleaning-Data | 9430b7dc87b2ceb9e7fdf8d193401c42655b84fa | f0ae2408d785d363d83c82937d581f95070291d2 | refs/heads/master | 2021-01-10T13:51:47.138739 | 2015-05-23T02:12:41 | 2015-05-23T02:12:41 | 36,102,308 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,414 | r | run_analysis.R | # Create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# #1. Merges the training and the test sets to create one data set.
# 1-1 read training and test data into the table
trainXData <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE)
testXData <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE)
trainYData <- read.table("./UCI HAR Dataset/train/y_train.txt", header = FALSE)
testYData <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE)
subjectTrainData <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE)
subjectTestData <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE)
# 1-2 merge training and test data
allXData <- rbind(trainXData, testXData)
allYData <- rbind(trainYData, testYData)
allSubjectData <- rbind(subjectTrainData, subjectTestData)
#head(allXData)
#head(allYData)
#head(allSubjectData)
#dim(trainXData)
#dim(testXData)
#dim(allXData)
# #2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 2-1 read features data into the table
featuresData <- read.table("./UCI HAR Dataset/features.txt")
# 2-2 find feature name that matches mean or standard deviation
# and store the indices of the element in a vector
featureIndexData <- grep("-mean\\(\\)|-std\\(\\)", featuresData[,2])
#featureIndexData
# 2-3 extract the X data only for those coloumns exists in the featureIndex
allXData <- allXData[, featureIndexData]
# 2-4 replace the extracted merge X data column name with feature name from the features data
# #3. Uses descriptive activity names to name the activities in the data set
# 3-1 read the activity file that contains the activity name into the table
activityData <- read.table("./UCI HAR Dataset/activity_labels.txt")
# 3-2 replace the merge Y data first column with the activity name
#head(allYData)
allYData[, 1] <- activityData[allYData[, 1], 2]
# #4. Appropriately labels the data set with descriptive variable names.
# 4-1 labels extracted merge X data column name with descriptive variable names which is the feature name
names(allXData) <- gsub("\\(|\\)", "", (featuresData[featureIndexData, 2]))
# 4-2 labels merge Y and S data column name with descriptive variable names
names(allYData) <- "ACTIVITY"
names(allSubjectData) <- "SUBJECT"
#head(allXData)
#head(allYData)
#head(allSubjectData)
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# 5-1 compute the mean for the variables in the allXData, grouped according to
# the subject and activity in the allSubjectData and allYData
tidyData <- aggregate(allXData,list(allSubjectData$SUBJECT, allYData$ACTIVITY), mean)
names(tidyData)[1] <- "SUBJECT"
names(tidyData)[2] <- "ACTIVITY"
#head(tidyData)
#dim(tidyData)
# 5-2 write tidydata to a text file
write.table(tidyData, "tidy_data.txt", row.names = FALSE)
|
6c7167bf8e512b5c5058b4143f6f899e695bc24d | a176626eb55b6525d5a41e2079537f2ef51d4dc7 | /Uni/Projects/code/P031.MIAC_PM/v2.2015/CS04.mod1and2.TR.PM25_2000-2002.r | 91c07af39d453c22c9a8d01c37297a24203d4eee | [] | no_license | zeltak/org | 82d696b30c7013e95262ad55f839998d0280b72b | d279a80198a1dbf7758c9dd56339e8a5b5555ff2 | refs/heads/master | 2021-01-21T04:27:34.752197 | 2016-04-16T04:27:57 | 2016-04-16T04:27:57 | 18,008,592 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 40,439 | r | CS04.mod1and2.TR.PM25_2000-2002.r | ###############
#LIBS
###############
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
#sourcing
# Helpers used throughout: splitdf() (train/test split, returns
# $trainset/$testset) and rmse() -- presumably root-mean-square error of
# residuals, used for all the RMSPE entries below (TODO confirm in rmspe.r)
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
# Stage-1 calibration data (co-located AOD / PM2.5 observations), year 2000
m1.all <-readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1.Tr.2000.rds")
#-------------------->> RES TABLE
# One-row results table; every metric below is filled in by column name.
# Build the frame from the name vector itself so the column count always
# matches the names: the previous hard-coded ncol = 48 exceeded the 47
# names supplied, which silently left a stray NA-named column.
res.cols <- c(
"m1.raw","m1.raw.space","m1.raw.time","m1.time","m1.time.space","m1.time.time","m1.space","m1.space.space","m1.space.time","m1.noaod","m1.noaod.space","m1.noaod.time"
,"m1.R2","m1.rmspe","m1.R2.space","m1.R2.time","m1.rmspe.space" #mod1 Full
,"m1cv.R2","m1cv.I","m1cv.Ise","m1cv.slope","m1cv.slopese","m1cv.rmspe","m1cv.R2.space","m1cv.R2.time","m1cv.rmspe.space" #mod1 CV
,"m1cvloc.R2","m1cvloc.I","m1cvloc.Ise","m1cvloc.slope","m1cvloc.slopese","m1cvloc.rmspe","m1cvloc.R2.space","m1cvloc.R2.time","m1cvloc.rmspe.space"#loc m1
,"m2.R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"m3.R2","m3.rmspe","m3.R2.space","m3.R2.time","m3.rmspe.space" #mod3
,"m3.I","m3.Ise","m3.slope","m3.slopese")#Extra
res <- data.frame(matrix(NA_real_, nrow = 1, ncol = length(res.cols)))
colnames(res) <- res.cols
# pollutant label used to index the row below (res[res$type == "PM25", ...])
res$type <- c("PM25")
# Stage 1 (2000): calibrate satellite AOD against monitored PM2.5
names(m1.all)
# keep only rows with complete key covariates
m1.all<-filter(m1.all,!is.na(Temp_C))
m1.all<-filter(m1.all,!is.na(NDVI))
summary(m1.all)
#base model for stage 1
#clean data and exclude bad values
# +.1 keeps zero road-density cells finite on the log scale
m1.all$logroad<-log(m1.all$Mjrrdden_1 +.1)
#base model for stage 1
# Mixed model: land-use/meteorology fixed effects plus an AOD*PBL-height
# interaction, with day-level (nested in region) random intercepts and
# random AOD and temperature slopes.
m1.formula <- as.formula(PM25 ~ aod+Temp_C+wdsp+NDVI+dist_PE+pcturb_1km+logroad+nei05nonpntcntypm25+pop_sqkm+elev_m+RH+visib+aod*hpbl+hpbl+NOXsum+PM10sum+SO2sum+pctmd_1km + pctld_1km+pctop_1km+ pctdf_1km+pctmf_1km+pctev_1km+ pctcr_1km+pctpa_1km+pctsh_1km+ pctgr_1km+ pm25stge30_15k + pm25stlt30_3k+pm10stge30_15k + pm10stlt30_3k +noxstge30_15k+noxstlt30_3k+ so2stge30_15k+so2stlt30_3k+ (1 +aod+Temp_C|day/region))
m1_sc <- lmer(m1.formula,data=m1.all)
# refresh fitted values (drop any stale pred.m1 column first)
m1.all[,pred.m1 := NULL]
m1.all$pred.m1 <- predict(m1_sc)
# in-sample R2 of observed vs fitted PM2.5
res[res$type=="PM25", 'm1.R2'] <- print(summary(lm(PM25~pred.m1,data=m1.all))$r.squared)
#RMSPE
res[res$type=="PM25", 'm1.rmspe'] <- print(rmse(residuals(m1_sc)))
#spatial
# spatial R2: regress site-level mean observed on site-level mean predicted
spatialall<-m1.all %>%
group_by(SiteCode) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.all.s <- lm(barpm ~ barpred, data=spatialall)
res[res$type=="PM25", 'm1.R2.space'] <-print(summary(lm(barpm ~ barpred, data=spatialall))$r.squared)
res[res$type=="PM25", 'm1.rmspe.space'] <- print(rmse(residuals(m1.fit.all.s)))
#temporal
# temporal R2: deviations of each observation from its site's own mean
tempoall<-left_join(m1.all,spatialall)
tempoall$delpm <-tempoall$PM25-tempoall$barpm
tempoall$delpred <-tempoall$pred.m1-tempoall$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempoall)
res[res$type=="PM25", 'm1.R2.time']<- print(summary(lm(delpm ~ delpred, data=tempoall))$r.squared)
saveRDS(m1.all,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1C.TR.PM25.pred.2000.rds")
#---------------->>>> CV
# Out-of-sample validation: ten random train/test splits (splitdf() comes
# from the sourced CV_splits.r and returns $trainset/$testset). For each
# split the stage-1 mixed model is refit on the training part and used to
# predict the held-out part; re.form=NULL keeps the random effects and
# allow.new.levels=TRUE falls back to the fixed effects for day/region
# levels unseen in training. The ten hand-copied "s1".."s10" blocks are
# replaced by a single loop performing the identical sequence of calls.
cv.parts <- vector("list", 10)
for (i in seq_len(10)) {
  cv.split <- splitdf(m1.all)
  cv.test <- cv.split$testset
  cv.train <- cv.split$trainset
  cv.fit <- lmer(m1.formula, data = cv.train)
  cv.test$pred.m1.cv <- predict(object = cv.fit, newdata = cv.test,
                                allow.new.levels = TRUE, re.form = NULL)
  cv.test$iter <- paste0("s", i)  # keep the original "s1".."s10" labels
  cv.parts[[i]] <- cv.test
}
#BIND 1 dataset
m1.all.cv <- data.table(do.call(rbind, cv.parts))
saveRDS(m1.all.cv,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1cv.TR.PM25.2000.rds")
# cleanup (remove from WS) objects from CV
rm(cv.parts, cv.split, cv.test, cv.train, cv.fit)
#table updates
# CV (out-of-sample) performance of the stage-1 model
m1.fit.all.cv<-lm(PM25~pred.m1.cv,data=m1.all.cv)
res[res$type=="PM25", 'm1cv.R2'] <- print(summary(lm(PM25~pred.m1.cv,data=m1.all.cv))$r.squared)
# calibration intercept/slope (and their SEs) of observed ~ CV-predicted
res[res$type=="PM25", 'm1cv.I'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.all.cv))$coef[1,1])
res[res$type=="PM25", 'm1cv.Ise'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.all.cv))$coef[1,2])
res[res$type=="PM25", 'm1cv.slope'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.all.cv))$coef[2,1])
res[res$type=="PM25", 'm1cv.slopese'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.all.cv))$coef[2,2])
#RMSPE
res[res$type=="PM25", 'm1cv.rmspe'] <- print(rmse(residuals(m1.fit.all.cv)))
#spatial
# NOTE(review): the site means below use pred.m1 (the full-model fit), not
# pred.m1.cv -- confirm that this is intended for the "spatial CV" metric.
spatialall.cv<-m1.all.cv %>%
group_by(SiteCode) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.all.cv.s <- lm(barpm ~ barpred, data=spatialall.cv)
res[res$type=="PM25", 'm1cv.R2.space'] <- print(summary(lm(barpm ~ barpred, data=spatialall.cv))$r.squared)
res[res$type=="PM25", 'm1cv.rmspe.space'] <- print(rmse(residuals(m1.fit.all.cv.s)))
#temporal
# temporal CV R2: deviations from site means, using the CV predictions
tempoall.cv<-left_join(m1.all.cv,spatialall.cv)
tempoall.cv$delpm <-tempoall.cv$PM25-tempoall.cv$barpm
tempoall.cv$delpred <-tempoall.cv$pred.m1.cv-tempoall.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempoall.cv)
res[res$type=="PM25", 'm1cv.R2.time'] <- print(summary(lm(delpm ~ delpred, data=tempoall.cv))$r.squared)
gc()
#### mod2
# Stage 2 (2000): apply the calibrated stage-1 model to every grid cell/day
# with an AOD retrieval (no PM monitor needed).
m2.all <- readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod2.Tr.2000.rds")
#generate predictions
m2.all$logroad<-log(m2.all$Mjrrdden_1 +.1)
m2.all<-filter(m2.all,!is.na(Temp_C))
summary(m2.all)
# random effects used where a day/region level exists; unseen levels fall
# back to the fixed-effects part (allow.new.levels=TRUE)
m2.all[, pred.m2 := predict(object=m1_sc,newdata=m2.all,allow.new.levels=TRUE,re.form=NULL)]
summary(m2.all$pred.m2)
#delete impossible values (non-positive or extreme predictions)
m2.all <- m2.all[pred.m2 > 0.00000000000001 , ]
m2.all <- m2.all[pred.m2 < 200 , ]
saveRDS(m2.all,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod2.AQ.PM25.pred.2000.rds")
#-------------->prepare for mod3
# bimonthly period index derived from Month: (Month+1) %/% 2
m2.all[, bimon := (Month + 1) %/% 2]
setkey(m2.all,day, GUID)
# keep only cells that have a (monitor-based) mean PM value
m2.all.2000<-m2.all[!is.na(meanPM)]
rm(m2.all)
gc()
#2000
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth <- lme(pred.m2 ~ meanPM,random = list(GUID= ~1 + meanPM),control=lmeControl(opt = "optim"), data= m2.all.2000 )
#correlate to see everything from mod2 and the mpm works
m2.all.2000[, pred.t31 := predict(m2.smooth)]
m2.all.2000[, resid := residuals(m2.smooth)]
print(summary(lm(pred.m2~pred.t31,data=m2.all.2000))$r.squared)
#split the files to the separate bi monthly datasets
# (only bimon 2-6 appear below; presumably bimon 1 has no usable data --
# TODO confirm)
Tall_bimon2 <- subset(m2.all.2000 ,m2.all.2000$bimon == "2")
Tall_bimon3 <- subset(m2.all.2000 ,m2.all.2000$bimon == "3")
Tall_bimon4 <- subset(m2.all.2000 ,m2.all.2000$bimon == "4")
Tall_bimon5 <- subset(m2.all.2000 ,m2.all.2000$bimon == "5")
Tall_bimon6 <- subset(m2.all.2000 ,m2.all.2000$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_2 <- gam(resid ~ s(Long,Lat), data= Tall_bimon2 )
fit2_3 <- gam(resid ~ s(Long,Lat), data= Tall_bimon3 )
fit2_4 <- gam(resid ~ s(Long,Lat), data= Tall_bimon4 )
fit2_5 <- gam(resid ~ s(Long,Lat), data= Tall_bimon5 )
fit2_6 <- gam(resid ~ s(Long,Lat), data= Tall_bimon6 )
#get the predicted-fitted (remove the smoothed spatial residual surface)
Xpred_2 <- (Tall_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (Tall_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (Tall_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (Tall_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (Tall_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): this concatenation assumes rows are ordered by bimon; the
# day,GUID key sorts by day, which makes bimon monotone -- confirm if the
# key ever changes.
m2.all.2000$pred.t32 <- c(Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.all.2000,day, GUID)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_all <- lme(pred.t32 ~ meanPM ,random = list(GUID= ~1 + meanPM ),control=lmeControl(opt = "optim"),data= m2.all.2000 )
m2.all.2000[, pred.t33 := predict(Final_pred_all)]
#check correlations
res[res$type=="PM25", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.all.2000))$r.squared)
#------------------------>>>foo
#import mod3
# Stage 3 (2000): extend predictions to grid cell/days using the local
# mixed model fitted above plus the bimonthly spatial residual surfaces.
data.m3.2000 <- readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod3.Tr.2000.rds")
#for PM25
data.m3.2000 <- select(data.m3.2000,day,GUID,Month,meanPM,Long,Lat)
data.m3.2000[, bimon := (Month + 1) %/% 2]
setkey(data.m3.2000,day, GUID)
data.m3.2000<-data.m3.2000[!is.na(meanPM)]
#generate m.3 initial pred
data.m3.2000$pred.m3.mix <- predict(Final_pred_all,data.m3.2000)
#create unique grid
# one row per grid cell with its mean coordinates, for the GAM predictions
ugrid <-data.m3.2000 %>%
group_by(GUID) %>%
summarise(Long = mean(Long, na.rm=TRUE), Lat = mean(Lat, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3.2000_bimon2 <- data.m3.2000[bimon == 2, ]
data.m3.2000_bimon3 <- data.m3.2000[bimon == 3, ]
data.m3.2000_bimon4 <- data.m3.2000[bimon == 4, ]
data.m3.2000_bimon5 <- data.m3.2000[bimon == 5, ]
data.m3.2000_bimon6 <- data.m3.2000[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
# evaluate each bimonthly residual surface at every grid cell
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
# left-join each bimonthly slice with its cell-level gpred by GUID
setkey(uniq_gid_bimon2,GUID)
setkey(data.m3.2000_bimon2,GUID)
data.m3.2000_bimon2 <- merge(data.m3.2000_bimon2, uniq_gid_bimon2[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon3,GUID)
setkey(data.m3.2000_bimon3,GUID)
data.m3.2000_bimon3 <- merge(data.m3.2000_bimon3, uniq_gid_bimon3[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon4,GUID)
setkey(data.m3.2000_bimon4,GUID)
data.m3.2000_bimon4 <- merge(data.m3.2000_bimon4, uniq_gid_bimon4[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon5,GUID)
setkey(data.m3.2000_bimon5,GUID)
data.m3.2000_bimon5 <- merge(data.m3.2000_bimon5, uniq_gid_bimon5[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon6,GUID)
setkey(data.m3.2000_bimon6,GUID)
data.m3.2000_bimon6 <- merge(data.m3.2000_bimon6, uniq_gid_bimon6[,list(GUID,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3.2000_bimon2,data.m3.2000_bimon3,data.m3.2000_bimon4,data.m3.2000_bimon5,data.m3.2000_bimon6)
# create pred.m3
# final prediction = local mixed-model prediction + bimonthly spatial residual
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#recode negative into zero
#mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/mod3.TR.PM25.2000.pred3.rds")
saveRDS(res,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/res.2000.rds")
# wipe the workspace: the 2001 run below starts from scratch
rm(list = ls(all = TRUE))
gc()
###############
#LIBS
###############
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
#sourcing
# splitdf() (train/test split helper, returns $trainset/$testset) and
# rmse() -- used for the RMSPE entries below
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
# Stage-1 calibration data for year 2001
m1.all <-readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1.Tr.2001.rds")
#-------------------->> RES TABLE
# One-row results table for the 2001 run; metrics are filled in by name.
# Build the frame from the name vector itself so the column count always
# matches the names: the previous hard-coded ncol = 48 exceeded the 47
# names supplied, which silently left a stray NA-named column.
res.cols <- c(
"m1.raw","m1.raw.space","m1.raw.time","m1.time","m1.time.space","m1.time.time","m1.space","m1.space.space","m1.space.time","m1.noaod","m1.noaod.space","m1.noaod.time"
,"m1.R2","m1.rmspe","m1.R2.space","m1.R2.time","m1.rmspe.space" #mod1 Full
,"m1cv.R2","m1cv.I","m1cv.Ise","m1cv.slope","m1cv.slopese","m1cv.rmspe","m1cv.R2.space","m1cv.R2.time","m1cv.rmspe.space" #mod1 CV
,"m1cvloc.R2","m1cvloc.I","m1cvloc.Ise","m1cvloc.slope","m1cvloc.slopese","m1cvloc.rmspe","m1cvloc.R2.space","m1cvloc.R2.time","m1cvloc.rmspe.space"#loc m1
,"m2.R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"m3.R2","m3.rmspe","m3.R2.space","m3.R2.time","m3.rmspe.space" #mod3
,"m3.I","m3.Ise","m3.slope","m3.slopese")#Extra
res <- data.frame(matrix(NA_real_, nrow = 1, ncol = length(res.cols)))
colnames(res) <- res.cols
# pollutant label used to index the row below (res[res$type == "PM25", ...])
res$type <- c("PM25")
# Stage 1 (2001): same calibration model as the 2000 run above
names(m1.all)
m1.all<-filter(m1.all,!is.na(Temp_C))
# NOTE(review): the 2000 run also dropped rows with missing NDVI; that
# filter is absent here -- confirm whether 2001 NDVI is complete.
summary(m1.all)
#base model for stage 1
#clean data and exclude bad values
# +.1 keeps zero road-density cells finite on the log scale
m1.all$logroad<-log(m1.all$Mjrrdden_1 +.1)
#base model for stage 1
m1.formula <- as.formula(PM25 ~ aod+Temp_C+wdsp+NDVI+dist_PE+pcturb_1km+logroad+nei05nonpntcntypm25+pop_sqkm+elev_m+RH+visib+aod*hpbl+hpbl+NOXsum+PM10sum+SO2sum+pctmd_1km + pctld_1km+pctop_1km+ pctdf_1km+pctmf_1km+pctev_1km+ pctcr_1km+pctpa_1km+pctsh_1km+ pctgr_1km+ pm25stge30_15k + pm25stlt30_3k+pm10stge30_15k + pm10stlt30_3k +noxstge30_15k+noxstlt30_3k+ so2stge30_15k+so2stlt30_3k+ (1 +aod+Temp_C|day/region))
m1_sc <- lmer(m1.formula,data=m1.all)
# refresh fitted values (drop any stale pred.m1 column first)
m1.all[,pred.m1 := NULL]
m1.all$pred.m1 <- predict(m1_sc)
# in-sample performance
res[res$type=="PM25", 'm1.R2'] <- print(summary(lm(PM25~pred.m1,data=m1.all))$r.squared)
#RMSPE
res[res$type=="PM25", 'm1.rmspe'] <- print(rmse(residuals(m1_sc)))
#spatial
res[res$type=="PM25", 'm1.rmspe'] <- res[res$type=="PM25", 'm1.rmspe']
spatialall<-m1.all %>%
group_by(SiteCode) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.all.s <- lm(barpm ~ barpred, data=spatialall)
res[res$type=="PM25", 'm1.R2.space'] <-print(summary(lm(barpm ~ barpred, data=spatialall))$r.squared)
res[res$type=="PM25", 'm1.rmspe.space'] <- print(rmse(residuals(m1.fit.all.s)))
#temporal
tempoall<-left_join(m1.all,spatialall)
tempoall$delpm <-tempoall$PM25-tempoall$barpm
tempoall$delpred <-tempoall$pred.m1-tempoall$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempoall)
res[res$type=="PM25", 'm1.R2.time']<- print(summary(lm(delpm ~ delpred, data=tempoall))$r.squared)
saveRDS(m1.all,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1C.TR.PM25.pred.2001.rds")
#---------------->>>> CV
# Out-of-sample validation for 2001: ten random train/test splits
# (splitdf() from the sourced CV_splits.r returns $trainset/$testset).
# For each split the stage-1 mixed model is refit on the training part and
# used to predict the held-out part; re.form=NULL keeps the random effects
# and allow.new.levels=TRUE falls back to the fixed effects for day/region
# levels unseen in training. The ten hand-copied "s1".."s10" blocks are
# replaced by a single loop performing the identical sequence of calls.
cv.parts <- vector("list", 10)
for (i in seq_len(10)) {
  cv.split <- splitdf(m1.all)
  cv.test <- cv.split$testset
  cv.train <- cv.split$trainset
  cv.fit <- lmer(m1.formula, data = cv.train)
  cv.test$pred.m1.cv <- predict(object = cv.fit, newdata = cv.test,
                                allow.new.levels = TRUE, re.form = NULL)
  cv.test$iter <- paste0("s", i)  # keep the original "s1".."s10" labels
  cv.parts[[i]] <- cv.test
}
#BIND 1 dataset
m1.all.cv <- data.table(do.call(rbind, cv.parts))
saveRDS(m1.all.cv,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1cv.TR.PM25.2001.rds")
# cleanup (remove from WS) objects from CV
rm(cv.parts, cv.split, cv.test, cv.train, cv.fit)
#table updates -- cross-validated performance of model 1 (2001)
m1.fit.all.cv <- lm(PM25 ~ pred.m1.cv, data = m1.all.cv)
# reuse one summary() object instead of refitting the identical lm five times
sum.m1.cv <- summary(m1.fit.all.cv)
res[res$type=="PM25", 'm1cv.R2']      <- print(sum.m1.cv$r.squared)
res[res$type=="PM25", 'm1cv.I']       <- print(sum.m1.cv$coef[1,1])
res[res$type=="PM25", 'm1cv.Ise']     <- print(sum.m1.cv$coef[1,2])
res[res$type=="PM25", 'm1cv.slope']   <- print(sum.m1.cv$coef[2,1])
res[res$type=="PM25", 'm1cv.slopese'] <- print(sum.m1.cv$coef[2,2])
#RMSPE
res[res$type=="PM25", 'm1cv.rmspe'] <- print(rmse(residuals(m1.fit.all.cv)))
#spatial CV R2: site-level means of observed vs cross-validated predictions.
# BUG FIX: barpred previously averaged pred.m1 (the full-model fitted values)
# instead of the held-out predictions pred.m1.cv, which inflated the spatial
# CV R2; the temporal block below already used pred.m1.cv.
spatialall.cv <- m1.all.cv %>%
  group_by(SiteCode) %>%
  summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.all.cv.s <- lm(barpm ~ barpred, data=spatialall.cv)
sum.m1.cv.s <- summary(m1.fit.all.cv.s)
res[res$type=="PM25", 'm1cv.R2.space']    <- print(sum.m1.cv.s$r.squared)
res[res$type=="PM25", 'm1cv.rmspe.space'] <- print(rmse(residuals(m1.fit.all.cv.s)))
#temporal CV R2: deviations from the site means
tempoall.cv <- left_join(m1.all.cv, spatialall.cv)
tempoall.cv$delpm   <- tempoall.cv$PM25 - tempoall.cv$barpm
tempoall.cv$delpred <- tempoall.cv$pred.m1.cv - tempoall.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempoall.cv)
res[res$type=="PM25", 'm1cv.R2.time'] <- print(summary(mod_temporal.cv)$r.squared)
gc()
#### mod2 (2001): apply the stage-1 model to the full grid of AOD cells
m2.all <- readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod2.Tr.2001.rds")
#generate predictions
m2.all$logroad <- log(m2.all$Mjrrdden_1 + .1)   # same road-density transform as stage 1
m2.all <- filter(m2.all, !is.na(Temp_C))        # lmer cannot predict rows with missing Temp_C
summary(m2.all)
m2.all[, pred.m2 := predict(object=m1_sc,newdata=m2.all,allow.new.levels=TRUE,re.form=NULL)]
summary(m2.all$pred.m2)
#delete impossible values (non-positive or absurdly high PM2.5)
m2.all <- m2.all[pred.m2 > 0.00000000000001 , ]
m2.all <- m2.all[pred.m2 < 200 , ]
# BUG FIX: the output file is now year-stamped.  The original path
# ("Xmod2.AQ.PM25.pred.rds") was identical for every year, so the 2002 run
# later in this file silently overwrote the 2001 predictions.
saveRDS(m2.all,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod2.AQ.PM25.pred.2001.rds")
#-------------->prepare for mod3
m2.all[, bimon := (Month + 1) %/% 2]   # bi-monthly period index 1..6
setkey(m2.all,day, GUID)
m2.all.2001 <- m2.all[!is.na(meanPM)]  # keep only cells with a regional mean PM value
rm(m2.all)
gc()
#2001
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Stage-3 calibration: regress the stage-2 grid predictions on the regional
# mean PM with a random intercept + slope per grid cell (GUID).
# NOTE(review): lme()/lmeControl() come from nlme, which is attached as a
# dependency of mgcv -- confirm nlme is on the search path.
m2.smooth <- lme(pred.m2 ~ meanPM,random = list(GUID= ~1 + meanPM),control=lmeControl(opt = "optim"), data= m2.all.2001 )
#correlate to see everything from mod2 and the mpm works
m2.all.2001[, pred.t31 := predict(m2.smooth)]
m2.all.2001[, resid := residuals(m2.smooth)]
# NOTE(review): this R2 is printed but never stored; the res table has an
# unused 'm3.t31' column that looks like its intended destination.
print(summary(lm(pred.m2~pred.t31,data=m2.all.2001))$r.squared)
#split the files to the separate bi monthly datsets
Tall_bimon1 <- subset(m2.all.2001 ,m2.all.2001$bimon == "1")
Tall_bimon2 <- subset(m2.all.2001 ,m2.all.2001$bimon == "2")
Tall_bimon3 <- subset(m2.all.2001 ,m2.all.2001$bimon == "3")
Tall_bimon4 <- subset(m2.all.2001 ,m2.all.2001$bimon == "4")
Tall_bimon5 <- subset(m2.all.2001 ,m2.all.2001$bimon == "5")
Tall_bimon6 <- subset(m2.all.2001 ,m2.all.2001$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# Thin-plate spatial smooth of the stage-3 residuals, fitted separately for
# each two-month period (fit2_1..fit2_6 are reused by the mod3 section below).
fit2_1 <- gam(resid ~ s(Long,Lat), data= Tall_bimon1 )
fit2_2 <- gam(resid ~ s(Long,Lat), data= Tall_bimon2 )
fit2_3 <- gam(resid ~ s(Long,Lat), data= Tall_bimon3 )
fit2_4 <- gam(resid ~ s(Long,Lat), data= Tall_bimon4 )
fit2_5 <- gam(resid ~ s(Long,Lat), data= Tall_bimon5 )
fit2_6 <- gam(resid ~ s(Long,Lat), data= Tall_bimon6 )
#get the predicted-fitted
# Subtract the spatially smoothed residual surface from the calibrated
# predictions, one bi-monthly block at a time.
Xpred_1 <- (Tall_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (Tall_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (Tall_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (Tall_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (Tall_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (Tall_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): assigning c(Xpred_1..Xpred_6) positionally assumes the rows
# of m2.all.2001 (keyed by day, GUID) are chronologically ordered so that the
# six bi-monthly blocks are contiguous and in order 1..6 -- fragile; verify
# before reordering any statements above.
m2.all.2001$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.all.2001,day, GUID)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_all <- lme(pred.t32 ~ meanPM ,random = list(GUID= ~1 + meanPM ),control=lmeControl(opt = "optim"),data= m2.all.2001 )
m2.all.2001[, pred.t33 := predict(Final_pred_all)]
#check correlations
res[res$type=="PM25", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.all.2001))$r.squared)
#------------------------>>>foo
#import mod3: the full prediction grid for 2001 (all cells, all days)
data.m3.2001 <- readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod3.Tr.2001.rds")
#for PM25
data.m3.2001 <- select(data.m3.2001,day,GUID,Month,meanPM,Long,Lat)
data.m3.2001[, bimon := (Month + 1) %/% 2]
setkey(data.m3.2001,day, GUID)
data.m3.2001<-data.m3.2001[!is.na(meanPM)]
#generate m.3 initial pred from the stage-3 mixed model fitted above
data.m3.2001$pred.m3.mix <- predict(Final_pred_all,data.m3.2001)
#create unique grid: one row per cell with its mean coordinates
# NOTE(review): setkey()/[,list(...)] below require data.table objects, so
# this group_by/summarise must return a data.table (e.g. via dtplyr or an
# old dplyr with data.table methods) -- confirm with the package versions
# this script was run under.
ugrid <-data.m3.2001 %>%
  group_by(GUID) %>%
  summarise(Long = mean(Long, na.rm=TRUE), Lat = mean(Lat, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3.2001_bimon1 <- data.m3.2001[bimon == 1, ]
data.m3.2001_bimon2 <- data.m3.2001[bimon == 2, ]
data.m3.2001_bimon3 <- data.m3.2001[bimon == 3, ]
data.m3.2001_bimon4 <- data.m3.2001[bimon == 4, ]
data.m3.2001_bimon5 <- data.m3.2001[bimon == 5, ]
data.m3.2001_bimon6 <- data.m3.2001[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals: evaluate each bi-monthly residual
#surface (fit2_* from the section above) at every grid-cell centroid
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges (left joins on GUID, one per bimon)
setkey(uniq_gid_bimon1,GUID)
setkey(data.m3.2001_bimon1,GUID)
data.m3.2001_bimon1 <- merge(data.m3.2001_bimon1, uniq_gid_bimon1[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon2,GUID)
setkey(data.m3.2001_bimon2,GUID)
data.m3.2001_bimon2 <- merge(data.m3.2001_bimon2, uniq_gid_bimon2[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon3,GUID)
setkey(data.m3.2001_bimon3,GUID)
data.m3.2001_bimon3 <- merge(data.m3.2001_bimon3, uniq_gid_bimon3[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon4,GUID)
setkey(data.m3.2001_bimon4,GUID)
data.m3.2001_bimon4 <- merge(data.m3.2001_bimon4, uniq_gid_bimon4[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon5,GUID)
setkey(data.m3.2001_bimon5,GUID)
data.m3.2001_bimon5 <- merge(data.m3.2001_bimon5, uniq_gid_bimon5[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon6,GUID)
setkey(data.m3.2001_bimon6,GUID)
data.m3.2001_bimon6 <- merge(data.m3.2001_bimon6, uniq_gid_bimon6[,list(GUID,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3.2001_bimon1,data.m3.2001_bimon2,data.m3.2001_bimon3,data.m3.2001_bimon4,data.m3.2001_bimon5,data.m3.2001_bimon6)
# create pred.m3: calibrated mixed-model prediction plus the local
# spatial-residual correction
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#recode negative into zero
#mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/mod3.TR.PM25.2001.pred3.rds")
saveRDS(res,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/res.2001.rds")
# clear the whole workspace before the next year's run starts below
rm(list = ls(all = TRUE))
gc()
###############
#LIBS
###############
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
#sourcing
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
# Load the 2002 stage-1 training data (monitor days with matched predictors)
m1.all <-readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1.Tr.2002.rds")
#-------------------->> RES TABLE
# One-row results skeleton: 48 NA placeholder columns, filled in as the
# stage-1/CV/stage-3 diagnostics are computed below.
res <- matrix(nrow=1, ncol=48)
res <- data.frame(res)
colnames(res) <- c(
"m1.raw","m1.raw.space","m1.raw.time","m1.time","m1.time.space","m1.time.time","m1.space","m1.space.space","m1.space.time","m1.noaod","m1.noaod.space","m1.noaod.time"
,"m1.R2","m1.rmspe","m1.R2.space","m1.R2.time","m1.rmspe.space" #mod1 Full
,"m1cv.R2","m1cv.I","m1cv.Ise","m1cv.slope","m1cv.slopese","m1cv.rmspe","m1cv.R2.space","m1cv.R2.time","m1cv.rmspe.space" #mod1 CV
,"m1cvloc.R2","m1cvloc.I","m1cvloc.Ise","m1cvloc.slope","m1cvloc.slopese","m1cvloc.rmspe","m1cvloc.R2.space","m1cvloc.R2.time","m1cvloc.rmspe.space"#loc m1
,"m2.R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"m3.R2","m3.rmspe","m3.R2.space","m3.R2.time","m3.rmspe.space" #mod3
,"m3.I","m3.Ise","m3.slope","m3.slopese")#Extra
res$type <- c("PM25")
names(m1.all)
# drop rows lmer cannot use (missing temperature)
m1.all<-filter(m1.all,!is.na(Temp_C))
summary(m1.all)
#base model for stage 1
#clean data and exclude bad values
m1.all$logroad<-log(m1.all$Mjrrdden_1 +.1)
#base model for stage 1: PM2.5 on AOD plus meteorology/land-use predictors,
#with random intercept + AOD + temperature slopes nested as day within region
m1.formula <- as.formula(PM25 ~ aod+Temp_C+wdsp+NDVI+dist_PE+pcturb_1km+logroad+nei05nonpntcntypm25+pop_sqkm+elev_m+RH+visib+aod*hpbl+hpbl+NOXsum+PM10sum+SO2sum+pctmd_1km + pctld_1km+pctop_1km+ pctdf_1km+pctmf_1km+pctev_1km+ pctcr_1km+pctpa_1km+pctsh_1km+ pctgr_1km+ pm25stge30_15k + pm25stlt30_3k+pm10stge30_15k + pm10stlt30_3k +noxstge30_15k+noxstlt30_3k+ so2stge30_15k+so2stlt30_3k+ (1 +aod+Temp_C|day/region))
m1_sc <- lmer(m1.formula,data=m1.all)
# drop any stale prediction column before writing the new one
# NOTE(review): the := form requires m1.all to still be a data.table after
# the dplyr::filter() call above -- confirm the dplyr/data.table versions.
m1.all[,pred.m1 := NULL]
m1.all$pred.m1 <- predict(m1_sc)
res[res$type=="PM25", 'm1.R2'] <- print(summary(lm(PM25~pred.m1,data=m1.all))$r.squared)
#RMSPE
res[res$type=="PM25", 'm1.rmspe'] <- print(rmse(residuals(m1_sc)))
#spatial R2: site-level means of observed vs fitted PM2.5
spatialall<-m1.all %>%
  group_by(SiteCode) %>%
  summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.all.s <- lm(barpm ~ barpred, data=spatialall)
res[res$type=="PM25", 'm1.R2.space'] <-print(summary(lm(barpm ~ barpred, data=spatialall))$r.squared)
res[res$type=="PM25", 'm1.rmspe.space'] <- print(rmse(residuals(m1.fit.all.s)))
#temporal R2: deviations from site means
tempoall<-left_join(m1.all,spatialall)
tempoall$delpm <-tempoall$PM25-tempoall$barpm
tempoall$delpred <-tempoall$pred.m1-tempoall$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempoall)
res[res$type=="PM25", 'm1.R2.time']<- print(summary(lm(delpm ~ delpred, data=tempoall))$r.squared)
saveRDS(m1.all,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1C.TR.PM25.pred.2002.rds")
#---------------->>>> CV
# ---- Stage-1 cross-validation, folds s1-s10 (2002) ----
# The original code repeated one identical 6-line split/fit/predict stanza per
# fold; the loop below performs the same work for all ten folds.
cv_folds <- vector("list", 10)
for (i in 1:10) {
  splits_i <- splitdf(m1.all)            # fresh random train/test partition
  train_i  <- splits_i$trainset
  test_i   <- splits_i$testset
  # refit the stage-1 mixed model on the training rows only
  out_train_i <- lmer(m1.formula, data = train_i)
  # predict held-out rows; keep the random effects (re.form = NULL) and
  # tolerate day/region levels unseen in the training fold (allow.new.levels)
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)          # tag rows with their fold id
  cv_folds[[i]] <- test_i
}
#BIND 1 dataset
m1.all.cv <- data.table(do.call(rbind, cv_folds))
saveRDS(m1.all.cv,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod1cv.TR.PM25.2002.rds")
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
rm(cv_folds, splits_i)
#table updates -- cross-validated performance of model 1 (2002)
m1.fit.all.cv <- lm(PM25 ~ pred.m1.cv, data = m1.all.cv)
# reuse one summary() object instead of refitting the identical lm five times
sum.m1.cv <- summary(m1.fit.all.cv)
res[res$type=="PM25", 'm1cv.R2']      <- print(sum.m1.cv$r.squared)
res[res$type=="PM25", 'm1cv.I']       <- print(sum.m1.cv$coef[1,1])
res[res$type=="PM25", 'm1cv.Ise']     <- print(sum.m1.cv$coef[1,2])
res[res$type=="PM25", 'm1cv.slope']   <- print(sum.m1.cv$coef[2,1])
res[res$type=="PM25", 'm1cv.slopese'] <- print(sum.m1.cv$coef[2,2])
#RMSPE
res[res$type=="PM25", 'm1cv.rmspe'] <- print(rmse(residuals(m1.fit.all.cv)))
#spatial CV R2: site-level means of observed vs cross-validated predictions.
# BUG FIX: barpred previously averaged pred.m1 (the full-model fitted values)
# instead of the held-out predictions pred.m1.cv, which inflated the spatial
# CV R2; the temporal block below already used pred.m1.cv.
spatialall.cv <- m1.all.cv %>%
  group_by(SiteCode) %>%
  summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.all.cv.s <- lm(barpm ~ barpred, data=spatialall.cv)
sum.m1.cv.s <- summary(m1.fit.all.cv.s)
res[res$type=="PM25", 'm1cv.R2.space']    <- print(sum.m1.cv.s$r.squared)
res[res$type=="PM25", 'm1cv.rmspe.space'] <- print(rmse(residuals(m1.fit.all.cv.s)))
#temporal CV R2: deviations from the site means
tempoall.cv <- left_join(m1.all.cv, spatialall.cv)
tempoall.cv$delpm   <- tempoall.cv$PM25 - tempoall.cv$barpm
tempoall.cv$delpred <- tempoall.cv$pred.m1.cv - tempoall.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempoall.cv)
res[res$type=="PM25", 'm1cv.R2.time'] <- print(summary(mod_temporal.cv)$r.squared)
gc()
#### mod2 (2002): apply the stage-1 model to the full grid of AOD cells
m2.all <- readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod2.Tr.2002.rds")
#generate predictions
m2.all$logroad <- log(m2.all$Mjrrdden_1 + .1)   # same road-density transform as stage 1
m2.all <- filter(m2.all, !is.na(Temp_C))        # lmer cannot predict rows with missing Temp_C
summary(m2.all)
m2.all[, pred.m2 := predict(object=m1_sc,newdata=m2.all,allow.new.levels=TRUE,re.form=NULL)]
summary(m2.all$pred.m2)
#delete impossible values (non-positive or absurdly high PM2.5)
m2.all <- m2.all[pred.m2 > 0.00000000000001 , ]
m2.all <- m2.all[pred.m2 < 200 , ]
# BUG FIX: the output file is now year-stamped.  The original path
# ("Xmod2.AQ.PM25.pred.rds") was identical for every year, so this 2002 save
# silently overwrote the 2001 predictions written earlier in the file.
saveRDS(m2.all,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod2.AQ.PM25.pred.2002.rds")
#-------------->prepare for mod3
m2.all[, bimon := (Month + 1) %/% 2]   # bi-monthly period index 1..6
setkey(m2.all,day, GUID)
m2.all.2002 <- m2.all[!is.na(meanPM)]  # keep only cells with a regional mean PM value
rm(m2.all)
gc()
#2002
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
# Stage-3 calibration: regress the stage-2 grid predictions on the regional
# mean PM with a random intercept + slope per grid cell (GUID).
# NOTE(review): lme()/lmeControl() come from nlme, which is attached as a
# dependency of mgcv -- confirm nlme is on the search path.
m2.smooth <- lme(pred.m2 ~ meanPM,random = list(GUID= ~1 + meanPM),control=lmeControl(opt = "optim"), data= m2.all.2002 )
#correlate to see everything from mod2 and the mpm works
m2.all.2002[, pred.t31 := predict(m2.smooth)]
m2.all.2002[, resid := residuals(m2.smooth)]
# NOTE(review): this R2 is printed but never stored; the res table has an
# unused 'm3.t31' column that looks like its intended destination.
print(summary(lm(pred.m2~pred.t31,data=m2.all.2002))$r.squared)
#split the files to the separate bi monthly datsets
Tall_bimon1 <- subset(m2.all.2002 ,m2.all.2002$bimon == "1")
Tall_bimon2 <- subset(m2.all.2002 ,m2.all.2002$bimon == "2")
Tall_bimon3 <- subset(m2.all.2002 ,m2.all.2002$bimon == "3")
Tall_bimon4 <- subset(m2.all.2002 ,m2.all.2002$bimon == "4")
Tall_bimon5 <- subset(m2.all.2002 ,m2.all.2002$bimon == "5")
Tall_bimon6 <- subset(m2.all.2002 ,m2.all.2002$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
# Thin-plate spatial smooth of the stage-3 residuals, fitted separately for
# each two-month period (fit2_1..fit2_6 are reused by the mod3 section below).
fit2_1 <- gam(resid ~ s(Long,Lat), data= Tall_bimon1 )
fit2_2 <- gam(resid ~ s(Long,Lat), data= Tall_bimon2 )
fit2_3 <- gam(resid ~ s(Long,Lat), data= Tall_bimon3 )
fit2_4 <- gam(resid ~ s(Long,Lat), data= Tall_bimon4 )
fit2_5 <- gam(resid ~ s(Long,Lat), data= Tall_bimon5 )
fit2_6 <- gam(resid ~ s(Long,Lat), data= Tall_bimon6 )
#get the predicted-fitted
# Subtract the spatially smoothed residual surface from the calibrated
# predictions, one bi-monthly block at a time.
Xpred_1 <- (Tall_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (Tall_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (Tall_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (Tall_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (Tall_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (Tall_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
# NOTE(review): assigning c(Xpred_1..Xpred_6) positionally assumes the rows
# of m2.all.2002 (keyed by day, GUID) are chronologically ordered so that the
# six bi-monthly blocks are contiguous and in order 1..6 -- fragile; verify
# before reordering any statements above.
m2.all.2002$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.all.2002,day, GUID)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_all <- lme(pred.t32 ~ meanPM ,random = list(GUID= ~1 + meanPM ),control=lmeControl(opt = "optim"),data= m2.all.2002 )
m2.all.2002[, pred.t33 := predict(Final_pred_all)]
#check correlations
res[res$type=="PM25", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.all.2002))$r.squared)
#------------------------>>>foo
#import mod3: the full prediction grid for 2002 (all cells, all days)
data.m3.2002 <- readRDS("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/Xmod3.Tr.2002.rds")
#for PM25
data.m3.2002 <- select(data.m3.2002,day,GUID,Month,meanPM,Long,Lat)
data.m3.2002[, bimon := (Month + 1) %/% 2]
setkey(data.m3.2002,day, GUID)
data.m3.2002<-data.m3.2002[!is.na(meanPM)]
#generate m.3 initial pred from the stage-3 mixed model fitted above
data.m3.2002$pred.m3.mix <- predict(Final_pred_all,data.m3.2002)
#create unique grid: one row per cell with its mean coordinates
# NOTE(review): setkey()/[,list(...)] below require data.table objects, so
# this group_by/summarise must return a data.table (e.g. via dtplyr or an
# old dplyr with data.table methods) -- confirm with the package versions
# this script was run under.
ugrid <-data.m3.2002 %>%
  group_by(GUID) %>%
  summarise(Long = mean(Long, na.rm=TRUE), Lat = mean(Lat, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3.2002_bimon1 <- data.m3.2002[bimon == 1, ]
data.m3.2002_bimon2 <- data.m3.2002[bimon == 2, ]
data.m3.2002_bimon3 <- data.m3.2002[bimon == 3, ]
data.m3.2002_bimon4 <- data.m3.2002[bimon == 4, ]
data.m3.2002_bimon5 <- data.m3.2002[bimon == 5, ]
data.m3.2002_bimon6 <- data.m3.2002[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals: evaluate each bi-monthly residual
#surface (fit2_* from the section above) at every grid-cell centroid
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back togheter
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges (left joins on GUID, one per bimon)
setkey(uniq_gid_bimon1,GUID)
setkey(data.m3.2002_bimon1,GUID)
data.m3.2002_bimon1 <- merge(data.m3.2002_bimon1, uniq_gid_bimon1[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon2,GUID)
setkey(data.m3.2002_bimon2,GUID)
data.m3.2002_bimon2 <- merge(data.m3.2002_bimon2, uniq_gid_bimon2[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon3,GUID)
setkey(data.m3.2002_bimon3,GUID)
data.m3.2002_bimon3 <- merge(data.m3.2002_bimon3, uniq_gid_bimon3[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon4,GUID)
setkey(data.m3.2002_bimon4,GUID)
data.m3.2002_bimon4 <- merge(data.m3.2002_bimon4, uniq_gid_bimon4[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon5,GUID)
setkey(data.m3.2002_bimon5,GUID)
data.m3.2002_bimon5 <- merge(data.m3.2002_bimon5, uniq_gid_bimon5[,list(GUID,gpred)], all.x = T)
setkey(uniq_gid_bimon6,GUID)
setkey(data.m3.2002_bimon6,GUID)
data.m3.2002_bimon6 <- merge(data.m3.2002_bimon6, uniq_gid_bimon6[,list(GUID,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3.2002_bimon1,data.m3.2002_bimon2,data.m3.2002_bimon3,data.m3.2002_bimon4,data.m3.2002_bimon5,data.m3.2002_bimon6)
# create pred.m3: calibrated mixed-model prediction plus the local
# spatial-residual correction
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#recode negative into zero
#mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/mod3.TR.PM25.2002.pred3.rds")
saveRDS(res,"/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/m3rds/res.2002.rds")
# clear the whole workspace at the end of the 2002 run
rm(list = ls(all = TRUE))
gc()
|
2b2e8525ba5cd49475cbc441dd1b38ee882c4748 | 66a2afd9c0dab1d55e6d236f3d85bc1b61a11a66 | /R/read-metadata.R | 6f3ece795555dbb3d965584746c7c450a4739170 | [
"MIT"
] | permissive | StevenMMortimer/salesforcer | 833b09465925fb3f1be8da3179e648d4009c69a9 | a1e1e9cd0aa4e4fe99c7acd3fcde566076dac732 | refs/heads/main | 2023-07-23T16:39:15.632082 | 2022-03-02T15:52:59 | 2022-03-02T15:52:59 | 94,126,513 | 91 | 19 | NOASSERTION | 2023-07-14T05:19:53 | 2017-06-12T18:14:00 | R | UTF-8 | R | false | false | 6,659 | r | read-metadata.R | #' Read Object or Field Metadata from Salesforce
#'
#' @description
#' `r lifecycle::badge("experimental")`
#'
#' This function takes a request of named elements in Salesforce and
#' returns their metadata
#'
#' @importFrom XML newXMLNode addChildren xmlParse xmlToList
#' @importFrom httr content
#' @importFrom xml2 xml_ns_strip xml_find_all
#' @importFrom purrr map
#' @references \url{https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta}
#' @template metadata_type
#' @param object_names a character vector of names that we wish to read metadata for
#' @template verbose
#' @return A \code{list} containing a response for each requested object
#' @examples
#' \dontrun{
#' metadata_info <- sf_read_metadata(metadata_type='CustomObject',
#' object_names=c('Account'))
#' }
#' @export
sf_read_metadata <- function(metadata_type, object_names, verbose=FALSE){
  # Validate inputs: object_names must be character, and metadata_type must
  # be one of the types the Salesforce Metadata API understands.
  stopifnot(all(is.character(object_names)))
  stopifnot(metadata_type %in% names(valid_metadata_list()))
  # format names into list; the SOAP body expects one <fullNames> element
  # per requested object, hence every element is named 'fullNames'
  object_list <- as.list(object_names)
  names(object_list) <- rep('fullNames', length(object_list))
  which_operation <- "readMetadata"
  # define the operation node of the SOAP request
  operation_node <- newXMLNode(which_operation,
                               namespaceDefinitions=c('http://soap.sforce.com/2006/04/metadata'),
                               suppressNamespaceWarning = TRUE)
  type_node <- newXMLNode("type", metadata_type, parent=operation_node)
  # and add the metadata (the fullNames children) to it
  xml_dat <- build_metadata_xml_from_list(input_data=object_list, metatype=NULL, root=operation_node)
  # assemble the full SOAP envelope and serialize it to a request string
  base_metadata_url <- make_base_metadata_url()
  root <- make_soap_xml_skeleton(metadata_ns=TRUE)
  body_node <- newXMLNode("soapenv:Body", parent=root)
  body_node <- addChildren(body_node, xml_dat)
  request_body <- as(root, "character")
  # POST the envelope; the SOAPAction header selects the API operation
  httr_response <- rPOST(url = base_metadata_url,
                         headers = c("SOAPAction"=which_operation,
                                     "Content-Type"="text/xml"),
                         body = request_body)
  if(verbose){
    make_verbose_httr_message(httr_response$request$method,
                              httr_response$request$url,
                              httr_response$request$headers,
                              request_body)
  }
  catch_errors(httr_response)  # raises on HTTP errors / SOAP faults
  response_parsed <- content(httr_response, encoding="UTF-8")
  # Convert each <records> node of the response into an R list.
  invisible(capture.output(
    # capture any xmlToList grumblings about Namespace prefix
    resultset <- response_parsed %>%
      xml_ns_strip() %>%
      xml_find_all('.//records') %>%
      # we must use XML because character elements are not automatically unboxed
      # see https://github.com/r-lib/xml2/issues/215
      map(.f=function(x){
        xmlToList(xmlParse(as(object=x, Class="character")))
      })
  ))
  return(resultset)
}
#' Describe Object Fields
#'
#' @description
#' `r lifecycle::badge("stable")`
#'
#' This function takes the name of an object in Salesforce and returns a description
#' of the fields on that object by returning a tibble with one row per field.
#'
#' @importFrom dplyr select
#' @importFrom purrr map modify_at
#' @template object_name
#' @note The tibble only contains the fields that the user can view, as defined by
#' the user's field-level security settings.
#' @return A \code{tbl_df} containing one row per field for the requested object.
#' @examples
#' \dontrun{
#' acct_fields <- sf_describe_object_fields('Account')
#' }
#' @export
sf_describe_object_fields <- function(object_name){
  # Describe one object's fields as a tibble with one row per field.
  stopifnot(length(object_name) == 1)
  obj_dat <- sf_describe_objects(object_names = object_name, api_type = "SOAP")[[1]]
  obj_fields_dat <- obj_dat[names(obj_dat) == "fields"] %>%
    # explicitly combine duplicated names because many tidyverse functions
    # break whenever that occurs
    map(collapse_list_with_dupe_names) %>%
    map(set_null_elements_to_na)
  # helper: wrap the named elements of every field record in list() so that
  # the final bind_rows produces list-columns for them (the same stanza was
  # previously duplicated for the class check and the length check)
  wrap_cols_as_list <- function(dat, cols){
    dat %>% map(~modify_at(.x, cols, list))
  }
  # columns whose (non-NA) element classes differ across fields must be
  # carried as list-columns
  obj_fields_dat_classes <- obj_fields_dat %>%
    map(~map(.x, class)) %>%
    safe_bind_rows()
  classes_summary <- obj_fields_dat_classes %>% map_lgl(~length(unique(na.omit(.x))) > 1)
  if(any(classes_summary)){
    obj_fields_dat <- wrap_cols_as_list(obj_fields_dat,
                                        names(classes_summary)[which(classes_summary)])
  }
  # columns holding any element of length > 1 must likewise become list-columns
  obj_fields_dat_lengths <- obj_fields_dat %>%
    map(~map(.x, length)) %>%
    safe_bind_rows()
  lengths_summary <- obj_fields_dat_lengths %>% map_lgl(~any(.x > 1, na.rm=TRUE))
  if(any(lengths_summary)){
    obj_fields_dat <- wrap_cols_as_list(obj_fields_dat,
                                        names(lengths_summary)[which(lengths_summary)])
  }
  obj_fields_dat <- obj_fields_dat %>%
    safe_bind_rows() %>%
    # sort column names as the API would return prior to the combining process above
    select(sort(names(.)))
  return(obj_fields_dat)
}
#' Collapse Elements in List with Same Name
#'
#' This function looks for instances of elements in a list that have the same name
#' and then combine them all into a single comma separated character string
#' (referenceTo) or \code{tbl_df} (picklistValues).
#'
#' @importFrom purrr map
#' @importFrom dplyr bind_rows
#' @importFrom utils head tail
#' @param x list; a list, typically returned from the API that we would parse through
#' @note The tibble only contains the fields that the user can view, as defined by
#' the user's field-level security settings.
#' @return A \code{list} containing one row per field for the requested object.
#' @examples \dontrun{
#' obj_dat <- sf_describe_objects(object_names = "Contact", api_type = "SOAP")[[1]]
#' obj_fields_list <- obj_dat[names(obj_dat) == "fields"] %>%
#' map(collapse_list_with_dupe_names)
#' }
#' @export
collapse_list_with_dupe_names <- function(x){
  # Merge every group of identically-named elements into the slot of the
  # group's first occurrence, dropping the remaining occurrences.
  for(nm in unique(names(x)[duplicated(names(x))])){
    idx <- which(names(x) == nm)
    dupes <- x[idx]
    if(all(lengths(dupes) == 1)){
      # all scalars: concatenate into one unnamed vector
      merged <- list(unname(unlist(dupes)))
    } else {
      # longer elements: row-bind them into a single tibble
      merged <- dupes %>%
        map(set_null_elements_to_na) %>%
        safe_bind_rows() %>%
        list()
    }
    x[idx[1]] <- merged   # replace into the first occurrence
    x[idx[-1]] <- NULL    # remove the rest
  }
  x
}
|
2c91715fe7044018f1e77c8a67c0a6647f726eef | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tatest/examples/ta.test.Rd.R | 3286146a716ab115746cc9fcca8595d9df718a8c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 475 | r | ta.test.Rd.R | library(tatest)
### Name: ta.test
### Title: t_alpha-test for testing difference between two conditions with
###   small samples
### Aliases: ta.test
### Keywords: rhov ttest
### ** Examples
# Example 1: two hand-typed samples of four observations each (na/nb = 4),
# equal-variance t_alpha test on raw values.
# NOTE(review): LOG="NULL" passes the literal string "NULL", not the NULL
# object -- confirm against the tatest documentation that this is intended.
X<-c(112,122,108,127)
Y<-c(302, 314,322,328)
dat<-cbind(X,Y)
ta.test(X=dat,nci=NULL, na=4,nb=4, eqv=TRUE,
        alpha=0.05, LOG="NULL", alternative = "two.sided")
# Example 2: first 2000 rows of the bundled Ecadherin data, three replicates
# per condition, log2-transformed; nci=1 presumably marks a non-data column.
data(Ecadherin)
res<-ta.test(X=Ecadherin[1:2000,],nci=1, na=3,nb=3, eqv=TRUE,
             LOG="LOG2", alternative = "two.sided")
|
85d3eeda87966aed52863c9b2bdf8a3648243e2d | d4cfbbe9cc786edbd6e418f71f05223f2c309365 | /DC_Analysis.R | 169ff2c3eaaa31ec7c9377fcdf90cac592548dc5 | [] | no_license | deepamghosh/IPL-Exploratory-Data-Analysis | eed1a778ad754309684e96f95480bfdb004508dd | 19e6f557cd71b9db9e21c4a916b9e59da168c3c3 | refs/heads/master | 2020-03-09T20:25:35.329129 | 2018-10-23T05:51:05 | 2018-10-23T05:51:05 | 128,984,433 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,427 | r | DC_Analysis.R | #Analysis of DECCAN CHARGERS HYDERABAD
library(dplyr)
# ---- Overall record ----
summary(dc_matches$winner)
dc_matches_played <- nrow(dc_matches)
dc_win <- dc_matches %>% filter(winner == "DC") %>% nrow()
dc_noresult <- dc_matches %>% filter(winner == "") %>% nrow()
dc_loss <- dc_matches_played - dc_win - dc_noresult
dc_tie <- dc_matches %>% filter(result == "tie") %>% nrow()
dc_win_percent <- round((dc_win / dc_matches_played) * 100, digits = 2)
dc_analysis <- data.frame(dc_matches_played, dc_win, dc_loss, dc_noresult,
                          dc_tie, dc_win_percent)
names(dc_analysis) <- c("Matches Played", "Win", "Loss", "No Result", "Tie",
                        "Win %")
# ---- Toss analysis ----
dc_toss_win <- dc_matches %>% filter(toss_winner == "DC") %>% nrow()
dc_toss_win_bat <- dc_matches %>%
  filter(toss_winner == "DC" & toss_decision == "bat") %>%
  nrow()
dc_toss_win_bowl <- dc_matches %>%
  filter(toss_winner == "DC" & toss_decision == "field") %>%
  nrow()
# ---- Biggest win by runs ----
dc_win_run_max <- max(dc_matches$win_by_runs)
dc_high_run_win <- dc_matches %>%
  filter(win_by_runs == dc_win_run_max) %>%
  select(match_id, team1_id, team2_id, win_by_runs, venue)
# ---- Biggest win by wickets ----
dc_win_wkt_max <- max(dc_matches$win_by_wickets)
dc_high_wkt_win <- dc_matches %>%
  filter(win_by_wickets == dc_win_wkt_max) %>%
  select(match_id, team1_id, team2_id, win_by_wickets, venue)
|
7c703e47859380cac90ee680d2e3df3f62807d38 | 1f99b0491c60411808a1d066ac49b8c211159dc2 | /plot3.R | 5ef5ccda1732da5cf47e67cffc18697ceb4da0a8 | [] | no_license | beyondsunny/exploratory-data-analysis-project2 | 197a336c8d14c6f37f25d9c5950807d97c2099d4 | 1acc21978556bcc06daa9b95869e501e2cf81458 | refs/heads/master | 2020-12-06T15:10:26.821839 | 2016-09-04T16:12:12 | 2016-09-04T16:12:12 | 67,354,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,509 | r | plot3.R | plot3<-function(){
#FUNCTION TO PLOT TOTAL EMISSION BY Baltimore City, Maryland TO SEE WHETHER TOTAL EMISSIONS HAVE INCREASED/DECREASED FROM 1999-2008
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#SUBSET NET FOR BALTIMORE LOCATION WHERE FLIP=24510
baltimore_NEI <- NEI[NEI$fips=="24510",]
#GET TOTAL EMISSIONS BY EACH YEAR FOR Baltimore City
NEI_SUM_BY_YEAR<- aggregate(Emissions~year,data = baltimore_NEI,FUN = sum)
#TO SAVE INTO PNG FILE
png("plot3.png",units="px",bg="transparent",width=450,height=450)
#LOAD GGPLOT2 LIBRARY
library(ggplot2)
#PLOT THE CHART
Baltimore_PLOT <- ggplot(baltimore_NEI,aes(factor(year),Emissions,fill=type)) +
labs(x="Observation years", y=expression("Total P2.5 Emission")) +
labs(title="PM2.5 Emissions in Baltimore City From 1999 to 2008 by Source Type") +
guides(fill=FALSE)+
geom_bar(stat="identity") +
facet_grid(.~type,scales = "free",space="free")
print(Baltimore_PLOT)
dev.off()
#Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
#variable, which of these four sources have seen decreases in emissions from 1999-2008
#for Baltimore City? Which have seen increases in emissions from 1999-2008?
#Use the ggplot2 plotting system to make a plot answer this question.
#ANSWER: NON-ROAD,NONPOINT,ON-ROAD HAVE SEEN INCREASES, WHILE POINT HAS SEEN DECREASE FROM
#1999-2000
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.