content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Name: Testing the Perceptron Learning Algo.
# Author: Ranaji Krishna.
#
# Notes: The code tests the PL algo. by creating
# random data pts. and tagging them as +1 and -1.
# It then calls the PLA fxn. "percepAlgo" to classify the
# points as +ve or -ve.
#
# The dataframe "pts" contains the x- and y- coordinates of
# the points, the sign assigned to the point (+ve or -ve) and
# the classification estimated using the PLA. Out-of-sample
# points are generated, signed and classified.
#
# ----------

# NOTE(review): rm(list = ls()) wipes the user's workspace; tolerable in a
# stand-alone experiment script, but avoid in shared code.
rm(list = ls(all = TRUE)) # clear all.
graphics.off()            # close all open graphics devices.

tot_itr <- 100 # Number of out-of-sample points used to estimate error.

# Out-of-sample results: coordinates, true sign, PLA estimate, agreement flag.
store_prb <- as.data.frame(matrix(NA, tot_itr, 5))
colnames(store_prb) <- c("x1_value", "x2_value", "y_value", "classify", "verify")

# =============== Construct data =================
no_pts <- 10 # Number of in-sample training points.

# ----- Two random points that define the separating line ---
pt_a <- runif(2, -1, 1) # x-coordinates of the two points.
pt_b <- runif(2, -1, 1) # y-coordinates of the two points.

# ----- Plot window & separating line ---
plot(-1:1, -1:1, type = 'n')   # empty canvas on [-1, 1] x [-1, 1].
points(pt_a, pt_b, type = 'n') # no-op in the original (type = 'n'); kept as-is.
fit <- lm(pt_b ~ pt_a)         # line through the two random points.
abline(fit, col = 'blue')      # plot separating line (reuse fit; no refit needed).

pts <- as.data.frame(matrix(NA, no_pts, 4)) # Training data frame.
colnames(pts) <- c("x1_value", "x2_value", "y_value", "classify")

# --- Generate the sample data pts. ---
pts$x1_value <- runif(no_pts, -1, 1)
pts$x2_value <- runif(no_pts, -1, 1)

# Assign signs: +1 above the line, -1 below.
pts$y_value <- sign(pts$x2_value -
                      (fit$coefficients[2] * pts$x1_value +
                         fit$coefficients[1]))

# ----- Plot the sample data ---
up  <- subset(pts, pts$y_value == 1)
dwn <- subset(pts, pts$y_value == -1)
points(up$x1_value, up$x2_value, col = 'green')
points(dwn$x1_value, dwn$x2_value, col = 'red')

# ===== Learning =====
source('~/myGitCode/ML/homeWorks/homeWork1/percepAlgo.R')

# Perceptron Learning Algo. - parse data frame of sample pts. and signs.
val <- percepAlgo(pts)
cat("Weights: ", val[[1]], "\n") # Computed weights.
w <- val[[1]]

for (j in seq_len(tot_itr)) {
  store_prb[j, 1:2] <- runif(2, -1, 1) # Out-of-sample point.

  # True sign (+ve above the line).
  # BUG FIX: the original read `:wfit$coefficients[1]` -- a stray vim ":w"
  # save command pasted into the code; it must be `fit$coefficients[1]`.
  store_prb$y_value[j] <- sign(store_prb$x2_value[j] -
                                 (fit$coefficients[2] * store_prb$x1_value[j] +
                                    fit$coefficients[1]))

  # Estimated sign from the learned weights (bias term first).
  store_prb$classify[j] <- sign(c(1,
                                  store_prb$x1_value[j],
                                  store_prb$x2_value[j]) %*% w)

  # 1 when the estimate matches the true sign, 0 otherwise.
  store_prb$verify[j] <- as.numeric(store_prb$y_value[j] == store_prb$classify[j])
}

# BUG FIX: the original printed the misclassification *fraction* next to a
# "%" label; scale by 100 so the value matches the label.
prb <- (1 - sum(store_prb$verify) / tot_itr) * 100
cat("Percentage of mis-classification: ", prb, "%\n")

avIte <- val[[3]] # Av. iterations to converge.
cat("Average no. iterations to converge: ", avIte)
|
/code_sample/code_R/test_percep.R
|
no_license
|
ranajikrishna/my_proj
|
R
| false
| false
| 3,110
|
r
|
# Name: Testing the Perceptron Learning Algo.
# Author: Ranaji Krishna.
#
# Notes: The code tests the PL algo. by creating
# random data pts. and tagging them as +1 and -1.
# It then calls the PLA fxn. "percepAlgo" to classify the
# points as +ve or -ve.
#
# The dataframe "pts" contains the x- and y- coordinates of
# the points, the sign assigned to the point (+ve or -ve) and
# the classification estimated using the PLA. Out-of-sample
# points are generated, signed and classified.
#
# ----------

# NOTE(review): rm(list = ls()) wipes the user's workspace; tolerable in a
# stand-alone experiment script, but avoid in shared code.
rm(list = ls(all = TRUE)) # clear all.
graphics.off()            # close all open graphics devices.

tot_itr <- 100 # Number of out-of-sample points used to estimate error.

# Out-of-sample results: coordinates, true sign, PLA estimate, agreement flag.
store_prb <- as.data.frame(matrix(NA, tot_itr, 5))
colnames(store_prb) <- c("x1_value", "x2_value", "y_value", "classify", "verify")

# =============== Construct data =================
no_pts <- 10 # Number of in-sample training points.

# ----- Two random points that define the separating line ---
pt_a <- runif(2, -1, 1) # x-coordinates of the two points.
pt_b <- runif(2, -1, 1) # y-coordinates of the two points.

# ----- Plot window & separating line ---
plot(-1:1, -1:1, type = 'n')   # empty canvas on [-1, 1] x [-1, 1].
points(pt_a, pt_b, type = 'n') # no-op in the original (type = 'n'); kept as-is.
fit <- lm(pt_b ~ pt_a)         # line through the two random points.
abline(fit, col = 'blue')      # plot separating line (reuse fit; no refit needed).

pts <- as.data.frame(matrix(NA, no_pts, 4)) # Training data frame.
colnames(pts) <- c("x1_value", "x2_value", "y_value", "classify")

# --- Generate the sample data pts. ---
pts$x1_value <- runif(no_pts, -1, 1)
pts$x2_value <- runif(no_pts, -1, 1)

# Assign signs: +1 above the line, -1 below.
pts$y_value <- sign(pts$x2_value -
                      (fit$coefficients[2] * pts$x1_value +
                         fit$coefficients[1]))

# ----- Plot the sample data ---
up  <- subset(pts, pts$y_value == 1)
dwn <- subset(pts, pts$y_value == -1)
points(up$x1_value, up$x2_value, col = 'green')
points(dwn$x1_value, dwn$x2_value, col = 'red')

# ===== Learning =====
source('~/myGitCode/ML/homeWorks/homeWork1/percepAlgo.R')

# Perceptron Learning Algo. - parse data frame of sample pts. and signs.
val <- percepAlgo(pts)
cat("Weights: ", val[[1]], "\n") # Computed weights.
w <- val[[1]]

for (j in seq_len(tot_itr)) {
  store_prb[j, 1:2] <- runif(2, -1, 1) # Out-of-sample point.

  # True sign (+ve above the line).
  # BUG FIX: the original read `:wfit$coefficients[1]` -- a stray vim ":w"
  # save command pasted into the code; it must be `fit$coefficients[1]`.
  store_prb$y_value[j] <- sign(store_prb$x2_value[j] -
                                 (fit$coefficients[2] * store_prb$x1_value[j] +
                                    fit$coefficients[1]))

  # Estimated sign from the learned weights (bias term first).
  store_prb$classify[j] <- sign(c(1,
                                  store_prb$x1_value[j],
                                  store_prb$x2_value[j]) %*% w)

  # 1 when the estimate matches the true sign, 0 otherwise.
  store_prb$verify[j] <- as.numeric(store_prb$y_value[j] == store_prb$classify[j])
}

# BUG FIX: the original printed the misclassification *fraction* next to a
# "%" label; scale by 100 so the value matches the label.
prb <- (1 - sum(store_prb$verify) / tot_itr) * 100
cat("Percentage of mis-classification: ", prb, "%\n")

avIte <- val[[3]] # Av. iterations to converge.
cat("Average no. iterations to converge: ", avIte)
|
## PA Data Analysis ###
### Author : Chandrasen
### Date : 22-March-2018

# Install (only if missing) and load the library used to read the data.
# IMPROVED: the original called install.packages() unconditionally on every run.
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
library(readxl)

# Extracting and reading the data into a data frame.
Pa <- read.csv("C:/Users/chandrasen.wadikar/Desktop/PA data.csv")
View(Pa) # View the data and validate whether it is now stored into Pa.

# Define the variable names used by the models below.
# Only the first 16 columns are renamed, matching the original script.
colnames(Pa)[1:16] <- c(
  "Project_Name", "PM_Lead", "Defects_by_PA", "Defects_by_UAT",
  "Defect_lek", "functional_defects", "Percen_functional_def",
  "Total_open_defects", "Defect_re_open", "Total_tc", "Time_taken_tc",
  "Total_tc_exectuted", "Time_taken_exec", "Total_RA", "test_RA",
  "RA_Coverage"
)

head(Pa)
length(Pa)
summary(Pa)

# Exploratory snippets kept (commented) for reference:
#boxplot(Pa,horizontal = T)
#r1<-Pa[Pa<500]
#r1
#boxplot(r1,horizontal = T)
#hist(r1)
#test21 <-Pa[Pa<]
#bench<-58.0+1.5*IQR(Pa)

# Model-1: linear model of PA defects on the test/defect metrics.
pa_model1 <- lm(Defects_by_PA ~ Defects_by_UAT + Defect_lek + Total_open_defects +
                  Defect_re_open + Total_tc + Time_taken_exec + Total_RA +
                  RA_Coverage, data = Pa)
summary(pa_model1)
plot(pa_model1)
predict_test1 <- predict(pa_model1, Pa)
predict_actual_pa <- data.frame(predict_test1, Pa)
predict_actual_pa

??outlier
# BUG FIX: outlier() is not defined in this session (it lives in the
# 'outliers' package, which is never loaded); the call aborted the script,
# so it is kept commented out.
# outlier(72, 65)

#Output
#Coefficients:
#                     Estimate Std. Error t value Pr(>|t|)
#(Intercept)        20.203477  31.749729   0.636 0.528095
#Defects_by_UAT      9.023746   0.524740  17.197  < 2e-16 ***
#Defect_lek         -1.253687   0.325144  -3.856 0.000399 ***
#Total_open_defects  0.233136   0.544373   0.428 0.670699
#Defect_re_open     -0.133097   0.121168  -1.098 0.278417
#Total_tc            0.004997   0.031166   0.160 0.873404
#Time_taken_exec     0.054533   0.076452   0.713 0.479700
#Total_RA            0.503121   0.712820   0.706 0.484293
#RA_Coverage        -0.090049   0.317830  -0.283 0.778352
# --- (BUG FIX: this divider was an uncommented `---` in the original,
#      which R tried to parse as code; it is now a comment.)
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Residual standard error: 19.72 on 41 degrees of freedom
#(27 observations deleted due to missingness)
#Multiple R-squared: 0.9981, Adjusted R-squared: 0.9978
#F-statistic: 2727 on 8 and 41 DF, p-value: < 2.2e-16

#Model-2 with different analysis
pa_model2 <- lm(Defects_by_PA ~ Defects_by_UAT + Defect_lek + functional_defects +
                  Percen_functional_def + Total_open_defects + Defect_re_open,
                data = Pa)
cooksd <- cooks.distance(pa_model2)
summary(pa_model2)
plot(pa_model2)
# BUG FIX: data(Pa) loads packaged datasets, not user data frames; here it
# only produced a "data set 'Pa' not found" warning, so it is removed.
# data(Pa)
predict_test2 <- predict(pa_model2, Pa)
predict_actual_pa_model2 <- data.frame(predict_test2, Pa)
predict_actual_pa_model2
|
/PA_DATA_MODEL.R
|
no_license
|
chandrasenwadikar/Chandrasen
|
R
| false
| false
| 2,848
|
r
|
## PA Data Analysis ###
### Author : Chandrasen
### Date : 22-March-2018

# Install (only if missing) and load the library used to read the data.
# IMPROVED: the original called install.packages() unconditionally on every run.
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
library(readxl)

# Extracting and reading the data into a data frame.
Pa <- read.csv("C:/Users/chandrasen.wadikar/Desktop/PA data.csv")
View(Pa) # View the data and validate whether it is now stored into Pa.

# Define the variable names used by the models below.
# Only the first 16 columns are renamed, matching the original script.
colnames(Pa)[1:16] <- c(
  "Project_Name", "PM_Lead", "Defects_by_PA", "Defects_by_UAT",
  "Defect_lek", "functional_defects", "Percen_functional_def",
  "Total_open_defects", "Defect_re_open", "Total_tc", "Time_taken_tc",
  "Total_tc_exectuted", "Time_taken_exec", "Total_RA", "test_RA",
  "RA_Coverage"
)

head(Pa)
length(Pa)
summary(Pa)

# Exploratory snippets kept (commented) for reference:
#boxplot(Pa,horizontal = T)
#r1<-Pa[Pa<500]
#r1
#boxplot(r1,horizontal = T)
#hist(r1)
#test21 <-Pa[Pa<]
#bench<-58.0+1.5*IQR(Pa)

# Model-1: linear model of PA defects on the test/defect metrics.
pa_model1 <- lm(Defects_by_PA ~ Defects_by_UAT + Defect_lek + Total_open_defects +
                  Defect_re_open + Total_tc + Time_taken_exec + Total_RA +
                  RA_Coverage, data = Pa)
summary(pa_model1)
plot(pa_model1)
predict_test1 <- predict(pa_model1, Pa)
predict_actual_pa <- data.frame(predict_test1, Pa)
predict_actual_pa

??outlier
# BUG FIX: outlier() is not defined in this session (it lives in the
# 'outliers' package, which is never loaded); the call aborted the script,
# so it is kept commented out.
# outlier(72, 65)

#Output
#Coefficients:
#                     Estimate Std. Error t value Pr(>|t|)
#(Intercept)        20.203477  31.749729   0.636 0.528095
#Defects_by_UAT      9.023746   0.524740  17.197  < 2e-16 ***
#Defect_lek         -1.253687   0.325144  -3.856 0.000399 ***
#Total_open_defects  0.233136   0.544373   0.428 0.670699
#Defect_re_open     -0.133097   0.121168  -1.098 0.278417
#Total_tc            0.004997   0.031166   0.160 0.873404
#Time_taken_exec     0.054533   0.076452   0.713 0.479700
#Total_RA            0.503121   0.712820   0.706 0.484293
#RA_Coverage        -0.090049   0.317830  -0.283 0.778352
# --- (BUG FIX: this divider was an uncommented `---` in the original,
#      which R tried to parse as code; it is now a comment.)
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Residual standard error: 19.72 on 41 degrees of freedom
#(27 observations deleted due to missingness)
#Multiple R-squared: 0.9981, Adjusted R-squared: 0.9978
#F-statistic: 2727 on 8 and 41 DF, p-value: < 2.2e-16

#Model-2 with different analysis
pa_model2 <- lm(Defects_by_PA ~ Defects_by_UAT + Defect_lek + functional_defects +
                  Percen_functional_def + Total_open_defects + Defect_re_open,
                data = Pa)
cooksd <- cooks.distance(pa_model2)
summary(pa_model2)
plot(pa_model2)
# BUG FIX: data(Pa) loads packaged datasets, not user data frames; here it
# only produced a "data set 'Pa' not found" warning, so it is removed.
# data(Pa)
predict_test2 <- predict(pa_model2, Pa)
predict_actual_pa_model2 <- data.frame(predict_test2, Pa)
predict_actual_pa_model2
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.r
\name{umxRAM2Ordinal}
\alias{umxRAM2Ordinal}
\title{umxRAM2Ordinal}
\usage{
umxRAM2Ordinal(model, verbose = T, thresholds = c("deviationBased",
"direct", "ignore", "left_censored"), name = NULL, showEstimates = TRUE,
refModels = NULL, autoRun = getOption("umx_auto_run"))
}
\arguments{
\item{model}{A RAM model to add thresholds to.}
\item{verbose}{Tell the user what was added and why (Default = TRUE)}
\item{thresholds}{How to implement thresholds: c("deviationBased", "direct", "ignore", "left_censored")}
\item{name}{= A new name for the modified model (NULL means leave the name as is)}
\item{showEstimates}{= Whether to show estimates in the summary (if autoRun) TRUE}
\item{refModels}{pass in reference models if available. Use FALSE to suppress computing these if not provided.}
\item{autoRun}{= whether to run the model before returning it: defaults to getOption("umx_auto_run"))}
}
\value{
- \code{\link{mxModel}}
}
\description{
umxRAM2Ordinal: Convert a RAM model whose data contain ordinal variables to a threshold-based model
}
\examples{
\dontrun{
m1 = umxRAM2Ordinal(model)
}
}
\seealso{
- \code{\link{umxRAM}}
Other Advanced Model Building Functions: \code{\link{umxJiggle}},
\code{\link{umxLabel}}, \code{\link{umxLatent}},
\code{\link{umxThresholdMatrix}},
\code{\link{umxValues}}, \code{\link{umx_add_variances}},
\code{\link{umx_fix_first_loadings}},
\code{\link{umx_fix_latents}},
\code{\link{umx_get_bracket_addresses}},
\code{\link{umx_string_to_algebra}}, \code{\link{umx}}
}
|
/man/umxRAM2Ordinal.Rd
|
no_license
|
arinbasu/umx
|
R
| false
| true
| 1,619
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.r
\name{umxRAM2Ordinal}
\alias{umxRAM2Ordinal}
\title{umxRAM2Ordinal}
\usage{
umxRAM2Ordinal(model, verbose = T, thresholds = c("deviationBased",
"direct", "ignore", "left_censored"), name = NULL, showEstimates = TRUE,
refModels = NULL, autoRun = getOption("umx_auto_run"))
}
\arguments{
\item{model}{A RAM model to add thresholds to.}
\item{verbose}{Tell the user what was added and why (Default = TRUE)}
\item{thresholds}{How to implement thresholds: c("deviationBased", "direct", "ignore", "left_censored")}
\item{name}{= A new name for the modified model (NULL means leave the name as is)}
\item{showEstimates}{= Whether to show estimates in the summary (if autoRun) TRUE}
\item{refModels}{pass in reference models if available. Use FALSE to suppress computing these if not provided.}
\item{autoRun}{= whether to run the model before returning it: defaults to getOption("umx_auto_run"))}
}
\value{
- \code{\link{mxModel}}
}
\description{
umxRAM2Ordinal: Convert a RAM model whose data contain ordinal variables to a threshold-based model
}
\examples{
\dontrun{
m1 = umxRAM2Ordinal(model)
}
}
\seealso{
- \code{\link{umxRAM}}
Other Advanced Model Building Functions: \code{\link{umxJiggle}},
\code{\link{umxLabel}}, \code{\link{umxLatent}},
\code{\link{umxThresholdMatrix}},
\code{\link{umxValues}}, \code{\link{umx_add_variances}},
\code{\link{umx_fix_first_loadings}},
\code{\link{umx_fix_latents}},
\code{\link{umx_get_bracket_addresses}},
\code{\link{umx_string_to_algebra}}, \code{\link{umx}}
}
|
# Tests for geojson_read() covering the supported input forms
# (file path, URL, as.location) and the "sp" output class.

test_that("geojson_read works with file inputs", {
  skip_on_cran()

  parsed <- geojson_read(example_sys_file("california.geojson"))

  # Top-level structure of a parsed FeatureCollection.
  expect_type(parsed, "list")
  expect_true(all(c("type", "features") %in% names(parsed)))
  expect_equal(parsed$type, "FeatureCollection")
  expect_type(parsed$features, "list")

  # First feature carries the standard GeoJSON members.
  first_feature <- parsed$features[[1]]
  expect_equal(first_feature$type, "Feature")
  expect_setequal(names(first_feature), c("type", "properties", "geometry"))
})

test_that("geojson_read works with url inputs", {
  skip_on_cran()
  skip_if_offline()

  url <- "https://raw.githubusercontent.com/glynnbird/usstatesgeojson/master/california.geojson"
  parsed <- geojson_read(url)

  expect_type(parsed, "list")
  expect_equal(parsed$type, "FeatureCollection")
  expect_type(parsed$features, "list")

  first_feature <- parsed$features[[1]]
  expect_equal(first_feature$type, "Feature")
  expect_setequal(names(first_feature), c("type", "properties", "geometry"))
})

test_that("geojson_read works with as.location inputs", {
  skip_on_cran()

  location <- as.location(example_sys_file("california.geojson"))
  parsed <- geojson_read(location)

  expect_type(parsed, "list")
  expect_true(all(c("type", "features") %in% names(parsed)))
  expect_equal(parsed$type, "FeatureCollection")
  expect_type(parsed$features, "list")

  first_feature <- parsed$features[[1]]
  expect_equal(first_feature$type, "Feature")
  expect_setequal(names(first_feature), c("type", "properties", "geometry"))
})

test_that("geojson_read works outputing spatial class object", {
  skip_on_cran()

  # KML input converted to an sp SpatialPointsDataFrame.
  spatial <- geojson_read(as.location(example_sys_file("norway_maple.kml")),
                          what = "sp")

  expect_s4_class(spatial, "SpatialPointsDataFrame")
  expect_s3_class(spatial@data, "data.frame")
  expect_s4_class(spatial@proj4string, "CRS")
})
|
/tests/testthat/test-geojson_read.R
|
permissive
|
ropensci/geojsonio
|
R
| false
| false
| 1,708
|
r
|
# Tests for geojson_read() covering the supported input forms
# (file path, URL, as.location) and the "sp" output class.

test_that("geojson_read works with file inputs", {
  skip_on_cran()

  parsed <- geojson_read(example_sys_file("california.geojson"))

  # Top-level structure of a parsed FeatureCollection.
  expect_type(parsed, "list")
  expect_true(all(c("type", "features") %in% names(parsed)))
  expect_equal(parsed$type, "FeatureCollection")
  expect_type(parsed$features, "list")

  # First feature carries the standard GeoJSON members.
  first_feature <- parsed$features[[1]]
  expect_equal(first_feature$type, "Feature")
  expect_setequal(names(first_feature), c("type", "properties", "geometry"))
})

test_that("geojson_read works with url inputs", {
  skip_on_cran()
  skip_if_offline()

  url <- "https://raw.githubusercontent.com/glynnbird/usstatesgeojson/master/california.geojson"
  parsed <- geojson_read(url)

  expect_type(parsed, "list")
  expect_equal(parsed$type, "FeatureCollection")
  expect_type(parsed$features, "list")

  first_feature <- parsed$features[[1]]
  expect_equal(first_feature$type, "Feature")
  expect_setequal(names(first_feature), c("type", "properties", "geometry"))
})

test_that("geojson_read works with as.location inputs", {
  skip_on_cran()

  location <- as.location(example_sys_file("california.geojson"))
  parsed <- geojson_read(location)

  expect_type(parsed, "list")
  expect_true(all(c("type", "features") %in% names(parsed)))
  expect_equal(parsed$type, "FeatureCollection")
  expect_type(parsed$features, "list")

  first_feature <- parsed$features[[1]]
  expect_equal(first_feature$type, "Feature")
  expect_setequal(names(first_feature), c("type", "properties", "geometry"))
})

test_that("geojson_read works outputing spatial class object", {
  skip_on_cran()

  # KML input converted to an sp SpatialPointsDataFrame.
  spatial <- geojson_read(as.location(example_sys_file("norway_maple.kml")),
                          what = "sp")

  expect_s4_class(spatial, "SpatialPointsDataFrame")
  expect_s3_class(spatial@data, "data.frame")
  expect_s4_class(spatial@proj4string, "CRS")
})
|
# Assignment: ASSIGNMENT 2
# Name: Shekhar, Manish
# Date: 2021-22-03

## Check your current working directory using `getwd()`
getwd()
# [1] "/Users/mshekhar/Desktop/R Programming/DSC520/stats_for_data_science/dsc520/dsc520"

## List the contents of the working directory with the `dir()` function
dir()
# [1] "assignments" "data" "dsc520" "dsc520.Rproj"

## If the current directory does not contain the `data` directory, set the
## working directory to project root folder (the folder should contain the `data` directory
## Use `setwd()` if needed
# BUG FIX: the template path below does not exist on this machine, and
# setwd() on a missing directory is an error that halts the whole script
# (the author's own comment says it "was not needed"), so it is commented out.
# setwd("/home/jdoe/Workspaces/dsc520")
setwd("/Users/mshekhar/Desktop/R Programming/DSC520/stats_for_data_science/dsc520/dsc520/dsc520")

## Load the file `data/tidynomicon/person.csv` to `person_df1` using `read.csv`
## Examine the structure of `person_df1` using `str()`
person_df1 <- read.csv("data/tidynomicon/person.csv")
str(person_df1)

## R interpreted names as factors, which is not the behavior we want
## Load the same file to person_df2 using `read.csv` and setting `stringsAsFactors` to `FALSE`
## Examine the structure of `person_df2` using `str()`
person_df2 <- read.csv("data/tidynomicon/person.csv", stringsAsFactors = FALSE)
person_df2
str(person_df2)

## Read the file `data/scores.csv` to `scores_df`
## Display summary statistics using the `summary()` function
scores_df <- read.csv("data/scores.csv")
scores_df
summary(scores_df)
str(scores_df)

## Load the `readxl` library
# IMPROVED: install only when missing instead of re-installing on every run.
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
library("readxl")

## Using the excel_sheets() function from the `readxl` package,
## list the worksheets from the file `data/G04ResultsDetail2004-11-02.xls`
excel_sheets("data/G04ResultsDetail2004-11-02.xls")

## Using the `read_excel` function, read the Voter Turnout sheet
## from the `data/G04ResultsDetail2004-11-02.xls`
## Assign the data to the `voter_turnout_df1`
## The header is in the second row, so make sure to skip the first row
## Examine the structure of `voter_turnout_df1` using `str()`
voter_turnout_df1 <- read_excel("data/G04ResultsDetail2004-11-02.xls", sheet = 'Voter Turnout', skip = 1, col_names = TRUE)
str(voter_turnout_df1)

## Using the `read_excel()` function, read the Voter Turnout sheet
## from `data/G04ResultsDetail2004-11-02.xls`
## Skip the first two rows and manually assign the columns using `col_names`
## Use the names "ward_precint", "ballots_cast", "registered_voters", "voter_turnout"
## Assign the data to the `voter_turnout_df2`
## Examine the structure of `voter_turnout_df2` using `str()`
voter_turnout_df2 <- read_excel("data/G04ResultsDetail2004-11-02.xls", sheet = 'Voter Turnout', skip = 2,
                                col_names = c("ward_precint", "ballots_cast", "registered_voters", "voter_turnout"))
voter_turnout_df2
str(voter_turnout_df2)

## Load the `DBI` library
if (!requireNamespace("DBI", quietly = TRUE)) install.packages("DBI")
library("DBI")

## Create a database connection to `data/tidynomicon/example.db` using the dbConnect() function
## The first argument is the database driver which in this case is `RSQLite::SQLite()`
## The second argument is the path to the database file
## Assign the connection to `db` variable
# RSQLite provides the SQLite driver used by dbConnect().
if (!requireNamespace("RSQLite", quietly = TRUE)) install.packages("RSQLite")
library("RSQLite")
# creating driver object using dbDriver()
drv <- dbDriver('SQLite')
# creating connection object passing driver and the database name
db <- dbConnect(drv, 'data/tidynomicon/example.db')
# checking type of db variable
class(db)

## Query the Person table using the `dbGetQuery` function and the
## `SELECT * FROM PERSON;` SQL statement
## Assign the result to the `person_df` variable
## Use `head()` to look at the first few rows of the `person_df` dataframe
# check the list of tables in the database
dbListTables(db)
person_df <- dbGetQuery(db, "SELECT * FROM PERSON")
head(person_df)

## List the tables using the `dbListTables()` function
## Assign the result to the `table_names` variable
table_names <- dbListTables(db)
class(table_names)
table_names

## Read all of the tables at once using the `lapply` function and assign the result to the `tables` variable
## Use `table_names`, `dbReadTable`, and `conn = db` as arguments
## Print out the tables
# lapply applies dbReadTable() to each element of table_names; the extra
# `conn = db` argument is forwarded to every call.
tables <- lapply(table_names, dbReadTable, conn = db)
tables

## Use the `dbDisconnect` function to disconnect from the database
dbDisconnect(conn = db)

## Import the `jsonlite` library
if (!requireNamespace("jsonlite", quietly = TRUE)) install.packages("jsonlite")
library("jsonlite")

## Convert the scores_df dataframe to JSON using the `toJSON()` function
?toJSON()
toJSON(scores_df)

## Convert the scores dataframe to JSON using the `toJSON()` function with the `pretty=TRUE` option
toJSON(scores_df, pretty = TRUE)
|
/assignment_02_Shekhar_Manish.R
|
no_license
|
datatodecision/stats_for_data_science
|
R
| false
| false
| 5,485
|
r
|
# Assignment: ASSIGNMENT 2
# Name: Shekhar, Manish
# Date: 2021-22-03

## Check your current working directory using `getwd()`
getwd()
# [1] "/Users/mshekhar/Desktop/R Programming/DSC520/stats_for_data_science/dsc520/dsc520"

## List the contents of the working directory with the `dir()` function
dir()
# [1] "assignments" "data" "dsc520" "dsc520.Rproj"

## If the current directory does not contain the `data` directory, set the
## working directory to project root folder (the folder should contain the `data` directory
## Use `setwd()` if needed
# BUG FIX: the template path below does not exist on this machine, and
# setwd() on a missing directory is an error that halts the whole script
# (the author's own comment says it "was not needed"), so it is commented out.
# setwd("/home/jdoe/Workspaces/dsc520")
setwd("/Users/mshekhar/Desktop/R Programming/DSC520/stats_for_data_science/dsc520/dsc520/dsc520")

## Load the file `data/tidynomicon/person.csv` to `person_df1` using `read.csv`
## Examine the structure of `person_df1` using `str()`
person_df1 <- read.csv("data/tidynomicon/person.csv")
str(person_df1)

## R interpreted names as factors, which is not the behavior we want
## Load the same file to person_df2 using `read.csv` and setting `stringsAsFactors` to `FALSE`
## Examine the structure of `person_df2` using `str()`
person_df2 <- read.csv("data/tidynomicon/person.csv", stringsAsFactors = FALSE)
person_df2
str(person_df2)

## Read the file `data/scores.csv` to `scores_df`
## Display summary statistics using the `summary()` function
scores_df <- read.csv("data/scores.csv")
scores_df
summary(scores_df)
str(scores_df)

## Load the `readxl` library
# IMPROVED: install only when missing instead of re-installing on every run.
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
library("readxl")

## Using the excel_sheets() function from the `readxl` package,
## list the worksheets from the file `data/G04ResultsDetail2004-11-02.xls`
excel_sheets("data/G04ResultsDetail2004-11-02.xls")

## Using the `read_excel` function, read the Voter Turnout sheet
## from the `data/G04ResultsDetail2004-11-02.xls`
## Assign the data to the `voter_turnout_df1`
## The header is in the second row, so make sure to skip the first row
## Examine the structure of `voter_turnout_df1` using `str()`
voter_turnout_df1 <- read_excel("data/G04ResultsDetail2004-11-02.xls", sheet = 'Voter Turnout', skip = 1, col_names = TRUE)
str(voter_turnout_df1)

## Using the `read_excel()` function, read the Voter Turnout sheet
## from `data/G04ResultsDetail2004-11-02.xls`
## Skip the first two rows and manually assign the columns using `col_names`
## Use the names "ward_precint", "ballots_cast", "registered_voters", "voter_turnout"
## Assign the data to the `voter_turnout_df2`
## Examine the structure of `voter_turnout_df2` using `str()`
voter_turnout_df2 <- read_excel("data/G04ResultsDetail2004-11-02.xls", sheet = 'Voter Turnout', skip = 2,
                                col_names = c("ward_precint", "ballots_cast", "registered_voters", "voter_turnout"))
voter_turnout_df2
str(voter_turnout_df2)

## Load the `DBI` library
if (!requireNamespace("DBI", quietly = TRUE)) install.packages("DBI")
library("DBI")

## Create a database connection to `data/tidynomicon/example.db` using the dbConnect() function
## The first argument is the database driver which in this case is `RSQLite::SQLite()`
## The second argument is the path to the database file
## Assign the connection to `db` variable
# RSQLite provides the SQLite driver used by dbConnect().
if (!requireNamespace("RSQLite", quietly = TRUE)) install.packages("RSQLite")
library("RSQLite")
# creating driver object using dbDriver()
drv <- dbDriver('SQLite')
# creating connection object passing driver and the database name
db <- dbConnect(drv, 'data/tidynomicon/example.db')
# checking type of db variable
class(db)

## Query the Person table using the `dbGetQuery` function and the
## `SELECT * FROM PERSON;` SQL statement
## Assign the result to the `person_df` variable
## Use `head()` to look at the first few rows of the `person_df` dataframe
# check the list of tables in the database
dbListTables(db)
person_df <- dbGetQuery(db, "SELECT * FROM PERSON")
head(person_df)

## List the tables using the `dbListTables()` function
## Assign the result to the `table_names` variable
table_names <- dbListTables(db)
class(table_names)
table_names

## Read all of the tables at once using the `lapply` function and assign the result to the `tables` variable
## Use `table_names`, `dbReadTable`, and `conn = db` as arguments
## Print out the tables
# lapply applies dbReadTable() to each element of table_names; the extra
# `conn = db` argument is forwarded to every call.
tables <- lapply(table_names, dbReadTable, conn = db)
tables

## Use the `dbDisconnect` function to disconnect from the database
dbDisconnect(conn = db)

## Import the `jsonlite` library
if (!requireNamespace("jsonlite", quietly = TRUE)) install.packages("jsonlite")
library("jsonlite")

## Convert the scores_df dataframe to JSON using the `toJSON()` function
?toJSON()
toJSON(scores_df)

## Convert the scores dataframe to JSON using the `toJSON()` function with the `pretty=TRUE` option
toJSON(scores_df, pretty = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lyl_plot.R
\name{plot.lyl_aggregated}
\alias{plot.lyl_aggregated}
\title{Plot Life Years Lost at one specific age for two different populations obtained from
aggregated data}
\usage{
\method{plot}{lyl_aggregated}(
x,
color_alive = NA,
colors = NA,
labels = c("Population of interest", "Reference population"),
...
)
}
\arguments{
\item{x}{An object of class \code{lyl_aggregated} (obtained with function \code{lyl_aggregated}).}
\item{color_alive}{Color to be used for the censoring category. Default is NA, and
default color is "white".}
\item{colors}{Vector with one color for each cause of death. Default is NA, and
default colors are used.}
\item{labels}{Vector with labels for the two populations (default are "Population of
interest" for \code{data}, and "Reference population" for \code{data0}; which are provided
to function \code{lyl_aggregated}.)}
\item{...}{Additional arguments affecting the plot produced.}
}
\value{
A plot with survival function and stacked cause-specific cumulative incidences for two
populations side by side.
}
\description{
\code{plot} for objects of class \code{lyl_aggregated} creates a figure of Life Years Lost
at one specific age for two different populations.
}
\examples{
# Load simulated data as example
data(aggreg_data)
data(pop_ref)
# Estimate remaining life expectancy and Life Years
# Lost after age 70 years and before age 90 years
lyl_summary_data70 <- lyl_aggregated(data = aggreg_data, age = age, rates = rate,
data0 = pop_ref, age0 = age, surv0 = survival,
age_specific = 70, tau = 90)
# Plot the data
plot(lyl_summary_data70)
}
\references{
\itemize{
\item{Plana-Ripoll et al. lillies -- An R package for the estimation of excess Life Years Lost among patients with a given disease or condition.
\emph{PLoS ONE}. 2020;15(3):e0228073.}
}
}
\seealso{
\itemize{
\item{\code{\link{lyl_aggregated}} for estimation of Life Years Lost at one specific age.}
}
}
|
/man/plot.lyl_aggregated.Rd
|
no_license
|
cran/lillies
|
R
| false
| true
| 2,090
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lyl_plot.R
\name{plot.lyl_aggregated}
\alias{plot.lyl_aggregated}
\title{Plot Life Years Lost at one specific age for two different populations obtained from
aggregated data}
\usage{
\method{plot}{lyl_aggregated}(
x,
color_alive = NA,
colors = NA,
labels = c("Population of interest", "Reference population"),
...
)
}
\arguments{
\item{x}{An object of class \code{lyl_aggregated} (obtained with function \code{lyl_aggregated}).}
\item{color_alive}{Color to be used for the censoring category. Default is NA, and
default color is "white".}
\item{colors}{Vector with one color for each cause of death. Default is NA, and
default colors are used.}
\item{labels}{Vector with labels for the two populations (default are "Population of
interest" for \code{data}, and "Reference population" for \code{data0}; which are provided
to function \code{lyl_aggregated}.)}
\item{...}{Additional arguments affecting the plot produced.}
}
\value{
A plot with survival function and stacked cause-specific cumulative incidences for two
populations side by side.
}
\description{
\code{plot} for objects of class \code{lyl_aggregated} creates a figure of Life Years Lost
at one specific age for two different populations.
}
\examples{
# Load simulated data as example
data(aggreg_data)
data(pop_ref)
# Estimate remaining life expectancy and Life Years
# Lost after age 70 years and before age 90 years
lyl_summary_data70 <- lyl_aggregated(data = aggreg_data, age = age, rates = rate,
data0 = pop_ref, age0 = age, surv0 = survival,
age_specific = 70, tau = 90)
# Plot the data
plot(lyl_summary_data70)
}
\references{
\itemize{
\item{Plana-Ripoll et al. lillies -- An R package for the estimation of excess Life Years Lost among patients with a given disease or condition.
\emph{PLoS ONE}. 2020;15(3):e0228073.}
}
}
\seealso{
\itemize{
\item{\code{\link{lyl_aggregated}} for estimation of Life Years Lost at one specific age.}
}
}
|
# Vector operations (comments translated from Korean).
d <- c(1, 10, 5, 3)
d

# Arithmetic on a vector (applied element-wise).
2 * d   # multiply each element by 2
d - 2
3 * d + 4

# Vector-vector operations --> the vectors must have the same length.
x <- c(2, 4, 6)
y <- c(1, 3, 5)
x + y   # corresponding elements are added together
x * y
x / y
x %% y
z <- x + y
z

# Using functions with vectors.
d <- c(1:10)
d
sum(d)
sum(3 * d)

# Length (number of elements) of a vector.
length(d)

# Mean of (the first five) vector elements.
mean(d[1:5])
max(d)
min(d)

# Sorting (ascending by default).
# IMPROVED: `T` replaced with `TRUE` (T is reassignable) and the positional
# second argument spelled out as `decreasing =`; behavior is unchanged.
sort(d)
sort(x = d, decreasing = TRUE)
sort(d, decreasing = TRUE)

# Finding the median.
v1 <- median(d)
v1
|
/workspace2/RLab_Chap02(๋ณ์์๋ฒกํฐ)/lab04.R
|
no_license
|
mjh1583/BigDataEducation
|
R
| false
| false
| 639
|
r
|
# Vector operations (comments translated from Korean).
d <- c(1, 10, 5, 3)
d

# Arithmetic on a vector (applied element-wise).
2 * d   # multiply each element by 2
d - 2
3 * d + 4

# Vector-vector operations --> the vectors must have the same length.
x <- c(2, 4, 6)
y <- c(1, 3, 5)
x + y   # corresponding elements are added together
x * y
x / y
x %% y
z <- x + y
z

# Using functions with vectors.
d <- c(1:10)
d
sum(d)
sum(3 * d)

# Length (number of elements) of a vector.
length(d)

# Mean of (the first five) vector elements.
mean(d[1:5])
max(d)
min(d)

# Sorting (ascending by default).
# IMPROVED: `T` replaced with `TRUE` (T is reassignable) and the positional
# second argument spelled out as `decreasing =`; behavior is unchanged.
sort(d)
sort(x = d, decreasing = TRUE)
sort(d, decreasing = TRUE)

# Finding the median.
v1 <- median(d)
v1
|
# NOTE(review): hard-coded absolute path; consider a project-relative path.
setwd("C:/Users/Leonie/Desktop/Machine_learning")
#install.packages("httr")
#install.packages("jsonlite")
library("httr")
library("readxl")
library("jsonlite")
library(dplyr)

################# Import the basic company data #####################
## Read the yearly UK extracts (2009-2019) and stack them into one data frame.
## IMPROVED: replaces eleven near-identical read.csv() calls plus a bulk rm()
## with a single lapply over the years; the result (`digiwhist_all`, `dw`)
## is the same, and the per-year intermediates never enter the workspace.
digiwhist_all <- do.call(
  rbind,
  lapply(2009:2019, function(yr) {
    read.csv(sprintf("data-uk-%d.csv", yr), sep = ";", stringsAsFactors = FALSE)
  })
)
dw <- digiwhist_all
#single bid received
mean_single <- tapply(dw$tender_indicator_INTEGRITY_SINGLE_BID, dw$tender_year, mean, na.rm = TRUE)
df_mean_single <- as.data.frame(mean_single)
df_mean_single$tender_year <- c(2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019)
df_mean_single$mean_single <- 100 - df_mean_single$mean_single
#risky advertisement period
mean_ad <- tapply(dw$tender_indicator_INTEGRITY_ADVERTISEMENT_PERIOD, dw$tender_year, mean, na.rm = TRUE)
df_mean_ad <- as.data.frame(mean_ad)
df_mean_ad$tender_year <- c(2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019)
df_mean_ad$mean_ad <- 100 - df_mean_ad$mean_ad
df <- merge(df_mean_single, df_mean_ad, by = "tender_year")
#new company
mean_new <- tapply(dw$tender_indicator_INTEGRITY_NEW_COMPANY, dw$tender_year, mean, na.rm = TRUE)
df_mean_new <- as.data.frame(mean_new)
df_mean_new$tender_year <- c(2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019)
df_mean_new$mean_new <- 100 - df_mean_new$mean_new
df_all <- merge(df, df_mean_new, by = "tender_year")
View(df_all)
jpeg("bids_igraph.jpg",
width=6.8, height=6.8,
units='in',res=600)
plot(x = df_all$tender_year, y = df_all$mean_single, ylim = c(0,100), col = "red",
ylab = "", xlab = "tender year",
main = "Mean tender riskiness indicators over time")
points(x = df_all$tender_year, y = df_all$mean_ad, col = "blue")
points(x = df_all$tender_year, y = df_all$mean_new, col = "green")
legend(2009, 100, legend = c("single bid", "rushed tender", "new company wins"),
col = c("red", "blue", "green"), lty = 1, cex = 0.6)
lines(x = df_all$tender_year, y = df_all$mean_single, col = "red")
lines(x = df_all$tender_year, y = df_all$mean_ad, col = "blue")
lines(x = df_all$tender_year, y = df_all$mean_new, col = "green")
dev.off()
|
/tender.R
|
no_license
|
traghuram/17.835_Group3
|
R
| false
| false
| 3,725
|
r
|
# Build yearly mean "riskiness" indicators from digiwhist UK tender exports
# and plot them over time. The integrity indicators appear to be scored so
# that 100 = least risky -- TODO confirm -- hence values are flipped to
# 100 - mean before plotting.
setwd("C:/Users/Leonie/Desktop/Machine_learning")
#install.packages("httr")
#install.packages("jsonlite")
library("httr")
library("readxl")
library("jsonlite")
library(dplyr)
################# Import the basic company data #####################
# Read every yearly export data-uk-<year>.csv and stack them into one frame
# (replaces eleven copy-pasted read.csv calls).
years <- 2009:2019
digiwhist_all <- do.call(rbind, lapply(years, function(yr) {
  read.csv(sprintf("data-uk-%d.csv", yr), sep = ";", stringsAsFactors = FALSE)
}))
dw <- digiwhist_all

# Yearly mean of one integrity indicator, flipped so that higher = riskier.
# Years are taken from the tapply group names instead of a hard-coded vector,
# so labels stay correct even if a year is absent from the data.
yearly_risk <- function(values, name) {
  m <- tapply(values, dw$tender_year, mean, na.rm = TRUE)
  out <- data.frame(tender_year = as.numeric(names(m)))
  out[[name]] <- 100 - as.numeric(m)
  out
}

# Single bid received.
df_mean_single <- yearly_risk(dw$tender_indicator_INTEGRITY_SINGLE_BID, "mean_single")
# Risky (too short) advertisement period.
df_mean_ad <- yearly_risk(dw$tender_indicator_INTEGRITY_ADVERTISEMENT_PERIOD, "mean_ad")
# Contract won by a new company.
df_mean_new <- yearly_risk(dw$tender_indicator_INTEGRITY_NEW_COMPANY, "mean_new")

df <- merge(df_mean_single, df_mean_ad, by = "tender_year")
df_all <- merge(df, df_mean_new, by = "tender_year")
View(df_all)

# Plot the three indicators over time and save as a high-resolution JPEG.
jpeg("bids_igraph.jpg",
     width = 6.8, height = 6.8,
     units = 'in', res = 600)
plot(x = df_all$tender_year, y = df_all$mean_single, ylim = c(0, 100), col = "red",
     ylab = "", xlab = "tender year",
     main = "Mean tender riskiness indicators over time")
points(x = df_all$tender_year, y = df_all$mean_ad, col = "blue")
points(x = df_all$tender_year, y = df_all$mean_new, col = "green")
legend(2009, 100, legend = c("single bid", "rushed tender", "new company wins"),
       col = c("red", "blue", "green"), lty = 1, cex = 0.6)
lines(x = df_all$tender_year, y = df_all$mean_single, col = "red")
lines(x = df_all$tender_year, y = df_all$mean_ad, col = "blue")
lines(x = df_all$tender_year, y = df_all$mean_new, col = "green")
dev.off()
|
# Build a vertical stack of linked (x-axis-synchronised) dygraphs for the
# requested channels of a CATS tag deployment, with dive phases shaded.
#
# data      parsed CATS record: a list with $main.header$StartTime and one
#           entry per channel under $data, each holding $data and $header
#           ([[2]] appears to be the sample count and [[3]] the sampling
#           interval in seconds -- TODO confirm against the CATS reader),
#           plus optional $dive, $phase, $Static and $Dynamic vectors.
# dive      dive number(s) to restrict the plot to, or NA for the whole record.
# theMain   main title shown on every panel.
# channels  channel names to plot; matched to names(data$data) by edit
#           distance, so close variants are accepted.
# filtered  if TRUE, plot the Static/Dynamic acceleration components
#           instead of the raw signal.
# subsample per-channel decimation factor (1 keeps every sample).
#
# Returns a browsable htmltools tagList of dygraphs (one panel per channel).
mk.dygraph <- function(data=cats, dive=NA, theMain='',
channels=c('Acceleration_X', 'Acceleration_Y', 'Acceleration_Z', 'Depth'),
filtered=F, subsample=c(10, 10, 10, 1)) {
require(dygraphs)
require(RColorBrewer)
require(fields)
require(xts)
require(htmltools)
# Keep millisecond resolution when POSIX times are formatted.
options(digits.secs = 3)
# JavaScript formatters: HH:MM:SS on the axis, HH:MM:SS.mmm on hover.
CustomAxisLabel <- 'function (ms) {
var d = new Date(ms);
return Dygraph.zeropad(d.getHours()) + ":" +
Dygraph.zeropad(d.getMinutes()) + ":" +
Dygraph.zeropad(d.getSeconds());
}'
CustomValueFormat = 'function (ms) {
var d = new Date(ms);
return Dygraph.zeropad(d.getHours()) + ":" +
Dygraph.zeropad(d.getMinutes()) + ":" +
Dygraph.zeropad(d.getSeconds()) + "." +
Dygraph.zeropad(d.getMilliseconds());
}'
# Timestamps for the Depth channel; header[[3]] is taken as the sampling
# interval.
start.time <- data$main.header$StartTime
dive.t <- start.time + (c(0:(length(data$data$Depth$data)-1)) * as.numeric(data$data$Depth$header[[3]]))
# Optional decimation of the Depth channel.
subs <- subsample[match('Depth', channels)]
if(subs>1) {
subs <- seq(1, length(data$data$Depth$data), by=subs)
} else {
subs <- c(1:length(data$data$Depth$data))
}
dive.d <- data$data$Depth$data[subs]
dive.p <- data$data$Depth$phase[subs]
dive.n <- data$data$Depth$dive[subs]
# Combine dive number and phase code; 'NANA' marks samples outside any dive.
d.p <- paste(dive.n, dive.p, sep='')
d.p[which(d.p=='NANA')] <- NA
un.dp <- unique(d.p)
un.dp <- un.dp[which(!is.na(un.dp))]
if(!is.na(dive)) {
# Per-dive start/end times plus phase boundaries. Phase letters look like
# D = descent, B = bottom, A = ascent -- NOTE(review): confirm the coding.
d.start <- dive.t[match(c(1:max(dive.n, na.rm=T)), dive.n)]
d.end <- dive.t[rev(length(dive.n)-match(c(max(dive.n, na.rm=T):1), rev(dive.n)))+1]
b.start <- dive.t[match(un.dp[grep('B', un.dp)], d.p)]
a.start <- dive.t[match(un.dp[grep('A', un.dp)], d.p)]
des.end <- dive.t[(length(d.p)-match(un.dp[grep('D', un.dp)], rev(d.p)))+1]
} else {
# No dive selected: treat the whole record as one interval.
d.start <- head(dive.t, 1)
d.end <- tail(dive.t, 1)
b.start <- d.start
a.start <- d.end
des.end <- d.end
}
# Build one xts object per requested channel.
ts.list <- list()
for(i in 1:length(channels)) {
# Fuzzy-match the requested name against the available channels.
ch <- which.min(adist(channels[i], names(data$data)))
ch.interval <- as.numeric(data$data[[ch]]$header[[3]])
ch.length <- as.numeric(data$data[[ch]]$header[[2]])
if(subsample[i]>1) {
subs <- seq(1, ch.length, by=subsample[i])
} else {
subs <- c(1:ch.length)
}
Times <- start.time+((c(1:ch.length)-1)*ch.interval)[subs]
if(!is.na(dive)) {
# Restrict to the requested dive(s); a range covers multi-dive requests.
if(length(dive)==1) {
which.dive <- which(data$data[[ch]]$dive[subs] %in% dive)
} else {
first.dive <- which(data$data[[ch]]$dive[subs] %in% dive[1])
last.dive <- which(data$data[[ch]]$dive[subs] %in% tail(dive, 1))
which.dive <- range(c(first.dive, last.dive))
}
} else {
which.dive <- c(1:(length(data$data[[ch]]$data[subs])))
}
if(length(grep('Acceleration', channels[i]))==1) {
if(filtered) {
# Filtered view: static and dynamic components side by side.
ts.list[[i]] <- as.xts(data.frame(data$data[[ch]]$Static[subs][which.dive],
data$data[[ch]]$Dynamic[subs][which.dive]),
order.by=Times[which.dive])
} else {
ts.list[[i]] <- as.xts(data$data[[ch]]$data[subs][which.dive], order.by=Times[which.dive])
}
} else {
if(length(grep('Depth', channels[i]))==1) {
# Depth is negated so that deeper values plot downwards.
ts.list[[i]] <- as.xts(-data$data[[ch]]$data[subs][which.dive], order.by=Times[which.dive])
} else {
ts.list[[i]] <- as.xts(data$data[[ch]]$data[subs][which.dive], order.by=Times[which.dive])
}
}
}
names(ts.list) <- channels
## theCol <- rev(tim.colors(length(channels)))
# One colour per channel; panels split the device height evenly.
theCol <- brewer.pal(8, 'Dark2')[c(1:length(channels))]
plot.height <- (0.95*dev.size("px")[2])/length(channels)
# Apply dyShading over each (start, end) interval in turn.
add_shades <- function(x, starts, ends, ...) {
for( i in 1:length(starts) ) {
x <- dyShading(x, from = starts[i] , to = ends[i], ... )
}
x
}
# Assemble the whole dygraph pipeline as one string and eval() it at the
# end. NOTE(review): eval(parse(text=...)) is fragile; building the
# htmltools objects directly would be safer.
dyg.text <- 'browsable(tagList('
for(i in 1:(length(ts.list)-1)) {
y.lab <- names(ts.list)[i]
if(dim(ts.list[[i]])[2]>1) {
# Two-column series (static/dynamic): grey plus the channel colour.
dyg <- paste('dygraph(ts.list[[', i,
']], group="A", main=theMain, height=plot.height) %>% dyOptions(colors=c("grey", "',
theCol[i],
'")) %>% dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#B3E2CD") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8"),', sep='')
dyg.text <- paste(dyg.text, dyg, sep='')
} else {
dyg <- paste('dygraph(ts.list[[', i,
']], group="A", main=theMain, height=plot.height) %>% dyOptions(colors="',
theCol[i],
'") %>% dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#B3E2CD") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8"),', sep='')
dyg.text <- paste(dyg.text, dyg, sep='')
}
}
# The last panel additionally carries the range selector and closes the
# tagList expression.
y.lab <- tail(names(ts.list),1)
if(dim(ts.list[[length(ts.list)]])[2]>1) {
dyg.text <- paste(dyg.text, 'dygraph(ts.list[[length(ts.list)]], group="A", main=theMain, height=plot.height) %>% dyOptions(colors=c("grey", "',
tail(theCol, 1),
'")) %>%dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% add_shades(d.start, des.end, "#B3E2CD") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#8DD3C7") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8") %>% dyRangeSelector()))')
} else {
dyg.text <- paste(dyg.text, 'dygraph(ts.list[[length(ts.list)]], group="A", main=theMain, height=plot.height) %>% dyOptions(colors="',
tail(theCol, 1),
'") %>%dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% add_shades(d.start, des.end, "#B3E2CD") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#8DD3C7") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8") %>% dyRangeSelector()))')
}
# Evaluate the assembled pipeline and return the browsable widget stack.
eval(parse(text=dyg.text))
}
|
/R/mk.dygraph.R
|
no_license
|
embiuw/rCATS
|
R
| false
| false
| 6,554
|
r
|
# Build a vertical stack of linked (x-axis-synchronised) dygraphs for the
# requested channels of a CATS tag deployment, with dive phases shaded.
#
# data      parsed CATS record: a list with $main.header$StartTime and one
#           entry per channel under $data, each holding $data and $header
#           ([[2]] appears to be the sample count and [[3]] the sampling
#           interval in seconds -- TODO confirm against the CATS reader),
#           plus optional $dive, $phase, $Static and $Dynamic vectors.
# dive      dive number(s) to restrict the plot to, or NA for the whole record.
# theMain   main title shown on every panel.
# channels  channel names to plot; matched to names(data$data) by edit
#           distance, so close variants are accepted.
# filtered  if TRUE, plot the Static/Dynamic acceleration components
#           instead of the raw signal.
# subsample per-channel decimation factor (1 keeps every sample).
#
# Returns a browsable htmltools tagList of dygraphs (one panel per channel).
mk.dygraph <- function(data=cats, dive=NA, theMain='',
channels=c('Acceleration_X', 'Acceleration_Y', 'Acceleration_Z', 'Depth'),
filtered=F, subsample=c(10, 10, 10, 1)) {
require(dygraphs)
require(RColorBrewer)
require(fields)
require(xts)
require(htmltools)
# Keep millisecond resolution when POSIX times are formatted.
options(digits.secs = 3)
# JavaScript formatters: HH:MM:SS on the axis, HH:MM:SS.mmm on hover.
CustomAxisLabel <- 'function (ms) {
var d = new Date(ms);
return Dygraph.zeropad(d.getHours()) + ":" +
Dygraph.zeropad(d.getMinutes()) + ":" +
Dygraph.zeropad(d.getSeconds());
}'
CustomValueFormat = 'function (ms) {
var d = new Date(ms);
return Dygraph.zeropad(d.getHours()) + ":" +
Dygraph.zeropad(d.getMinutes()) + ":" +
Dygraph.zeropad(d.getSeconds()) + "." +
Dygraph.zeropad(d.getMilliseconds());
}'
# Timestamps for the Depth channel; header[[3]] is taken as the sampling
# interval.
start.time <- data$main.header$StartTime
dive.t <- start.time + (c(0:(length(data$data$Depth$data)-1)) * as.numeric(data$data$Depth$header[[3]]))
# Optional decimation of the Depth channel.
subs <- subsample[match('Depth', channels)]
if(subs>1) {
subs <- seq(1, length(data$data$Depth$data), by=subs)
} else {
subs <- c(1:length(data$data$Depth$data))
}
dive.d <- data$data$Depth$data[subs]
dive.p <- data$data$Depth$phase[subs]
dive.n <- data$data$Depth$dive[subs]
# Combine dive number and phase code; 'NANA' marks samples outside any dive.
d.p <- paste(dive.n, dive.p, sep='')
d.p[which(d.p=='NANA')] <- NA
un.dp <- unique(d.p)
un.dp <- un.dp[which(!is.na(un.dp))]
if(!is.na(dive)) {
# Per-dive start/end times plus phase boundaries. Phase letters look like
# D = descent, B = bottom, A = ascent -- NOTE(review): confirm the coding.
d.start <- dive.t[match(c(1:max(dive.n, na.rm=T)), dive.n)]
d.end <- dive.t[rev(length(dive.n)-match(c(max(dive.n, na.rm=T):1), rev(dive.n)))+1]
b.start <- dive.t[match(un.dp[grep('B', un.dp)], d.p)]
a.start <- dive.t[match(un.dp[grep('A', un.dp)], d.p)]
des.end <- dive.t[(length(d.p)-match(un.dp[grep('D', un.dp)], rev(d.p)))+1]
} else {
# No dive selected: treat the whole record as one interval.
d.start <- head(dive.t, 1)
d.end <- tail(dive.t, 1)
b.start <- d.start
a.start <- d.end
des.end <- d.end
}
# Build one xts object per requested channel.
ts.list <- list()
for(i in 1:length(channels)) {
# Fuzzy-match the requested name against the available channels.
ch <- which.min(adist(channels[i], names(data$data)))
ch.interval <- as.numeric(data$data[[ch]]$header[[3]])
ch.length <- as.numeric(data$data[[ch]]$header[[2]])
if(subsample[i]>1) {
subs <- seq(1, ch.length, by=subsample[i])
} else {
subs <- c(1:ch.length)
}
Times <- start.time+((c(1:ch.length)-1)*ch.interval)[subs]
if(!is.na(dive)) {
# Restrict to the requested dive(s); a range covers multi-dive requests.
if(length(dive)==1) {
which.dive <- which(data$data[[ch]]$dive[subs] %in% dive)
} else {
first.dive <- which(data$data[[ch]]$dive[subs] %in% dive[1])
last.dive <- which(data$data[[ch]]$dive[subs] %in% tail(dive, 1))
which.dive <- range(c(first.dive, last.dive))
}
} else {
which.dive <- c(1:(length(data$data[[ch]]$data[subs])))
}
if(length(grep('Acceleration', channels[i]))==1) {
if(filtered) {
# Filtered view: static and dynamic components side by side.
ts.list[[i]] <- as.xts(data.frame(data$data[[ch]]$Static[subs][which.dive],
data$data[[ch]]$Dynamic[subs][which.dive]),
order.by=Times[which.dive])
} else {
ts.list[[i]] <- as.xts(data$data[[ch]]$data[subs][which.dive], order.by=Times[which.dive])
}
} else {
if(length(grep('Depth', channels[i]))==1) {
# Depth is negated so that deeper values plot downwards.
ts.list[[i]] <- as.xts(-data$data[[ch]]$data[subs][which.dive], order.by=Times[which.dive])
} else {
ts.list[[i]] <- as.xts(data$data[[ch]]$data[subs][which.dive], order.by=Times[which.dive])
}
}
}
names(ts.list) <- channels
## theCol <- rev(tim.colors(length(channels)))
# One colour per channel; panels split the device height evenly.
theCol <- brewer.pal(8, 'Dark2')[c(1:length(channels))]
plot.height <- (0.95*dev.size("px")[2])/length(channels)
# Apply dyShading over each (start, end) interval in turn.
add_shades <- function(x, starts, ends, ...) {
for( i in 1:length(starts) ) {
x <- dyShading(x, from = starts[i] , to = ends[i], ... )
}
x
}
# Assemble the whole dygraph pipeline as one string and eval() it at the
# end. NOTE(review): eval(parse(text=...)) is fragile; building the
# htmltools objects directly would be safer.
dyg.text <- 'browsable(tagList('
for(i in 1:(length(ts.list)-1)) {
y.lab <- names(ts.list)[i]
if(dim(ts.list[[i]])[2]>1) {
# Two-column series (static/dynamic): grey plus the channel colour.
dyg <- paste('dygraph(ts.list[[', i,
']], group="A", main=theMain, height=plot.height) %>% dyOptions(colors=c("grey", "',
theCol[i],
'")) %>% dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#B3E2CD") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8"),', sep='')
dyg.text <- paste(dyg.text, dyg, sep='')
} else {
dyg <- paste('dygraph(ts.list[[', i,
']], group="A", main=theMain, height=plot.height) %>% dyOptions(colors="',
theCol[i],
'") %>% dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#B3E2CD") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8"),', sep='')
dyg.text <- paste(dyg.text, dyg, sep='')
}
}
# The last panel additionally carries the range selector and closes the
# tagList expression.
y.lab <- tail(names(ts.list),1)
if(dim(ts.list[[length(ts.list)]])[2]>1) {
dyg.text <- paste(dyg.text, 'dygraph(ts.list[[length(ts.list)]], group="A", main=theMain, height=plot.height) %>% dyOptions(colors=c("grey", "',
tail(theCol, 1),
'")) %>%dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% add_shades(d.start, des.end, "#B3E2CD") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#8DD3C7") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8") %>% dyRangeSelector()))')
} else {
dyg.text <- paste(dyg.text, 'dygraph(ts.list[[length(ts.list)]], group="A", main=theMain, height=plot.height) %>% dyOptions(colors="',
tail(theCol, 1),
'") %>%dyAxis("x", axisLabelFormatter=CustomAxisLabel, valueFormatter=CustomValueFormat) %>% dyAxis("y", label="',
y.lab, '") %>% dyLegend(show="never") %>% add_shades(d.start, des.end, "#B3E2CD") %>% dyEvent(d.start) %>% dyEvent(d.end) %>% add_shades(d.start, des.end, "#8DD3C7") %>% add_shades(des.end, a.start, "#FDCDAC") %>% add_shades(a.start, d.end, "#CBD5E8") %>% dyRangeSelector()))')
}
# Evaluate the assembled pipeline and return the browsable widget stack.
eval(parse(text=dyg.text))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_data.R
\name{simulateBatchData}
\alias{simulateBatchData}
\title{Create simulated batch data for testing.}
\usage{
simulateBatchData(N = 2500, p, theta, sds, batch, zz)
}
\arguments{
\item{N}{number of observations}
\item{p}{a vector indicating probability of membership to each component}
\item{theta}{a vector of means, one per component/batch}
\item{sds}{a vector of standard deviations, one per component/batch}
\item{batch}{a vector of labels indicating the batch from which each simulation should come}
\item{zz}{a vector indicating latent variable membership. Can be omitted.}
}
\value{
An object of class 'MultiBatchModel'
}
\description{
Create simulated batch data for testing.
}
\examples{
k <- 3
nbatch <- 3
means <- matrix(c(-1.2, -1.0, -0.8,
-0.2, 0, 0.2,
0.8, 1, 1.2), nbatch, k, byrow=FALSE)
sds <- matrix(0.1, nbatch, k)
N <- 1500
truth <- simulateBatchData(N=N,
batch=rep(letters[1:3], length.out=N),
theta=means,
sds=sds,
p=c(1/5, 1/3, 1-1/3-1/5))
}
|
/man/simulateBatchData.Rd
|
no_license
|
dmckean/CNPBayes
|
R
| false
| true
| 1,206
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_data.R
\name{simulateBatchData}
\alias{simulateBatchData}
\title{Create simulated batch data for testing.}
\usage{
simulateBatchData(N = 2500, p, theta, sds, batch, zz)
}
\arguments{
\item{N}{number of observations}
\item{p}{a vector indicating probability of membership to each component}
\item{theta}{a vector of means, one per component/batch}
\item{sds}{a vector of standard deviations, one per component/batch}
\item{batch}{a vector of labels indicating the batch from which each simulation should come}
\item{zz}{a vector indicating latent variable membership. Can be omitted.}
}
\value{
An object of class 'MultiBatchModel'
}
\description{
Create simulated batch data for testing.
}
\examples{
k <- 3
nbatch <- 3
means <- matrix(c(-1.2, -1.0, -0.8,
-0.2, 0, 0.2,
0.8, 1, 1.2), nbatch, k, byrow=FALSE)
sds <- matrix(0.1, nbatch, k)
N <- 1500
truth <- simulateBatchData(N=N,
batch=rep(letters[1:3], length.out=N),
theta=means,
sds=sds,
p=c(1/5, 1/3, 1-1/3-1/5))
}
|
# Fit a cross-validated elastic-net regression (alpha = 0.35) on the
# ReliefF-selected urinary tract training set and append the fitted
# glmnet path to the model log file.
library(glmnet)
# Column 1 is the response; predictors start at column 4 (columns 2-3 are
# presumably identifiers -- TODO confirm against the training-set layout).
# `header = TRUE` was `head = T`, which only worked via partial matching.
mydata <- read.table("./TrainingSet/ReliefF/urinary_tract.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# NOTE(review): `glm` shadows stats::glm; kept for compatibility with any
# downstream code that reads this variable.
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.35,
                 family = "gaussian", standardize = TRUE)
sink('./Model/EN/ReliefF/urinary_tract/urinary_tract_047.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/ReliefF/urinary_tract/urinary_tract_047.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 373
|
r
|
# Fit a cross-validated elastic-net regression (alpha = 0.35) on the
# ReliefF-selected urinary tract training set and append the fitted
# glmnet path to the model log file.
library(glmnet)
# Column 1 is the response; predictors start at column 4 (columns 2-3 are
# presumably identifiers -- TODO confirm against the training-set layout).
# `header = TRUE` was `head = T`, which only worked via partial matching.
mydata <- read.table("./TrainingSet/ReliefF/urinary_tract.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# NOTE(review): `glm` shadows stats::glm; kept for compatibility with any
# downstream code that reads this variable.
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.35,
                 family = "gaussian", standardize = TRUE)
sink('./Model/EN/ReliefF/urinary_tract/urinary_tract_047.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
# Extracted example for fredr_category_related_tags(). Both calls hit the
# live FRED API and so require an API key to be configured beforehand
# (presumably via fredr's key-setting helper -- TODO confirm).
library(fredr)
### Name: fredr_category_related_tags
### Title: Get the related FRED tags within a category
### Aliases: fredr_category_related_tags
### ** Examples
## No test:
# First, get the tags for the "Production & Business Activity" category
fredr_category_tags(1L)
# Then, get the tags related to "business" and "monthly" for the
# "Production & Business Activity" category
fredr_category_related_tags(category_id = 1L, tag_names = "business;monthly")
## End(No test)
|
/data/genthat_extracted_code/fredr/examples/fredr_category_related_tags.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 484
|
r
|
# Extracted example for fredr_category_related_tags(). Both calls hit the
# live FRED API and so require an API key to be configured beforehand
# (presumably via fredr's key-setting helper -- TODO confirm).
library(fredr)
### Name: fredr_category_related_tags
### Title: Get the related FRED tags within a category
### Aliases: fredr_category_related_tags
### ** Examples
## No test:
# First, get the tags for the "Production & Business Activity" category
fredr_category_tags(1L)
# Then, get the tags related to "business" and "monthly" for the
# "Production & Business Activity" category
fredr_category_related_tags(category_id = 1L, tag_names = "business;monthly")
## End(No test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odds.us2dec.R
\name{odds.us2dec}
\alias{odds.us2dec}
\title{Convert US odds to Decimal odds}
\usage{
odds.us2dec(x)
}
\arguments{
\item{x}{A vector of US odds}
}
\value{
A vector of Decimal odds
}
\description{
Convert US odds to Decimal odds
}
\examples{
odds.us2dec(c(-200,150))
}
|
/man/odds.us2dec.Rd
|
no_license
|
cran/odds.converter
|
R
| false
| true
| 361
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odds.us2dec.R
\name{odds.us2dec}
\alias{odds.us2dec}
\title{Convert US odds to Decimal odds}
\usage{
odds.us2dec(x)
}
\arguments{
\item{x}{A vector of US odds}
}
\value{
A vector of Decimal odds
}
\description{
Convert US odds to Decimal odds
}
\examples{
odds.us2dec(c(-200,150))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reactable.R
\name{reactable}
\alias{reactable}
\title{Create an interactive data table}
\usage{
reactable(data, columns = NULL, columnGroups = NULL, rownames = NULL,
groupBy = NULL, sortable = TRUE, resizable = FALSE,
filterable = FALSE, searchable = FALSE, defaultColDef = NULL,
defaultColGroup = NULL, defaultSortOrder = "asc",
defaultSorted = NULL, pagination = TRUE, defaultPageSize = 10,
showPageSizeOptions = FALSE, pageSizeOptions = c(10, 25, 50, 100),
paginationType = "numbers", showPagination = NULL,
showPageInfo = TRUE, minRows = 1, details = NULL,
selection = NULL, selectionId = NULL, defaultSelected = NULL,
onClick = NULL, highlight = FALSE, outlined = FALSE,
bordered = FALSE, borderless = FALSE, striped = FALSE,
compact = FALSE, wrap = TRUE, showSortIcon = TRUE,
showSortable = FALSE, class = NULL, style = NULL,
rowClass = NULL, rowStyle = NULL, fullWidth = TRUE,
width = "auto", height = "auto", elementId = NULL)
}
\arguments{
\item{data}{A data frame or matrix.}
\item{columns}{Named list of column definitions. See \code{\link[=colDef]{colDef()}}.}
\item{columnGroups}{List of column group definitions. See \code{\link[=colGroup]{colGroup()}}.}
\item{rownames}{Show row names? Defaults to \code{TRUE} if the data has row names.
To customize or group the row names column, use \code{".rownames"} as the
column name.}
\item{groupBy}{Character vector of column names to group by.}
\item{sortable}{Enable sorting? Defaults to \code{TRUE}.}
\item{resizable}{Enable column resizing?}
\item{filterable}{Enable column filtering?}
\item{searchable}{Enable global table searching?}
\item{defaultColDef}{Default column definition used by every column. See \code{\link[=colDef]{colDef()}}.}
\item{defaultColGroup}{Default column group definition used by every column group.
See \code{\link[=colGroup]{colGroup()}}.}
\item{defaultSortOrder}{Default sort order. Either \code{"asc"} for ascending
order or \code{"desc"} for descending order. Defaults to \code{"asc"}.}
\item{defaultSorted}{Optional vector of column names to sort by default.
Or to customize sort order, a named list with values of \code{"asc"} or \code{"desc"}.}
\item{pagination}{Enable pagination? Defaults to \code{TRUE}.}
\item{defaultPageSize}{Default page size for the table. Defaults to 10.}
\item{showPageSizeOptions}{Show page size options?}
\item{pageSizeOptions}{Page size options for the table. Defaults to 10, 25, 50, 100.}
\item{paginationType}{Pagination control to use. Either \code{"numbers"} for page
number buttons (the default), \code{"jump"} for a page jump, or \code{"simple"} to show
'Previous' and 'Next' buttons only.}
\item{showPagination}{Show pagination? Defaults to \code{TRUE} if the table has more
than one page.}
\item{showPageInfo}{Show page info? Defaults to \code{TRUE}.}
\item{minRows}{Minimum number of rows to show per page. Defaults to 1.}
\item{details}{Additional content to display when expanding a row. An R function
that takes a row index argument or a \code{\link[=JS]{JS()}} function that takes
a row info object as an argument. Can also be a \code{\link[=colDef]{colDef()}} to customize the
details expander column.}
\item{selection}{Enable row selection? Either \code{"multiple"} or \code{"single"} for
multiple or single row selection.}
\item{selectionId}{Shiny input ID for the selected rows. The selected rows are
given as a numeric vector of row indices, or \code{NULL} if no rows are selected.}
\item{defaultSelected}{A numeric vector of default selected row indices.}
\item{onClick}{Action to take when clicking a cell. Either \code{"expand"} to expand
the row, \code{"select"} to select the row, or a \code{\link[=JS]{JS()}} function that takes a
row info object, column info object, and table state object as arguments.}
\item{highlight}{Highlight table rows on hover?}
\item{outlined}{Add borders around the table?}
\item{bordered}{Add borders around the table and every cell?}
\item{borderless}{Remove inner borders from table?}
\item{striped}{Add zebra-striping to table rows?}
\item{compact}{Make tables more compact?}
\item{wrap}{Enable text wrapping? If \code{TRUE} (the default), long text will be
wrapped to multiple lines. If \code{FALSE}, text will be truncated to fit on one line.}
\item{showSortIcon}{Show a sort icon when sorting columns?}
\item{showSortable}{Show an indicator on sortable columns?}
\item{class}{Additional CSS classes to apply to the table.}
\item{style}{Inline styles to apply to the table. A named list or character string.
Note that if \code{style} is a named list, property names should be camelCased.}
\item{rowClass}{Additional CSS classes to apply to table rows. A character
string, a \code{\link[=JS]{JS()}} function that takes a row info object and table state object
as arguments, or an R function that takes a row index argument.}
\item{rowStyle}{Inline styles to apply to table rows. A named list, character
string, \code{\link[=JS]{JS()}} function that takes a row info object and table state object
as arguments, or an R function that takes a row index argument.
Note that if \code{rowStyle} is a named list, property names should be camelCased.
If \code{rowStyle} is a \code{\link[=JS]{JS()}} function, it should return a JavaScript object with
camelCased property names.}
\item{fullWidth}{Stretch the table to fill the full width of its container?
Defaults to \code{TRUE}.}
\item{width}{Width in pixels. Defaults to \code{"auto"} for automatic sizing.}
\item{height}{Height in pixels. Defaults to \code{"auto"} for automatic sizing.}
\item{elementId}{Element ID for the widget.}
}
\value{
A \code{reactable} HTML widget that can be used in R Markdown documents
and Shiny applications, or viewed from an R console.
}
\description{
\code{reactable()} creates a data table from tabular data with sorting
and pagination by default. The data table is an HTML widget that can be
used in R Markdown documents and Shiny applications, or viewed from an
R console.
}
\note{
See the \href{https://glin.github.io/reactable}{online documentation} for
additional details and examples.
}
\examples{
# Basic usage
reactable(iris)
# Grouping and aggregation
reactable(iris, groupBy = "Species", columns = list(
Sepal.Length = colDef(aggregate = "count"),
Sepal.Width = colDef(aggregate = "mean"),
Petal.Length = colDef(aggregate = "sum"),
Petal.Width = colDef(aggregate = "max")
))
# Row details
reactable(iris, details = function(index) {
htmltools::div(
"Details for row: ", index,
htmltools::tags$pre(paste(capture.output(iris[index, ]), collapse = "\\n"))
)
})
# Conditional styling
reactable(sleep, columns = list(
extra = colDef(style = function(value) {
if (value > 0) {
color <- "green"
} else if (value < 0) {
color <- "red"
} else {
color <- "#777"
}
list(color = color, fontWeight = "bold")
})
))
}
\seealso{
\code{\link[=renderReactable]{renderReactable()}} and \code{\link[=reactableOutput]{reactableOutput()}} for using reactable
in Shiny applications or interactive R Markdown documents.
}
|
/man/reactable.Rd
|
permissive
|
jienagu/reactable
|
R
| false
| true
| 7,184
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reactable.R
\name{reactable}
\alias{reactable}
\title{Create an interactive data table}
\usage{
reactable(data, columns = NULL, columnGroups = NULL, rownames = NULL,
groupBy = NULL, sortable = TRUE, resizable = FALSE,
filterable = FALSE, searchable = FALSE, defaultColDef = NULL,
defaultColGroup = NULL, defaultSortOrder = "asc",
defaultSorted = NULL, pagination = TRUE, defaultPageSize = 10,
showPageSizeOptions = FALSE, pageSizeOptions = c(10, 25, 50, 100),
paginationType = "numbers", showPagination = NULL,
showPageInfo = TRUE, minRows = 1, details = NULL,
selection = NULL, selectionId = NULL, defaultSelected = NULL,
onClick = NULL, highlight = FALSE, outlined = FALSE,
bordered = FALSE, borderless = FALSE, striped = FALSE,
compact = FALSE, wrap = TRUE, showSortIcon = TRUE,
showSortable = FALSE, class = NULL, style = NULL,
rowClass = NULL, rowStyle = NULL, fullWidth = TRUE,
width = "auto", height = "auto", elementId = NULL)
}
\arguments{
\item{data}{A data frame or matrix.}
\item{columns}{Named list of column definitions. See \code{\link[=colDef]{colDef()}}.}
\item{columnGroups}{List of column group definitions. See \code{\link[=colGroup]{colGroup()}}.}
\item{rownames}{Show row names? Defaults to \code{TRUE} if the data has row names.
To customize or group the row names column, use \code{".rownames"} as the
column name.}
\item{groupBy}{Character vector of column names to group by.}
\item{sortable}{Enable sorting? Defaults to \code{TRUE}.}
\item{resizable}{Enable column resizing?}
\item{filterable}{Enable column filtering?}
\item{searchable}{Enable global table searching?}
\item{defaultColDef}{Default column definition used by every column. See \code{\link[=colDef]{colDef()}}.}
\item{defaultColGroup}{Default column group definition used by every column group.
See \code{\link[=colGroup]{colGroup()}}.}
\item{defaultSortOrder}{Default sort order. Either \code{"asc"} for ascending
order or \code{"desc"} for descending order. Defaults to \code{"asc"}.}
\item{defaultSorted}{Optional vector of column names to sort by default.
Or to customize sort order, a named list with values of \code{"asc"} or \code{"desc"}.}
\item{pagination}{Enable pagination? Defaults to \code{TRUE}.}
\item{defaultPageSize}{Default page size for the table. Defaults to 10.}
\item{showPageSizeOptions}{Show page size options?}
\item{pageSizeOptions}{Page size options for the table. Defaults to 10, 25, 50, 100.}
\item{paginationType}{Pagination control to use. Either \code{"numbers"} for page
number buttons (the default), \code{"jump"} for a page jump, or \code{"simple"} to show
'Previous' and 'Next' buttons only.}
\item{showPagination}{Show pagination? Defaults to \code{TRUE} if the table has more
than one page.}
\item{showPageInfo}{Show page info? Defaults to \code{TRUE}.}
\item{minRows}{Minimum number of rows to show per page. Defaults to 1.}
\item{details}{Additional content to display when expanding a row. An R function
that takes a row index argument or a \code{\link[=JS]{JS()}} function that takes
a row info object as an argument. Can also be a \code{\link[=colDef]{colDef()}} to customize the
details expander column.}
\item{selection}{Enable row selection? Either \code{"multiple"} or \code{"single"} for
multiple or single row selection.}
\item{selectionId}{Shiny input ID for the selected rows. The selected rows are
given as a numeric vector of row indices, or \code{NULL} if no rows are selected.}
\item{defaultSelected}{A numeric vector of default selected row indices.}
\item{onClick}{Action to take when clicking a cell. Either \code{"expand"} to expand
the row, \code{"select"} to select the row, or a \code{\link[=JS]{JS()}} function that takes a
row info object, column info object, and table state object as arguments.}
\item{highlight}{Highlight table rows on hover?}
\item{outlined}{Add borders around the table?}
\item{bordered}{Add borders around the table and every cell?}
\item{borderless}{Remove inner borders from table?}
\item{striped}{Add zebra-striping to table rows?}
\item{compact}{Make tables more compact?}
\item{wrap}{Enable text wrapping? If \code{TRUE} (the default), long text will be
wrapped to multiple lines. If \code{FALSE}, text will be truncated to fit on one line.}
\item{showSortIcon}{Show a sort icon when sorting columns?}
\item{showSortable}{Show an indicator on sortable columns?}
\item{class}{Additional CSS classes to apply to the table.}
\item{style}{Inline styles to apply to the table. A named list or character string.
Note that if \code{style} is a named list, property names should be camelCased.}
\item{rowClass}{Additional CSS classes to apply to table rows. A character
string, a \code{\link[=JS]{JS()}} function that takes a row info object and table state object
as arguments, or an R function that takes a row index argument.}
\item{rowStyle}{Inline styles to apply to table rows. A named list, character
string, \code{\link[=JS]{JS()}} function that takes a row info object and table state object
as arguments, or an R function that takes a row index argument.
Note that if \code{rowStyle} is a named list, property names should be camelCased.
If \code{rowStyle} is a \code{\link[=JS]{JS()}} function, it should return a JavaScript object with
camelCased property names.}
\item{fullWidth}{Stretch the table to fill the full width of its container?
Defaults to \code{TRUE}.}
\item{width}{Width in pixels. Defaults to \code{"auto"} for automatic sizing.}
\item{height}{Height in pixels. Defaults to \code{"auto"} for automatic sizing.}
\item{elementId}{Element ID for the widget.}
}
\value{
A \code{reactable} HTML widget that can be used in R Markdown documents
and Shiny applications, or viewed from an R console.
}
\description{
\code{reactable()} creates a data table from tabular data with sorting
and pagination by default. The data table is an HTML widget that can be
used in R Markdown documents and Shiny applications, or viewed from an
R console.
}
\note{
See the \href{https://glin.github.io/reactable}{online documentation} for
additional details and examples.
}
\examples{
# Basic usage
reactable(iris)
# Grouping and aggregation
reactable(iris, groupBy = "Species", columns = list(
Sepal.Length = colDef(aggregate = "count"),
Sepal.Width = colDef(aggregate = "mean"),
Petal.Length = colDef(aggregate = "sum"),
Petal.Width = colDef(aggregate = "max")
))
# Row details
reactable(iris, details = function(index) {
htmltools::div(
"Details for row: ", index,
htmltools::tags$pre(paste(capture.output(iris[index, ]), collapse = "\\n"))
)
})
# Conditional styling
reactable(sleep, columns = list(
extra = colDef(style = function(value) {
if (value > 0) {
color <- "green"
} else if (value < 0) {
color <- "red"
} else {
color <- "#777"
}
list(color = color, fontWeight = "bold")
})
))
}
\seealso{
\code{\link[=renderReactable]{renderReactable()}} and \code{\link[=reactableOutput]{reactableOutput()}} for using reactable
in Shiny applications or interactive R Markdown documents.
}
|
library(TSdist)

### Name: DissimDistance
### Title: The Dissim distance is calculated.
### Aliases: DissimDistance

### ** Examples

# example.series1 and example.series2 are two numeric series of length
# 100 shipped with the TSdist package; their generation and shape are
# described on the example.series help page.
data(example.series1)
data(example.series2)
help(example.series)

# Dissim distance under the assumption of even sampling:
DissimDistance(example.series1, example.series2)

# Dissim distance under uneven sampling: build a sorted vector of the
# unique sampling time points and use it for both series.
tx <- sort(unique(c(seq(2, 175, 2), seq(7, 175, 7))))
ty <- tx
DissimDistance(example.series1, example.series2, tx, ty)
|
/data/genthat_extracted_code/TSdist/examples/DissimDistance.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 708
|
r
|
library(TSdist)
### Name: DissimDistance
### Title: The Dissim distance is calculated.
### Aliases: DissimDistance
### ** Examples
# The objects example.series1 and example.series2 are two
# numeric series of length 100 contained in the TSdist package.
data(example.series1)
data(example.series2)
# For information on their generation and shape, see the help
# page of example.series.
help(example.series)
# Calculate the Dissim distance assuming evenly sampled series:
DissimDistance(example.series1, example.series2)
# Calculate the Dissim distance assuming uneven sampling:
# tx/ty hold the (unique, ascending) sampling time points.
tx<-unique(c(seq(2, 175, 2), seq(7, 175, 7)))
tx <- tx[order(tx)]  # sort ascending
ty <- tx             # both series share the same sampling times
DissimDistance(example.series1, example.series2, tx, ty)
|
####################################################
#
# This gives me the Rice center Data from the Pier
# ~ 8199 lines of stuff
#
####################################################
# Fetch the Rice Center pier data set (~8199 rows) from a public Google
# Sheets CSV export and return it as a data frame.
#
# Returns: a data frame with Date (POSIXct, EST), an ordered Weekday
#   factor, and metric measurements (AirTemp in C, Rain in cm,
#   WindSpeed in km/h) first, followed by the remaining sensor columns.
getRiceData <- function() {
  suppressMessages( require(RCurl) )
  suppressMessages( require(tidyverse) )
  suppressMessages( require(lubridate) )

  # The URL is split across several strings only to keep lines short.
  long_link <- c( "https://docs.google.com/spreadsheets/d",
                  "/1Mk1YGH9LqjF7drJE-td1G_JkdADOU0eMlrP01WFBT8s",
                  "/pub?gid=0&single=true&output=csv"
  )
  link <- paste(long_link, collapse="")
  url <- getURL( link )
  con <- textConnection( url )
  # Fix: textConnection() returns an already-open connection, which
  # read.csv() will NOT close for us; release it when the function exits.
  on.exit( close(con), add = TRUE )
  rice_data <- read.csv(con)

  # Parse the DateTime column (e.g. "12/31/2016 11:59:59 PM") as EST.
  # Local renamed from `format` so it no longer shadows base::format().
  dt_format <- "%m/%d/%Y %I:%M:%S %p"
  rice_data$Date <- parse_date_time( rice_data$DateTime,
                                     orders=dt_format,
                                     tz="EST" )
  rice_data$DateTime <- NULL

  # Convert imperial measurements to metric and drop the raw columns.
  rice_data$AirTemp <- 5/9 * (rice_data$AirTempF - 32)      # F -> C
  rice_data$AirTempF <- NULL
  rice_data$Rain <- rice_data$Rain_in * 2.54                # in -> cm
  rice_data$Rain_in <- NULL
  rice_data$WindSpeed <- rice_data$WindSpeed_mph * 1.60934  # mph -> km/h
  rice_data$WindSpeed_mph <- NULL

  # Drop sensor columns not used downstream.
  rice_data$PH_mv <- NULL
  rice_data$BGAPC_rfu <- NULL
  rice_data$Depth_ft <- NULL
  rice_data$SurfaceWaterElev_m_levelNad83m <- NULL

  # Ordered weekday factor (Monday first) for grouping/plotting.
  rice_data$Weekday <- factor( weekdays(rice_data$Date),
                               ordered=TRUE,
                               levels=c("Monday","Tuesday", "Wednesday",
                                        "Thursday", "Friday", "Saturday", "Sunday"))

  return( rice_data %>%
            select( Date, Weekday, AirTemp, Rain, WindSpeed, everything() ) )
}
|
/Content/Analyses/Ordination/getRiceData.R
|
no_license
|
anhnguyendepocen/ENVS543
|
R
| false
| false
| 1,683
|
r
|
####################################################
#
# This gives me the Rice center Data from the Pier
# ~ 8199 lines of stuff
#
####################################################
# Fetch the Rice Center pier data set (~8199 rows) from a public Google
# Sheets CSV export and return it as a data frame with metric units and
# an ordered Weekday factor.
getRiceData <- function() {
suppressMessages( require(RCurl) )
suppressMessages( require(tidyverse) )
suppressMessages( require(lubridate) )
# URL split across strings only to keep lines short; reassembled below.
long_link <- c( "https://docs.google.com/spreadsheets/d",
"/1Mk1YGH9LqjF7drJE-td1G_JkdADOU0eMlrP01WFBT8s",
"/pub?gid=0&single=true&output=csv"
)
link <- paste(long_link, collapse="")
url <- getURL( link )
# NOTE(review): this text connection is never closed — read.csv() only
# closes connections it opened itself; consider close(con) / on.exit().
con <- textConnection( url )
rice_data <- read.csv(con)
# Parse the DateTime column (e.g. "12/31/2016 11:59:59 PM") as EST.
# NOTE(review): the local name `format` shadows base::format().
format <- "%m/%d/%Y %I:%M:%S %p"
rice_data$Date <- parse_date_time( rice_data$DateTime,
orders=format,
tz="EST" )
rice_data$DateTime <- NULL
# Convert imperial measurements to metric, dropping the raw columns.
rice_data$AirTemp <- 5/9 * (rice_data$AirTempF - 32)
rice_data$AirTempF <- NULL
rice_data$Rain <- rice_data$Rain_in * 2.54
rice_data$Rain_in <- NULL
rice_data$WindSpeed <- rice_data$WindSpeed_mph * 1.60934
rice_data$WindSpeed_mph <- NULL
# Drop sensor columns not used downstream.
rice_data$PH_mv <- NULL
rice_data$BGAPC_rfu <- NULL
rice_data$Depth_ft <- NULL
rice_data$SurfaceWaterElev_m_levelNad83m <- NULL
# Ordered weekday factor (Monday first) for grouping/plotting.
rice_data$Weekday <- factor( weekdays(rice_data$Date),
ordered=TRUE,
levels=c("Monday","Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday", "Sunday"))
return( rice_data %>%
select( Date, Weekday, AirTemp, Rain, WindSpeed, everything() ) )
}
|
# Read two days (2007-02-01 and 2007-02-02) of the household power
# consumption data. Column names come from a small header read; the bulk
# read skips ahead to the slice of the file containing those dates.
ReadData <- function() {
  col_types <- c(rep("character", 2), rep("numeric", 7))

  # First pass: a handful of rows, only to capture the header names.
  header_chunk <- read.table("household_power_consumption.txt",
                             na.strings = "?", sep = ";",
                             colClasses = col_types, header = TRUE,
                             comment.char = "", nrows = 10)

  # Second pass: jump straight to the region bracketing February 2007
  # (skip/nrows values are specific to this file's layout).
  data <- read.table("household_power_consumption.txt",
                     na.strings = "?", sep = ";",
                     colClasses = col_types, header = TRUE,
                     comment.char = "", skip = 66600, nrows = 3000)
  names(data) <- names(header_chunk)
  rm(header_chunk)

  # Keep only the two target dates (subset() drops rows whose parsed
  # Date is NA, matching the original behavior).
  data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
  data <- subset(data,
                 data$Date == as.Date("2007-02-01", format = "%Y-%m-%d") |
                   data$Date == as.Date("2007-02-02", format = "%Y-%m-%d"))

  data$Time <- strptime(data$Time, "%H:%M:%S")
  data
}
# Plot 1: histogram of Global Active Power, written to plot1.png.
# If the data file is absent, tell the user which file to provide.
if (file.exists("household_power_consumption.txt")) {
  data <- ReadData()
  png("plot1.png", width = 480, height = 480)
  hist(data$Global_active_power,
       xlab = "Global Active Power (kilowatts)",
       main = "Global Active Power", col = "red")
  dev.off()
} else {
  # Fix: the message previously misspelled the file name
  # ("consupmtion") and read "Please located".
  print("Cannot locate the file household_power_consumption.txt in your working directory.")
  print("Please locate the data and extract it there.")
}
|
/plot1.R
|
no_license
|
chajadan/ExData_Plotting1
|
R
| false
| false
| 1,114
|
r
|
# Read two days (2007-02-01 and 2007-02-02) of the household power
# consumption data; returns a data frame with Date/Time parsed.
ReadData <- function() {
# Small header read: only used to capture the column names.
forNames <-read.table("household_power_consumption.txt", na.strings = "?", sep = ";", colClasses = c(rep("character", 2), rep("numeric", 7)), header = TRUE, comment.char = "", nrows = 10)
# Bulk read: skip/nrows jump straight to the slice of the file that
# brackets February 2007 (values specific to this file's layout).
data <- read.table("household_power_consumption.txt", na.strings = "?", sep = ";", colClasses = c(rep("character", 2), rep("numeric", 7)), header = TRUE, comment.char = "", skip = 66600, nrows = 3000)
names(data) <- names(forNames)
rm(forNames)
# Keep only the two target dates; subset() drops rows whose parsed
# Date is NA.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data <- subset(data, data$Date == as.Date("2007-02-01", format = "%Y-%m-%d") | data$Date == as.Date("2007-02-02", format = "%Y-%m-%d"))
# NOTE(review): strptime() returns POSIXlt; storing it in a data frame
# column is unusual — confirm downstream code expects this.
data$Time <- strptime(data$Time, "%H:%M:%S")
data
}
# Plot 1: histogram of Global Active Power, written to plot1.png.
if (file.exists("household_power_consumption.txt")) {
data <- ReadData()
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, xlab = "Global Active Power (kilowatts)", main = "Global Active Power", col = "red")
dev.off()
} else {
# NOTE(review): the message misspells the file name ("consupmtion")
# and should read "Please locate".
print("Cannot locate the file household_power_consupmtion.txt in your working directory.")
print("Please located the data and extract it there.")
}
|
# library(tidyverse)
# library(naniar)
# library(forcats)
# data_rec <-
# read_csv("/Users/gabrielburcea/Rprojects/data/csvdata.csv")
# nrow(distinct(data_rec))
# data_rename <- data_rec %>%
# dplyr::rename(
# id = ID,
# age = Age,
# gender = Gender,
# location = Location,
# country = Country,
# chills = Chills,
# cough = Cough,
# diarrhoea = Diarrhoea,
# fatigue = Fatigue,
# headache = Headcahe,
# pregnant = Pregnant,
# sputum = Sputum,
# temperature = Temperature,
# sneezing = Sneezing,
# loss_of_smell_and_taste = "Loss of smell and taste",
# muscle_ache = 'Muscle Ache',
# nasal_congestion = "Nasal Congestion",
# nausea_vomiting = "Nausea and Vomiting",
# self_diagnosis = "Self Diagnosis",
# shortness_breath = "Shortness of Breath",
# sore_throat = "Sore Throat",
# loss_appetite = "Loss of Appetite",
# chest_pain = "Chest Pain",
# itchy_eyes = "Itchy Eyes",
# joint_pain = "Joint Pain",
# care_home_worker = 'Care Home Worker',
# health_care_worker = "Healthcare Worker",
# date_completed = 'Date Completed',
# how_unwell = "How Unwell",
# number_days_symptom_showing = "Number Of Days Symptoms Showing",
# reason_for_help = "Reason For Helping",
# language = Language,
# health_condition = "Long Standing Health Issues"
# ) %>%
# dplyr::mutate(id = str_sub(id, -10))
# data_com <- data_rename %>%
# rowwise() %>%
# mutate(health_condition =
# str_extract_all(health_condition,
# pattern = "None|Diabetes Type 2|Asthma \\(managed with an inhaler\\)|Obesity|High Blood Pressure \\(hypertension\\)|Long-Standing Lung Condition|Long-Standing Liver Disease|Long-Standing Heart Disease|Long-Standing Kidney Disease|Diabetes Type 1 \\(controlled by insulin\\)"),
# health_condition = paste(health_condition, collapse = ","))
# data_com$health_condition <- sub("^$", "None", data_com$health_condition)
# reason_for_help_levels <-
# c(
# "negative" = "Curious",
# "negative" = "Curious,Self-Isolating With No Symptoms" ,
# "negative" = "Live With Someone With Coronavirus",
# "negative" = "Live With Someone With Coronavirus,Curious" ,
# "negative" = "Live With Someone With Coronavirus,Curious,Self-Isolating With No Symptoms",
# "negative" = "Live With Someone With Coronavirus,Self-Isolating With No Symptoms",
# "negative" = "Muscle Ache",
# "negative" = "No",
# "negative" = "None",
# "negative" = "Reason For Helping",
# "showing symptoms" = "Recovered But Have New Symptoms",
# "showing symptoms" = "Recovered But Have New Symptoms,Curious",
# "showing symptoms" = "Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "showing symptoms" = "Recovered But Have New Symptoms,Self-Isolating With No Symptoms",
# "negative" = "Self-Isolating With No Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested",
# "showing symptoms" = "Showing Symptoms But Not Tested,Curious",
# "showing symptoms" = "Showing Symptoms But Not Tested,Curious,Self-Isolating With No Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested,Live With Someone With Coronavirus",
# "showing symptoms" = "Showing Symptoms But Not Tested,Live With Someone With Coronavirus,Curious",
# "showing symptoms" = "Showing Symptoms But Not Tested,Live With Someone With Coronavirus,Recovered But Have New Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested,Recovered But Have New Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious",
# "showing symptoms" = "Showing Symptoms But Not Tested,Self-Isolating With No Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Curious",
# "showing symptoms" = "Tested Negative But Have Symptoms,Curious,Self-Isolating With No Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Live With Someone With Coronavirus",
# "showing symptoms" = "Tested Negative But Have Symptoms,Live With Someone With Coronavirus,Recovered But Have New Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Self-Isolating With No Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Showing Symptoms But Not Tested",
# "showing symptoms" = "Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Curious",
# "showing symptoms" = "Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "positive" = "Tested Positive",
# "positive" = "Tested Positive,Curious",
# "positive" = "Tested Positive,Live With Someone With Coronavirus",
# "positive" = "Tested Positive,Recovered But Have New Symptoms",
# "positive" = "Tested Positive,Recovered But Have New Symptoms,Curious",
# "positive" = "Tested Positive,Self-Isolating With No Symptom",
# "positive" = "Tested Positive,Showing Symptoms But Not Tested",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Recovered But Have New Symptoms",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Curious",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Live With Someone With Coronavirus,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptom)",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "positive" = "Tested Positive,Self-Isolating With No Symptoms"
# )
# data_comorb <- data_com %>%
# tidyr::separate(
# health_condition,
# c(
# 'Comorbidity_one',
# 'Comorbidity_two',
# 'Comorbidity_three',
# 'Comorbidity_four',
# 'Comorbidity_five',
# 'Comorbidity_six',
# 'Comorbidity_seven',
# 'Comorbidity_eight',
# 'Comorbidity_nine'
# ),
# sep = ","
# )
# data_comorb %>% distinct(Comorbidity_one)
# data_comorb %>% distinct(Comorbidity_two)
# data_comorb %>% distinct(Comorbidity_three)
# data_comorb %>% distinct(Comorbidity_four)
# data_comorb %>% distinct(Comorbidity_five)
# data_comorb %>% distinct(Comorbidity_six)
# data_comorb %>% distinct(Comorbidity_seven)
# data_comorb %>% distinct(Comorbidity_eight)
# data_comorb %>% distinct(Comorbidity_nine)
# data_long_wid <- data_comorb %>%
# tidyr::pivot_longer(cols = starts_with('Comorbidity'),
# names_to = 'Comorbidity_count',
# values_to = 'Comorbidity') %>%
# naniar::replace_with_na(replace = list(Comorbidity = "None")) %>%
# distinct() %>%
# dplyr::group_by(id) %>%
# dplyr::mutate(number_morbidities = sum(!is.na(Comorbidity))) %>%
# tidyr::drop_na('Comorbidity') %>%
# dplyr::select(-Comorbidity_count) %>%
# dplyr::distinct() %>%
# dplyr::mutate(Condition = 'Yes') %>%
# tidyr::pivot_wider(names_from = Comorbidity, values_from = Condition, values_fill = list(Condition = 'No'))
#
# df_comorbidity_unique <- distinct(data_comorb, id, .keep_all = TRUE)
# ###########################################################
# ### Get patients without multiple comorbidities but also count the number of patients
# make sure count on unique number of patients #########
# ##########################################################
# data_c <- df_comorbidity_unique %>%
# tidyr::pivot_longer(cols = starts_with('Comorbidity'),
# names_to = 'Comorbidity_count',
# values_to = 'Comorbidity') %>%
# dplyr::mutate(Comorbidity = if_else(Comorbidity == "None", NA_character_, Comorbidity)) %>%
# distinct() %>%
# dplyr::group_by(id) %>%
# mutate(number_morbidities = sum(!is.na(Comorbidity)))
#
# data_unique_comorb <- data_c %>%
# tidyr::pivot_longer(cols = starts_with('Comorbidity'),
# names_to = 'Comorbidity_count',
# values_to = 'Comorbidity') %>%
# tidyr::drop_na('Comorbidity') %>%
# dplyr::select(-Comorbidity_count) %>%
# dplyr::distinct() %>%
# dplyr::mutate(Condition = 'Yes') %>%
# tidyr::pivot_wider(id_cols = -c(Comorbidity, Condition), names_from = Comorbidity, values_from = Condition, values_fill = list(Condition = 'No')) %>%
# dplyr::select(-Comorbidity_one)
# # # # #################################################
# # # # ######### Get a numeric dataset #################
# # # # #################################################
# data_model <- data_unique_comorb %>% # here make sure the dataset is right - either patients with multiple comorbidities or patients without multiple comorbidities
# dplyr::mutate(covid_tested = forcats::fct_recode(reason_for_help, !!!reason_for_help_levels)) %>%
# dplyr::select(-reason_for_help)
# data_model$gender <- as.factor(data_model$gender)
# data_model$country <- as.factor(data_model$country)
# data_model$chills <- as.factor(data_model$chills)
# data_model$cough <- as.factor(data_model$cough)
# data_model$diarrhoea <- as.factor(data_model$diarrhoea)
# data_model$fatigue <- as.factor(data_model$fatigue)
# data_model$headache <- as.factor(data_model$headache)
# data_model$loss_of_smell_and_taste <- as.factor(data_model$loss_of_smell_and_taste)
# data_model$muscle_ache <- as.factor(data_model$muscle_ache)
# data_model$nasal_congestion <- as.factor(data_model$nasal_congestion)
# data_model$nausea_vomiting <- as.factor(data_model$nausea_vomiting)
# data_model$self_diagnosis <- as.factor(data_model$self_diagnosis)
# data_model$shortness_breath <- as.factor(data_model$shortness_breath)
# data_model$sore_throat <- as.factor(data_model$sore_throat)
# data_model$sputum <- as.factor(data_model$sputum)
# data_model$temperature <- as.factor(data_model$temperature)
# data_model$health_care_worker <- as.factor(data_model$health_care_worker)
# data_model$care_home_worker <- as.factor(data_model$care_home_worker)
# data_model$asthma <- as.factor(data_model$`Asthma (managed with an inhaler)`)
# data_model$diabetes_type_two <- as.factor(data_model$`Diabetes Type 2`)
# data_model$obesity <- as.factor(data_model$Obesity)
# data_model$hypertension <- as.factor(data_model$`High Blood Pressure (hypertension)`)
# data_model$heart_disease <- as.factor(data_model$`Long-Standing Heart Disease`)
# data_model$kidney_disease <- as.factor(data_model$`Long-Standing Kidney Disease`)
# data_model$lung_condition <- as.factor(data_model$`Long-Standing Lung Condition`)
# data_model$liver_disease <- as.factor(data_model$`Long-Standing Liver Disease`)
# data_model$diabetes_type_one <- as.factor(data_model$`Diabetes Type 1 (controlled by insulin)`)
# data_model$how_unwell <- as.factor(data_model$how_unwell)
# data_model$covid_tested <- as.factor(data_model$covid_tested)
# data_model$id <- as.character(data_model$id)
# data_model$age <- as.numeric(data_model$age)
# data_model$gender <- as.factor(data_model$gender)
# data_model$pregnant <- as.factor(data_model$pregnant)
# data_model$date_completed <- as.Date(data_model$date_completed)
# data_model$location <- as.factor(data_model$location)
# data_model$loss_appetite <- as.factor(data_model$loss_appetite)
# data_model$sneezing <- as.factor(data_model$sneezing)
# data_model$chest_pain <- as.factor(data_model$chest_pain)
# data_model$itchy_eyes <- as.factor(data_model$itchy_eyes)
# data_model$joint_pain <- as.factor(data_model$joint_pain)
# # # #### Refactor the levels ##################################################
# data_sel <- data_model %>% # here make sure the dataset is right - either patients with multiple comorbidities or patients without multiple comorbidities
# dplyr::select(
# id,
# covid_tested,
# age,
# gender,
# country,
# location,
# date_completed,
# care_home_worker,
# chills,
# cough,
# diarrhoea,
# fatigue,
# headache,
# health_care_worker,
# how_unwell,
# loss_of_smell_and_taste,
# muscle_ache,
# nasal_congestion,
# nausea_vomiting,
# number_days_symptom_showing,
# pregnant,
# self_diagnosis,
# shortness_breath,
# sore_throat,
# sputum,
# temperature,
# language,
# loss_appetite,
# sneezing,
# chest_pain,
# itchy_eyes,
# joint_pain,
# covid_tested,
# asthma,
# diabetes_type_one,
# diabetes_type_two,
# obesity,
# hypertension,
# heart_disease,
# lung_condition,
# liver_disease,
# kidney_disease,
# number_morbidities
# )
# data_sel %>% distinct(cough)
#
# data_model %>% distinct(chills)
#
# level_key_chills <-
# c( 'Yes' = "Chills",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'No' = "Location",
# 'No' = "Gender")
# level_key_cough <-
# c( 'Yes' = "Cough",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'Yes' = 'Cough')
# data_sel %>% distinct(diarrhoea)
# level_key_diarrhoea <-
# c(
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'No' = "Diarrhoea",
# 'No' ='Country')
# data_sel %>% distinct(fatigue)
# level_key_fatigue <-
# c( 'No' = 'Date Completed',
# 'No' = "Fatigue",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe"
# )
# data_sel %>% distinct(headache)
# level_key_headache <-
# c('No' = 'Headache',
# 'No' = 'Care Home Worker',
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'Yes' = "Headcahe")
# data_sel %>% distinct(loss_of_smell_and_taste)
# level_key_loss_smell_taste <-
# c( 'No' = "Loss of smell and taste",
# 'No' = 'Brazil',
# 'No' = 'Peru',
# 'No' = 'Fatigue',
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(muscle_ache)
# level_key_muschle_ache <-
# c( 'No' = "Muscle Ache",
# 'No' = "2020-08-04 17:11:00",
# 'No' = "2020-08-02 01:55:00",
# 'No' = "2020-08-01 15:11:00",
# 'No' = "Headcahe",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(nasal_congestion)
# level_key_nasal_congestion <-
# c( 'No' = "Healthcare Worker",
# 'Yes' = "Nasal Congestion",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(nausea_vomiting)
# level_key_nausea_vomiting <-
# c( 'No' = "How Unwell",
# 'Yes' = "Nausea and Vomiting",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(self_diagnosis)
# level_key_self_diagnosis <-
# c( 'No' = "None",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'No' = "Self Diagnosis",
# 'No' = "Nasal Congestion")
# data_sel %>% distinct(shortness_breath)
# level_key_short_breath <-
# c( 'No' = "Nausea and Vomiting",
# 'Yes' = "Shortness of Breath",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(sore_throat)
# level_key_sore_throat <-
# c( 'No' = "Number Of Days Symptoms Showing",
# 'No' = '1',
# 'No' = "Sore Throat",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(sputum)
# level_key_sputum <-
# c( 'No' = "None",
# 'No' = "Long-Standing Lung Condition",
# 'No' = "Pregnant",
# "No" = 'High Blood Pressure (hypertension),No,No,Yes,No,2,No,Showing Symptoms But Not Tested,Mild,No,No,No,No,Portuguese,No, No,No,No,No\n380a7278-700f-441f-9c7c-6013e80f2f78,62,Male,"Cd. Madero Tamaulipas',
# 'Yes' = "Sputum",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# level_key_care_home_worker <-
# c(
# 'No' = 'Care Home Worker',
# 'No' = 'Age')
# data_sel %>% distinct(temperature)
# level_key_temperature <-
# c('No' = 'Temperature',
# 'No' = "Mexico",
# 'No' = "Reason For Helping",
# 'Yes' = "37.5-38",
# 'Yes' = "38.1-39",
# 'Yes' = "39.1-41",
# 'Yes' = "38.2-39")
# data_sel %>% distinct(loss_appetite)
# level_key_loss_appetite <-
# c('No' = "Loss of Appetite",
# 'No' = "Shortness of Breath")
# data_sel %>% distinct(sneezing)
# level_key_sneezing <-
# c('No' = "Sneezing",
# 'No' = "Sore Throat")
# data_sel %>% distinct(chest_pain)
# level_key_chest_pain <-
# c('No' = 'Chest Pain',
# 'No' = '0',
# 'No' = 'Sputum')
# data_sel %>% distinct(itchy_eyes)
# level_key_itchy_eyes <-
# c('No' = "Itchy Eyes",
# 'No' = "Temperature")
# data_sel %>% distinct(joint_pain)
# level_key_joint_pain <-
# c('No' = 'Joint Pain',
# 'No' = "Showing Symptoms But Not Tested",
# 'No' = "Language")
# itchy_eyes_t <- table(data_sel$itchy_eyes)
# # # #### Refactor the levels ##################################################
# data_categ_nosev <- data_sel %>%
# dplyr::mutate(chills = forcats::fct_recode(chills, !!!level_key_chills),
# cough = forcats::fct_recode(cough, !!!level_key_cough),
# diarrhoea = forcats::fct_recode(diarrhoea, !!!level_key_diarrhoea),
# fatigue = forcats::fct_recode(fatigue, !!!level_key_fatigue),
# headache = forcats::fct_recode(headache, !!!level_key_headache),
# loss_smell_taste = forcats::fct_recode(loss_of_smell_and_taste, !!!level_key_loss_smell_taste),
# muscle_ache = forcats::fct_recode(muscle_ache, !!!level_key_muschle_ache),
# nasal_congestion = forcats::fct_recode(nasal_congestion, !!!level_key_nasal_congestion),
# nausea_vomiting = forcats::fct_recode(nausea_vomiting, !!!level_key_nausea_vomiting),
# self_diagnosis = forcats::fct_recode(self_diagnosis, !!!level_key_self_diagnosis),
# shortness_breath = forcats::fct_recode(shortness_breath, !!!level_key_short_breath),
# sore_throat = forcats::fct_recode(sore_throat, !!!level_key_sore_throat),
# sputum = forcats::fct_recode(sputum, !!!level_key_sputum),
# temperature = forcats::fct_recode(temperature, !!!level_key_temperature),
# loss_appetite = forcats::fct_recode(loss_appetite, !!!level_key_loss_appetite),
# sneezing = forcats::fct_recode(sneezing, !!!level_key_sneezing),
# chest_pain = forcats::fct_recode(chest_pain, !!!level_key_chest_pain),
# itchy_eyes = forcats::fct_recode(itchy_eyes, !!!level_key_itchy_eyes),
# joint_pain = forcats::fct_recode(joint_pain, !!!level_key_joint_pain))
# sputum_lev <-table(data_categ_nosev$sputum)
# data_categ_nosev_comorbidity_one <- data_categ_nosev
# write.csv(data_categ_nosev_comorbidity_one, file = "/Users/gabrielburcea/Rprojects/stats_data_whole/cleaned_data_18_08_2020.csv", row.names = FALSE)
|
/R/cleaning_scripts/Run_this_first_18.R
|
no_license
|
gabrielburcea/cvindia
|
R
| false
| false
| 19,423
|
r
|
# library(tidyverse)
# library(naniar)
# library(forcats)
# data_rec <-
# read_csv("/Users/gabrielburcea/Rprojects/data/csvdata.csv")
# nrow(distinct(data_rec))
# data_rename <- data_rec %>%
# dplyr::rename(
# id = ID,
# age = Age,
# gender = Gender,
# location = Location,
# country = Country,
# chills = Chills,
# cough = Cough,
# diarrhoea = Diarrhoea,
# fatigue = Fatigue,
# headache = Headcahe,
# pregnant = Pregnant,
# sputum = Sputum,
# temperature = Temperature,
# sneezing = Sneezing,
# loss_of_smell_and_taste = "Loss of smell and taste",
# muscle_ache = 'Muscle Ache',
# nasal_congestion = "Nasal Congestion",
# nausea_vomiting = "Nausea and Vomiting",
# self_diagnosis = "Self Diagnosis",
# shortness_breath = "Shortness of Breath",
# sore_throat = "Sore Throat",
# loss_appetite = "Loss of Appetite",
# chest_pain = "Chest Pain",
# itchy_eyes = "Itchy Eyes",
# joint_pain = "Joint Pain",
# care_home_worker = 'Care Home Worker',
# health_care_worker = "Healthcare Worker",
# date_completed = 'Date Completed',
# how_unwell = "How Unwell",
# number_days_symptom_showing = "Number Of Days Symptoms Showing",
# reason_for_help = "Reason For Helping",
# language = Language,
# health_condition = "Long Standing Health Issues"
# ) %>%
# dplyr::mutate(id = str_sub(id, -10))
# data_com <- data_rename %>%
# rowwise() %>%
# mutate(health_condition =
# str_extract_all(health_condition,
# pattern = "None|Diabetes Type 2|Asthma \\(managed with an inhaler\\)|Obesity|High Blood Pressure \\(hypertension\\)|Long-Standing Lung Condition|Long-Standing Liver Disease|Long-Standing Heart Disease|Long-Standing Kidney Disease|Diabetes Type 1 \\(controlled by insulin\\)"),
# health_condition = paste(health_condition, collapse = ","))
# data_com$health_condition <- sub("^$", "None", data_com$health_condition)
# reason_for_help_levels <-
# c(
# "negative" = "Curious",
# "negative" = "Curious,Self-Isolating With No Symptoms" ,
# "negative" = "Live With Someone With Coronavirus",
# "negative" = "Live With Someone With Coronavirus,Curious" ,
# "negative" = "Live With Someone With Coronavirus,Curious,Self-Isolating With No Symptoms",
# "negative" = "Live With Someone With Coronavirus,Self-Isolating With No Symptoms",
# "negative" = "Muscle Ache",
# "negative" = "No",
# "negative" = "None",
# "negative" = "Reason For Helping",
# "showing symptoms" = "Recovered But Have New Symptoms",
# "showing symptoms" = "Recovered But Have New Symptoms,Curious",
# "showing symptoms" = "Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "showing symptoms" = "Recovered But Have New Symptoms,Self-Isolating With No Symptoms",
# "negative" = "Self-Isolating With No Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested",
# "showing symptoms" = "Showing Symptoms But Not Tested,Curious",
# "showing symptoms" = "Showing Symptoms But Not Tested,Curious,Self-Isolating With No Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested,Live With Someone With Coronavirus",
# "showing symptoms" = "Showing Symptoms But Not Tested,Live With Someone With Coronavirus,Curious",
# "showing symptoms" = "Showing Symptoms But Not Tested,Live With Someone With Coronavirus,Recovered But Have New Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested,Recovered But Have New Symptoms",
# "showing symptoms" = "Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious",
# "showing symptoms" = "Showing Symptoms But Not Tested,Self-Isolating With No Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Curious",
# "showing symptoms" = "Tested Negative But Have Symptoms,Curious,Self-Isolating With No Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Live With Someone With Coronavirus",
# "showing symptoms" = "Tested Negative But Have Symptoms,Live With Someone With Coronavirus,Recovered But Have New Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Self-Isolating With No Symptoms",
# "showing symptoms" = "Tested Negative But Have Symptoms,Showing Symptoms But Not Tested",
# "showing symptoms" = "Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Curious",
# "showing symptoms" = "Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "positive" = "Tested Positive",
# "positive" = "Tested Positive,Curious",
# "positive" = "Tested Positive,Live With Someone With Coronavirus",
# "positive" = "Tested Positive,Recovered But Have New Symptoms",
# "positive" = "Tested Positive,Recovered But Have New Symptoms,Curious",
# "positive" = "Tested Positive,Self-Isolating With No Symptom",
# "positive" = "Tested Positive,Showing Symptoms But Not Tested",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Recovered But Have New Symptoms",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Curious",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Live With Someone With Coronavirus,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptom)",
# "positive" = "Tested Positive,Tested Negative But Have Symptoms,Showing Symptoms But Not Tested,Recovered But Have New Symptoms,Curious,Self-Isolating With No Symptoms",
# "positive" = "Tested Positive,Self-Isolating With No Symptoms"
# )
# data_comorb <- data_com %>%
# tidyr::separate(
# health_condition,
# c(
# 'Comorbidity_one',
# 'Comorbidity_two',
# 'Comorbidity_three',
# 'Comorbidity_four',
# 'Comorbidity_five',
# 'Comorbidity_six',
# 'Comorbidity_seven',
# 'Comorbidity_eight',
# 'Comorbidity_nine'
# ),
# sep = ","
# )
# data_comorb %>% distinct(Comorbidity_one)
# data_comorb %>% distinct(Comorbidity_two)
# data_comorb %>% distinct(Comorbidity_three)
# data_comorb %>% distinct(Comorbidity_four)
# data_comorb %>% distinct(Comorbidity_five)
# data_comorb %>% distinct(Comorbidity_six)
# data_comorb %>% distinct(Comorbidity_seven)
# data_comorb %>% distinct(Comorbidity_eight)
# data_comorb %>% distinct(Comorbidity_nine)
# data_long_wid <- data_comorb %>%
# tidyr::pivot_longer(cols = starts_with('Comorbidity'),
# names_to = 'Comorbidity_count',
# values_to = 'Comorbidity') %>%
# naniar::replace_with_na(replace = list(Comorbidity = "None")) %>%
# distinct() %>%
# dplyr::group_by(id) %>%
# dplyr::mutate(number_morbidities = sum(!is.na(Comorbidity))) %>%
# tidyr::drop_na('Comorbidity') %>%
# dplyr::select(-Comorbidity_count) %>%
# dplyr::distinct() %>%
# dplyr::mutate(Condition = 'Yes') %>%
# tidyr::pivot_wider(names_from = Comorbidity, values_from = Condition, values_fill = list(Condition = 'No'))
#
# df_comorbidity_unique <- distinct(data_comorb, id, .keep_all = TRUE)
# ###########################################################
# ### Get patients without multiple comorbidities but also count the number of patients
# make sure count on unique number of patients #########
# ##########################################################
# data_c <- df_comorbidity_unique %>%
# tidyr::pivot_longer(cols = starts_with('Comorbidity'),
# names_to = 'Comorbidity_count',
# values_to = 'Comorbidity') %>%
# dplyr::mutate(Comorbidity = if_else(Comorbidity == "None", NA_character_, Comorbidity)) %>%
# distinct() %>%
# dplyr::group_by(id) %>%
# mutate(number_morbidities = sum(!is.na(Comorbidity)))
#
# data_unique_comorb <- data_c %>%
# tidyr::pivot_longer(cols = starts_with('Comorbidity'),
# names_to = 'Comorbidity_count',
# values_to = 'Comorbidity') %>%
# tidyr::drop_na('Comorbidity') %>%
# dplyr::select(-Comorbidity_count) %>%
# dplyr::distinct() %>%
# dplyr::mutate(Condition = 'Yes') %>%
# tidyr::pivot_wider(id_cols = -c(Comorbidity, Condition), names_from = Comorbidity, values_from = Condition, values_fill = list(Condition = 'No')) %>%
# dplyr::select(-Comorbidity_one)
# # # # #################################################
# # # # ######### Get a numeric dataset #################
# # # # #################################################
# data_model <- data_unique_comorb %>% # here make sure the dataset is right - either patients with multiple comorbidities or patients without multiple comorbidities
# dplyr::mutate(covid_tested = forcats::fct_recode(reason_for_help, !!!reason_for_help_levels)) %>%
# dplyr::select(-reason_for_help)
# data_model$gender <- as.factor(data_model$gender)
# data_model$country <- as.factor(data_model$country)
# data_model$chills <- as.factor(data_model$chills)
# data_model$cough <- as.factor(data_model$cough)
# data_model$diarrhoea <- as.factor(data_model$diarrhoea)
# data_model$fatigue <- as.factor(data_model$fatigue)
# data_model$headache <- as.factor(data_model$headache)
# data_model$loss_of_smell_and_taste <- as.factor(data_model$loss_of_smell_and_taste)
# data_model$muscle_ache <- as.factor(data_model$muscle_ache)
# data_model$nasal_congestion <- as.factor(data_model$nasal_congestion)
# data_model$nausea_vomiting <- as.factor(data_model$nausea_vomiting)
# data_model$self_diagnosis <- as.factor(data_model$self_diagnosis)
# data_model$shortness_breath <- as.factor(data_model$shortness_breath)
# data_model$sore_throat <- as.factor(data_model$sore_throat)
# data_model$sputum <- as.factor(data_model$sputum)
# data_model$temperature <- as.factor(data_model$temperature)
# data_model$health_care_worker <- as.factor(data_model$health_care_worker)
# data_model$care_home_worker <- as.factor(data_model$care_home_worker)
# data_model$asthma <- as.factor(data_model$`Asthma (managed with an inhaler)`)
# data_model$diabetes_type_two <- as.factor(data_model$`Diabetes Type 2`)
# data_model$obesity <- as.factor(data_model$Obesity)
# data_model$hypertension <- as.factor(data_model$`High Blood Pressure (hypertension)`)
# data_model$heart_disease <- as.factor(data_model$`Long-Standing Heart Disease`)
# data_model$kidney_disease <- as.factor(data_model$`Long-Standing Kidney Disease`)
# data_model$lung_condition <- as.factor(data_model$`Long-Standing Lung Condition`)
# data_model$liver_disease <- as.factor(data_model$`Long-Standing Liver Disease`)
# data_model$diabetes_type_one <- as.factor(data_model$`Diabetes Type 1 (controlled by insulin)`)
# data_model$how_unwell <- as.factor(data_model$how_unwell)
# data_model$covid_tested <- as.factor(data_model$covid_tested)
# data_model$id <- as.character(data_model$id)
# data_model$age <- as.numeric(data_model$age)
# data_model$gender <- as.factor(data_model$gender)
# data_model$pregnant <- as.factor(data_model$pregnant)
# data_model$date_completed <- as.Date(data_model$date_completed)
# data_model$location <- as.factor(data_model$location)
# data_model$loss_appetite <- as.factor(data_model$loss_appetite)
# data_model$sneezing <- as.factor(data_model$sneezing)
# data_model$chest_pain <- as.factor(data_model$chest_pain)
# data_model$itchy_eyes <- as.factor(data_model$itchy_eyes)
# data_model$joint_pain <- as.factor(data_model$joint_pain)
# # # #### Refactor the levels ##################################################
# data_sel <- data_model %>% # here make sure the dataset is right - either patients with multiple comorbidities or patients without multiple comorbidities
# dplyr::select(
# id,
# covid_tested,
# age,
# gender,
# country,
# location,
# date_completed,
# care_home_worker,
# chills,
# cough,
# diarrhoea,
# fatigue,
# headache,
# health_care_worker,
# how_unwell,
# loss_of_smell_and_taste,
# muscle_ache,
# nasal_congestion,
# nausea_vomiting,
# number_days_symptom_showing,
# pregnant,
# self_diagnosis,
# shortness_breath,
# sore_throat,
# sputum,
# temperature,
# language,
# loss_appetite,
# sneezing,
# chest_pain,
# itchy_eyes,
# joint_pain,
# covid_tested,
# asthma,
# diabetes_type_one,
# diabetes_type_two,
# obesity,
# hypertension,
# heart_disease,
# lung_condition,
# liver_disease,
# kidney_disease,
# number_morbidities
# )
# data_sel %>% distinct(cough)
#
# data_model %>% distinct(chills)
#
# level_key_chills <-
# c( 'Yes' = "Chills",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'No' = "Location",
# 'No' = "Gender")
# level_key_cough <-
# c( 'Yes' = "Cough",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'Yes' = 'Cough')
# data_sel %>% distinct(diarrhoea)
# level_key_diarrhoea <-
# c(
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'No' = "Diarrhoea",
# 'No' ='Country')
# data_sel %>% distinct(fatigue)
# level_key_fatigue <-
# c( 'No' = 'Date Completed',
# 'No' = "Fatigue",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe"
# )
# data_sel %>% distinct(headache)
# level_key_headache <-
# c('No' = 'Headache',
# 'No' = 'Care Home Worker',
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'Yes' = "Headcahe")
# data_sel %>% distinct(loss_of_smell_and_taste)
# level_key_loss_smell_taste <-
# c( 'No' = "Loss of smell and taste",
# 'No' = 'Brazil',
# 'No' = 'Peru',
# 'No' = 'Fatigue',
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(muscle_ache)
# level_key_muschle_ache <-
# c( 'No' = "Muscle Ache",
# 'No' = "2020-08-04 17:11:00",
# 'No' = "2020-08-02 01:55:00",
# 'No' = "2020-08-01 15:11:00",
# 'No' = "Headcahe",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(nasal_congestion)
# level_key_nasal_congestion <-
# c( 'No' = "Healthcare Worker",
# 'Yes' = "Nasal Congestion",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(nausea_vomiting)
# level_key_nausea_vomiting <-
# c( 'No' = "How Unwell",
# 'Yes' = "Nausea and Vomiting",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(self_diagnosis)
# level_key_self_diagnosis <-
# c( 'No' = "None",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe",
# 'No' = "Self Diagnosis",
# 'No' = "Nasal Congestion")
# data_sel %>% distinct(shortness_breath)
# level_key_short_breath <-
# c( 'No' = "Nausea and Vomiting",
# 'Yes' = "Shortness of Breath",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(sore_throat)
# level_key_sore_throat <-
# c( 'No' = "Number Of Days Symptoms Showing",
# 'No' = '1',
# 'No' = "Sore Throat",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# data_sel %>% distinct(sputum)
# level_key_sputum <-
# c( 'No' = "None",
# 'No' = "Long-Standing Lung Condition",
# 'No' = "Pregnant",
# "No" = 'High Blood Pressure (hypertension),No,No,Yes,No,2,No,Showing Symptoms But Not Tested,Mild,No,No,No,No,Portuguese,No, No,No,No,No\n380a7278-700f-441f-9c7c-6013e80f2f78,62,Male,"Cd. Madero Tamaulipas',
# 'Yes' = "Sputum",
# 'Yes' = "Mild",
# 'Yes' = "Moderate",
# 'Yes' = "Severe")
# level_key_care_home_worker <-
# c(
# 'No' = 'Care Home Worker',
# 'No' = 'Age')
# data_sel %>% distinct(temperature)
# level_key_temperature <-
# c('No' = 'Temperature',
# 'No' = "Mexico",
# 'No' = "Reason For Helping",
# 'Yes' = "37.5-38",
# 'Yes' = "38.1-39",
# 'Yes' = "39.1-41",
# 'Yes' = "38.2-39")
# data_sel %>% distinct(loss_appetite)
# level_key_loss_appetite <-
# c('No' = "Loss of Appetite",
# 'No' = "Shortness of Breath")
# data_sel %>% distinct(sneezing)
# level_key_sneezing <-
# c('No' = "Sneezing",
# 'No' = "Sore Throat")
# data_sel %>% distinct(chest_pain)
# level_key_chest_pain <-
# c('No' = 'Chest Pain',
# 'No' = '0',
# 'No' = 'Sputum')
# data_sel %>% distinct(itchy_eyes)
# level_key_itchy_eyes <-
# c('No' = "Itchy Eyes",
# 'No' = "Temperature")
# data_sel %>% distinct(joint_pain)
# level_key_joint_pain <-
# c('No' = 'Joint Pain',
# 'No' = "Showing Symptoms But Not Tested",
# 'No' = "Language")
# itchy_eyes_t <- table(data_sel$itchy_eyes)
# # # #### Refactor the levels ##################################################
# data_categ_nosev <- data_sel %>%
# dplyr::mutate(chills = forcats::fct_recode(chills, !!!level_key_chills),
# cough = forcats::fct_recode(cough, !!!level_key_cough),
# diarrhoea = forcats::fct_recode(diarrhoea, !!!level_key_diarrhoea),
# fatigue = forcats::fct_recode(fatigue, !!!level_key_fatigue),
# headache = forcats::fct_recode(headache, !!!level_key_headache),
# loss_smell_taste = forcats::fct_recode(loss_of_smell_and_taste, !!!level_key_loss_smell_taste),
# muscle_ache = forcats::fct_recode(muscle_ache, !!!level_key_muschle_ache),
# nasal_congestion = forcats::fct_recode(nasal_congestion, !!!level_key_nasal_congestion),
# nausea_vomiting = forcats::fct_recode(nausea_vomiting, !!!level_key_nausea_vomiting),
# self_diagnosis = forcats::fct_recode(self_diagnosis, !!!level_key_self_diagnosis),
# shortness_breath = forcats::fct_recode(shortness_breath, !!!level_key_short_breath),
# sore_throat = forcats::fct_recode(sore_throat, !!!level_key_sore_throat),
# sputum = forcats::fct_recode(sputum, !!!level_key_sputum),
# temperature = forcats::fct_recode(temperature, !!!level_key_temperature),
# loss_appetite = forcats::fct_recode(loss_appetite, !!!level_key_loss_appetite),
# sneezing = forcats::fct_recode(sneezing, !!!level_key_sneezing),
# chest_pain = forcats::fct_recode(chest_pain, !!!level_key_chest_pain),
# itchy_eyes = forcats::fct_recode(itchy_eyes, !!!level_key_itchy_eyes),
# joint_pain = forcats::fct_recode(joint_pain, !!!level_key_joint_pain))
# sputum_lev <-table(data_categ_nosev$sputum)
# data_categ_nosev_comorbidity_one <- data_categ_nosev
# write.csv(data_categ_nosev_comorbidity_one, file = "/Users/gabrielburcea/Rprojects/stats_data_whole/cleaned_data_18_08_2020.csv", row.names = FALSE)
|
# Fuzz-driver: feed a 9x9 all-zero double matrix to the internal
# bias-corrected double-centering routine of the multivariance package.
testlist <- list(x = matrix(0, nrow = 9, ncol = 9))
# Invoke the non-exported routine with the constructed argument list.
result <- do.call(multivariance:::doubleCenterBiasCorrected, testlist)
# Dump the structure of the returned object for inspection.
str(result)
|
/multivariance/inst/testfiles/doubleCenterBiasCorrected/libFuzzer_doubleCenterBiasCorrected/doubleCenterBiasCorrected_valgrind_files/1612734888-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 381
|
r
|
# Fuzz-driver: feed a 9x9 all-zero double matrix to the internal
# bias-corrected double-centering routine of the multivariance package.
testlist <- list(x = matrix(0, nrow = 9, ncol = 9))
# Invoke the non-exported routine with the constructed argument list.
result <- do.call(multivariance:::doubleCenterBiasCorrected, testlist)
# Dump the structure of the returned object for inspection.
str(result)
|
library(VGAM)
### Name: identitylink
### Title: Identity Link Function
### Aliases: identitylink negidentity
### Keywords: math models regression
### ** Examples
# Identity link evaluated on the integer grid -5..5.
identitylink((-5):5)
identitylink((-5):5, deriv = 1)  # first derivative
identitylink((-5):5, deriv = 2)  # second derivative
# Negative-identity link on the same grid.
negidentity((-5):5)
negidentity((-5):5, deriv = 1)   # first derivative
negidentity((-5):5, deriv = 2)   # second derivative
|
/data/genthat_extracted_code/VGAM/examples/identitylink.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 336
|
r
|
library(VGAM)
### Name: identitylink
### Title: Identity Link Function
### Aliases: identitylink negidentity
### Keywords: math models regression
### ** Examples
# Identity link evaluated on the integer grid -5..5.
identitylink((-5):5)
identitylink((-5):5, deriv = 1)  # first derivative
identitylink((-5):5, deriv = 2)  # second derivative
# Negative-identity link on the same grid.
negidentity((-5):5)
negidentity((-5):5, deriv = 1)   # first derivative
negidentity((-5):5, deriv = 2)   # second derivative
|
#' ---
#' title: "LGCP Notebook template"
#' author: "Brian Capaldo"
#' date: "4/12/2019"
#' output:
#'   github_document:
#'     toc: true
#' ---
#' Document preamble
#'
#' This is a spin document, full details are available [here](https://rmarkdown.rstudio.com/articles_report_from_r_script.html). This is the template version for use at the LGCP at the NIH and all are welcome to distribute and modify.
#' This document will run as a regular Rscript if called using `source()` or will render into a github flavored markdown document if called using `rmarkdown::render()`.
#' For additional details, see [here](https://rmarkdown.rstudio.com/github_document_format.html)
#'
#'# Introduction
#'
#'Introduce the project and any pertinent background.
#'
#'# Methods
#'
#'Describe the underlying approach to analyzing the data
#'
#'# Results
#'
#'Analysis code and plots goes here.
#+ r example R block
# Example packages loaded for the analysis chunk; not otherwise used in the
# template itself -- replace with the project's actual dependencies.
library(edgeR)
library(tidyverse)
library(annotables)
#'# References
|
/notes/notebook-template-github_document.R
|
no_license
|
CBIIT/lgcp
|
R
| false
| false
| 973
|
r
|
#' ---
#' title: "LGCP Notebook template"
#' author: "Brian Capaldo"
#' date: "4/12/2019"
#' output:
#'   github_document:
#'     toc: true
#' ---
#' Document preamble
#'
#' This is a spin document, full details are available [here](https://rmarkdown.rstudio.com/articles_report_from_r_script.html). This is the template version for use at the LGCP at the NIH and all are welcome to distribute and modify.
#' This document will run as a regular Rscript if called using `source()` or will render into a github flavored markdown document if called using `rmarkdown::render()`.
#' For additional details, see [here](https://rmarkdown.rstudio.com/github_document_format.html)
#'
#'# Introduction
#'
#'Introduce the project and any pertinent background.
#'
#'# Methods
#'
#'Describe the underlying approach to analyzing the data
#'
#'# Results
#'
#'Analysis code and plots goes here.
#+ r example R block
# Example packages loaded for the analysis chunk; not otherwise used in the
# template itself -- replace with the project's actual dependencies.
library(edgeR)
library(tidyverse)
library(annotables)
#'# References
|
# Read in the per-species merged tables, check that they merge cleanly, and
# rename columns; then plotting can start: four circle plots per category and
# three species, to see the overlap of each category.
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged --
# they clobber the caller's session and hard-code a machine-specific path.
rm(list = ls())
setwd("~/Desktop/Dist_matrix_TE_div/merged.tables/")
# Columns 14 and 16 of each table hold that species' classification and
# r-class; rename them per species so they stay distinct after merging.
Bovine <- read.table("Human_Bovinemerged.CE.class", header = TRUE)
colnames(Bovine)[c(14, 16)] <- c("Bovine_Classification","Bovine_r_class")
Dog <- read.table("Human_Dogmerged.CE.class", header = TRUE)
colnames(Dog)[c(14, 16)] <- c("Dog_Classification","Dog_r_class")
Dog.class <- Dog[,c('bin.ID', 'Dog_Classification', 'Dog_r_class')]
Mouse <- read.table("Human_Mousemerged.CE.class", header = TRUE)
colnames(Mouse)[c(14, 16)] <- c("Mouse_Classification","Mouse_r_class")
Mouse.class <- Mouse[,c('bin.ID', 'Mouse_Classification', 'Mouse_r_class')]
# Merge on the first column of each frame (bin.ID in the Dog/Mouse subsets;
# presumably also the bin identifier in Bovine -- TODO confirm) so every bin
# carries all three species' classifications.
all.spec <- merge(Bovine, Dog.class, by.x=1, by.y=1)
all.spec <- merge(all.spec, Mouse.class, by.x = 1, by.y = 1)
# All the data is merged; set up GRanges objects so the bins can be drawn as
# circle plots of how the species compare in human coordinates.
# library() errors immediately when a package is missing; require() (used in
# the original) only returns FALSE and lets the script continue broken.
library(ggbio)
library(GenomicRanges)
library(topGO)
library(GO.db)
library(org.Hs.eg.db)

# --- Chromosome sizes -------------------------------------------------------
# Download the hg19 chromInfo table from UCSC to get per-chromosome lengths.
UCSCspec <- "hg19"
con <- gzcon(url(paste0("http://hgdownload.soe.ucsc.edu/goldenPath/",
                        UCSCspec, "/database/chromInfo.txt.gz")))
txt <- readLines(con)
close(con)  # release the connection (leaked in the original)
dat <- read.table(textConnection(txt))
SEQ <- dat[, 1:2]
colnames(SEQ) <- c("chr", "size")

# Named vector of chromosome lengths, restricted to chromosomes that actually
# occur in the merged bin table.
sLengths <- SEQ$size
names(sLengths) <- SEQ[, "chr"]
sLengths <- sLengths[names(sLengths) %in% as.character(unique(all.spec$Chromosome))]

# Genomic bins, carrying the P_CE column as metadata (presumably a
# conserved-element proportion per bin -- confirm with the upstream pipeline).
hg.bin.gr <- GRanges(seqnames = Rle(all.spec$Chromosome),
                     ranges = IRanges(start = all.spec$Start, end = all.spec$End),
                     seqlengths = sLengths, CE_P = all.spec$P_CE)

# Whole-chromosome ranges used as the ideogram backbone of the circle plots.
hg.chr <- GRanges(seqnames = Rle(names(sLengths)),
                  ranges = IRanges(start = 1, end = sLengths),
                  seqlengths = sLengths)
#groups
# Row indices of `all.spec` for each species x classification combination.
# Suffix convention (inferred from the subsets below -- TODO(review) confirm):
#   two letters = <r_class><Classification>, e.g. Bov.HL selects rows with
#                 Bovine_r_class == "H" AND Bovine_Classification == "L"
#   <letter>A   = *_Classification alone (any r_class)
#   <letter>O   = *_r_class alone (any Classification)
Bov.HH <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "H") & (all.spec$Bovine_r_class == "H"),]))
Mou.HH <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "H") & (all.spec$Mouse_r_class == "H"),]))
Dog.HH <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "H") & (all.spec$Dog_r_class == "H"),]))
Bov.HL <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "L") & (all.spec$Bovine_r_class == "H"),]))
Mou.HL <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "L") & (all.spec$Mouse_r_class == "H"),]))
Dog.HL <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "L") & (all.spec$Dog_r_class == "H"),]))
Bov.LH <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "H") & (all.spec$Bovine_r_class == "L"),]))
Mou.LH <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "H") & (all.spec$Mouse_r_class == "L"),]))
Dog.LH <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "H") & (all.spec$Dog_r_class == "L"),]))
Bov.LL <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "L") & (all.spec$Bovine_r_class == "L"),]))
Mou.LL <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "L") & (all.spec$Mouse_r_class == "L"),]))
Dog.LL <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "L") & (all.spec$Dog_r_class == "L"),]))
# Marginal sets on *_Classification only ("A" suffix).
Bov.HA <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "H"),]))
Mou.HA <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "H"),]))
Dog.HA <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "H"),]))
Bov.LA <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "L"),]))
Mou.LA <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "L"),]))
Dog.LA <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "L"),]))
# Marginal sets on *_r_class only ("O" suffix).
Bov.HO <- as.integer(rownames(all.spec[(all.spec$Bovine_r_class == "H"),]))
Mou.HO <- as.integer(rownames(all.spec[(all.spec$Mouse_r_class == "H"),]))
Dog.HO <- as.integer(rownames(all.spec[(all.spec$Dog_r_class == "H"),]))
Bov.LO <- as.integer(rownames(all.spec[(all.spec$Bovine_r_class == "L"),]))
Mou.LO <- as.integer(rownames(all.spec[(all.spec$Mouse_r_class == "L"),]))
Dog.LO <- as.integer(rownames(all.spec[(all.spec$Dog_r_class == "L"),]))
#plots
# Six circos-style figures, one per bin classification set, each drawing the
# three species as concentric rect tracks inside an ideogram + scale + label
# backbone: Bovine = blue (radius 13), Mouse = red (11), Dog = green (9).
# reduce() merges adjacent/overlapping bins into single rectangles.
# Note: the "A"-suffix marginal sets defined above are not plotted here, and
# only this first figure carries main titles.
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white", main = "high PC2 and high organization")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7, main = "thing")
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.HH]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.HH]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.HH]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# HL bins: *_r_class == "H" and *_Classification == "L".
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.HL]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.HL]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.HL]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# LH bins: *_r_class == "L" and *_Classification == "H".
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.LH]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.LH]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.LH]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# LL bins: both classifications "L".
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.LL]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.LL]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.LL]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# HO bins: *_r_class == "H", any classification.
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.HO]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.HO]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.HO]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# LO bins: *_r_class == "L", any classification.
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.LO]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.LO]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.LO]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# Overlay the per-bin CE_P metadata as a bar track on the last figure.
p <- q + layout_circle(hg.bin.gr, geom = "bar", aes(y = CE_P), grid =FALSE, radius = 15) + scale_size(range = c(1, 2))
p
# Next steps (analysis notes): decide whether to use the union or the
# intersect of the overlapping bins, and be clear about which part of each
# species is being measured, before interpreting the gene content of bins
# with different repeat content.
#
# GO-term enrichment: for each region class, collect the genes overlapping
# its bins and test which biological processes they are enriched for.
# NOTE: IRanges and topGO both define a `score()` generic; whichever package
# is attached last masks the other's version.

# Download the full hg19 RefSeq gene table (refGene) from UCSC.
con <- gzcon(url("http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/refGene.txt.gz"))
txt <- readLines(con)
close(con)  # release the connection (leaked in the original)
hg19 <- read.table(textConnection(txt))

# Map RefSeq accessions (column 2) to Entrez gene IDs. as.character() turns
# unmapped NA results into the literal string "NA", which is then dropped.
hg19eg <- unique(as.character(mget(as.character(hg19[, 2]), org.Hs.egREFSEQ2EG, ifnotfound = NA)))
hg19eg <- hg19eg[!(hg19eg == "NA")]

# Gene universe for topGO: every Entrez ID starts at 0 ("not in region");
# the loop below flips overlapping genes to 1 per region class.
all.genes.eg <- rep(0, length(hg19eg))
names(all.genes.eg) <- hg19eg

# Gene coordinates taken from refGene columns 3, 5 and 6 (chromosome plus,
# presumably, transcription start/end -- confirm against the UCSC schema).
hg.gene.gr <- GRanges(seqnames = Rle(hg19[, 3]),
                      ranges = IRanges(start = hg19[, 5], end = hg19[, 6]))
# Region classes to test; each name refers to an index vector created above
# and is resolved inside the loop with get().
region <- c("Bov.HH","Bov.HL","Bov.LH","Bov.LL",
            "Mou.HH","Mou.HL","Mou.LH","Mou.LL",
            "Dog.HH","Dog.HL","Dog.LH","Dog.LL")

# For each region class: mark the genes whose coordinates overlap that
# region's bins, build a topGOdata object on the BP ontology, run Fisher's
# exact test under four elimination algorithms, then save the top-20 tables
# and the significant-node DAG plots per algorithm.
for (i in seq_along(region)) {  # seq_along() is safe even for an empty vector
  # Reset the gene universe, then flag genes overlapping this region's bins.
  all.genes.eg[] <- 0
  region.loop <- get(region[i])
  OL <- as.matrix(findOverlaps(hg.gene.gr, hg.bin.gr[region.loop]))
  G <- as.character(hg19[OL[, 1], 2])
  # Map overlapping RefSeq accessions to Entrez IDs, dropping unmapped "NA"s.
  m <- unique(as.character(mget(G, org.Hs.egREFSEQ2EG, ifnotfound = NA)))
  m <- m[!(m == "NA")]
  all.genes.eg[names(all.genes.eg) %in% m] <- 1

  # Biological Process ontology; nodeSize = 10 prunes GO terms annotated to
  # fewer than 10 genes.
  GOdata <- new("topGOdata",
                description = "Simple session",
                ontology = "BP",
                allGenes = as.factor(all.genes.eg),
                annot = annFUN.org,
                mapping = "org.Hs.eg.db",
                ID = "entrez",
                nodeSize = 10)

  # Fisher's exact statistic under four term-dependency algorithms.
  resultFisher.pc <- runTest(GOdata, algorithm = "parentchild", statistic = "fisher")
  resultFisher.wt <- runTest(GOdata, algorithm = "weight", statistic = "fisher")
  resultFisher.wt1 <- runTest(GOdata, algorithm = "weight01", statistic = "fisher")
  resultFisher.cl <- runTest(GOdata, algorithm = "classic", statistic = "fisher")

  # Top-20 enrichment tables per algorithm.
  allRes.cl <- GenTable(GOdata, classic_Fisher = resultFisher.cl, orderBy = "classic_Fisher", ranksOf = "classic_Fisher", topNodes = 20)
  allRes.wt <- GenTable(GOdata, weight_Fisher = resultFisher.wt, orderBy = "weight_Fisher", ranksOf = "weight_Fisher", topNodes = 20)
  allRes.wt1 <- GenTable(GOdata, weight_Fisher_01 = resultFisher.wt1, orderBy = "weight_Fisher_01", ranksOf = "weight_Fisher_01", topNodes = 20)
  allRes.pc <- GenTable(GOdata, parent_child_Fisher = resultFisher.pc, orderBy = "parent_child_Fisher", ranksOf = "parent_child_Fisher", topNodes = 20)

  # Save tables (paths identical to the original; paste0 == paste(sep = "")).
  out.dir <- "~/Desktop/Dist_matrix_TE_div/plot_results/"
  write.table(allRes.cl, file = paste0(out.dir, "GO_enrich/classic/", region[i], "_enrichment_classic_sig.txt"), quote = FALSE, sep = "\t", row.names = FALSE )
  write.table(allRes.wt, file = paste0(out.dir, "GO_enrich/weight/", region[i], "_enrichment_weight_sig.txt"), quote = FALSE, sep = "\t", row.names = FALSE )
  write.table(allRes.wt1, file = paste0(out.dir, "GO_enrich/weight01/", region[i], "_enrichment_weight01_sig.txt"), quote = FALSE, sep = "\t", row.names = FALSE )
  write.table(allRes.pc, file = paste0(out.dir, "GO_enrich/parent_child/", region[i], "_enrichment_parent_child_sig.txt"), quote = FALSE, sep = "\t", row.names = FALSE )

  # Save DAG plots of the 5 most significant nodes per algorithm.
  pdf(file = paste0(out.dir, "GO_DAG/classic/", region[i], "_DAG_classic_plot.pdf"), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.cl), firstSigNodes = 5, useInfo = 'all')
  dev.off()
  pdf(file = paste0(out.dir, "GO_DAG/weight/", region[i], "_DAG_weight_plot.pdf"), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.wt), firstSigNodes = 5, useInfo = 'all')
  dev.off()
  pdf(file = paste0(out.dir, "GO_DAG/weight01/", region[i], "_DAG_weight01_plot.pdf"), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.wt1), firstSigNodes = 5, useInfo = 'all')
  dev.off()
  # NOTE(review): "parent_cild" filename typo preserved deliberately so any
  # downstream consumer expecting the existing name keeps working.
  pdf(file = paste0(out.dir, "GO_DAG/parent_child/", region[i], "_DAG_parent_cild_plot.pdf"), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.pc), firstSigNodes = 5, useInfo = 'all')
  dev.off()
}
# Bins shared across species for the LH class. NOTE: the three-species
# intersection is immediately overwritten by the two-species one below --
# kept as in the original exploratory session.
inter <- intersect(hg.bin.gr[Dog.LH], intersect(hg.bin.gr[Mou.LH], hg.bin.gr[Bov.LH]))
inter <- intersect(hg.bin.gr[Dog.LH], hg.bin.gr[Bov.LH])
# BUG FIX: `allRes` was never created anywhere in this file (the loop builds
# allRes.cl/.wt/.wt1/.pc); use the classic-Fisher table, which holds the
# results of the LAST loop iteration (Dog.LL).
goID <- allRes.cl[1, "GO.ID"]
print(showGroupDensity(GOdata, goID, ranks = TRUE))
showGroupDensity(GOdata, allRes.cl[10, "GO.ID"], ranks = TRUE, rm.one = FALSE)
# It may be worth doing a sanity check with the repeat classes in these bins;
# however, the next step will probably come from thinking about what it all means.
|
/region_compare.R
|
no_license
|
ReubenBuck/Dist_matrix_method
|
R
| false
| false
| 14,169
|
r
|
# Read each pairwise human-vs-species merged table, rename the classification
# columns so they do not collide across species, then merge everything on the
# human bin ID so all three species' classifications sit in one data frame.
# NOTE(review): rm(list = ls()) and setwd() make this script non-portable --
# consider removing if this ever becomes reusable code.
rm(list = ls())
setwd("~/Desktop/Dist_matrix_TE_div/merged.tables/")
# NOTE(review): columns 14 and 16 are assumed to be the classification and
# repeat-class columns of each merged table -- confirm against the files.
Bovine <- read.table("Human_Bovinemerged.CE.class", header = TRUE)
colnames(Bovine)[c(14, 16)] <- c("Bovine_Classification","Bovine_r_class")
Dog <- read.table("Human_Dogmerged.CE.class", header = TRUE)
colnames(Dog)[c(14, 16)] <- c("Dog_Classification","Dog_r_class")
# Keep only the ID + classification columns for the joins.
Dog.class <- Dog[,c('bin.ID', 'Dog_Classification', 'Dog_r_class')]
Mouse <- read.table("Human_Mousemerged.CE.class", header = TRUE)
colnames(Mouse)[c(14, 16)] <- c("Mouse_Classification","Mouse_r_class")
Mouse.class <- Mouse[,c('bin.ID', 'Mouse_Classification', 'Mouse_r_class')]
# Merge on column 1 (bin.ID) in both tables.
all.spec <- merge(Bovine, Dog.class, by.x=1, by.y=1)
all.spec <- merge(all.spec, Mouse.class, by.x = 1, by.y = 1)
# All the data is merged; load the packages used for plotting and GO analysis.
# library() rather than require(): require() only warns (returns FALSE) on a
# missing package, deferring the failure to a confusing point later.
library(ggbio)
library(GenomicRanges)
library(topGO)
library(GO.db)
library(org.Hs.eg.db)
# Load the data into GenomicRanges objects for plotting.
UCSCspec <- "hg19"
# Chromosome sizes from the UCSC chromInfo table for the chosen assembly.
con <- gzcon(url(paste0("http://hgdownload.soe.ucsc.edu/goldenPath/", UCSCspec,
                        "/database/chromInfo.txt.gz")))
txt <- readLines(con)
close(con)  # avoid leaking the open connection
dat <- read.table(textConnection(txt))
# chromInfo columns 1-2: chromosome name and length in bp.
SEQ <- dat[, 1:2]
colnames(SEQ) <- c("chr", "size")
sLengths <- SEQ$size
names(sLengths) <- SEQ[, "chr"]
# Keep only chromosomes that actually appear in the merged bin table
# (drops the chrUn_*/random contigs).
sLengths <- sLengths[names(sLengths) %in% as.character(unique(all.spec$Chromosome))]
# One range per genomic bin, carrying the conserved-element proportion.
hg.bin.gr <- GRanges(seqnames = Rle(all.spec$Chromosome),
                     ranges = IRanges(start = all.spec$Start, end = all.spec$End),
                     seqlengths = sLengths, CE_P = all.spec$P_CE)
# One range per whole chromosome, used as the plot backbone.
hg.chr <- GRanges(seqnames = Rle(names(sLengths)),
                  ranges = IRanges(start = 1, end = sLengths),
                  seqlengths = sLengths)
#groups
# Row indices of all.spec bins for every combination of the two class labels
# (*_Classification and *_r_class, each "H"/"L") per species.
# NOTE(review): for the paired codes the letter order appears to be
# <r_class><Classification> (e.g. Bov.HL selects Classification == "L" AND
# r_class == "H"), while the one-sided codes use <Classification>A and
# <r_class>O -- confirm the intended naming before relying on it.
# NOTE(review): as.integer(rownames(df[cond, ])) relies on all.spec having
# default sequential rownames (true right after merge()); which(cond) would
# be the robust equivalent if NAs are impossible in the condition.
Bov.HH <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "H") & (all.spec$Bovine_r_class == "H"),]))
Mou.HH <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "H") & (all.spec$Mouse_r_class == "H"),]))
Dog.HH <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "H") & (all.spec$Dog_r_class == "H"),]))
Bov.HL <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "L") & (all.spec$Bovine_r_class == "H"),]))
Mou.HL <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "L") & (all.spec$Mouse_r_class == "H"),]))
Dog.HL <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "L") & (all.spec$Dog_r_class == "H"),]))
Bov.LH <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "H") & (all.spec$Bovine_r_class == "L"),]))
Mou.LH <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "H") & (all.spec$Mouse_r_class == "L"),]))
Dog.LH <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "H") & (all.spec$Dog_r_class == "L"),]))
Bov.LL <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "L") & (all.spec$Bovine_r_class == "L"),]))
Mou.LL <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "L") & (all.spec$Mouse_r_class == "L"),]))
Dog.LL <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "L") & (all.spec$Dog_r_class == "L"),]))
# One-sided selections: fixed Classification, any r_class ("A").
Bov.HA <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "H"),]))
Mou.HA <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "H"),]))
Dog.HA <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "H"),]))
Bov.LA <- as.integer(rownames(all.spec[(all.spec$Bovine_Classification == "L"),]))
Mou.LA <- as.integer(rownames(all.spec[(all.spec$Mouse_Classification == "L"),]))
Dog.LA <- as.integer(rownames(all.spec[(all.spec$Dog_Classification == "L"),]))
# One-sided selections: fixed r_class, any Classification ("O").
Bov.HO <- as.integer(rownames(all.spec[(all.spec$Bovine_r_class == "H"),]))
Mou.HO <- as.integer(rownames(all.spec[(all.spec$Mouse_r_class == "H"),]))
Dog.HO <- as.integer(rownames(all.spec[(all.spec$Dog_r_class == "H"),]))
Bov.LO <- as.integer(rownames(all.spec[(all.spec$Bovine_r_class == "L"),]))
Mou.LO <- as.integer(rownames(all.spec[(all.spec$Mouse_r_class == "L"),]))
Dog.LO <- as.integer(rownames(all.spec[(all.spec$Dog_r_class == "L"),]))
#plots
# One ggbio circle plot per class combination. Each plot draws the
# chromosome ideogram backbone, a scale, chromosome labels, and one
# rect track per species (blue = bovine, red = mouse, green = dog) at
# radii 13/11/9; reduce() merges adjacent bins into contiguous blocks.
# HH bins ("high PC2 and high organization").
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white", main = "high PC2 and high organization")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7, main = "thing")
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.HH]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.HH]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.HH]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# HL bins.
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.HL]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.HL]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.HL]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# LH bins.
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.LH]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.LH]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.LH]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# LL bins.
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.LL]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.LL]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.LL]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# HO bins (high r_class, any classification).
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.HO]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.HO]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.HO]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# LO bins (low r_class, any classification).
q <- autoplot((hg.chr),layout = "circle",fill = "white", color = "white")
q <- q + layout_circle(hg.chr, geom = "ideo", fill = "white", color= "black", trackWidth= 6.6, radius = 8.7)
q <- q + layout_circle(hg.chr, geom = "scale", size = 2, radius = 15, trackWidth = 1)
q <- q + layout_circle(hg.chr, geom = "text", aes(label = seqnames), vjust = 0, size = 3, radius = 16)
q <- q + layout_circle(reduce(hg.bin.gr[Bov.LO]), geom = "rect", color= "blue", fill = "blue", radius = 13, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Mou.LO]), geom = "rect", color = "red", fill = "red", radius = 11, trackWidth = 2)
q <- q + layout_circle(reduce(hg.bin.gr[Dog.LO]), geom = "rect", color = "green", fill = "green", radius = 9, trackWidth = 2)
print(q)
# Add a bar track of the conserved-element proportion to the last plot.
p <- q + layout_circle(hg.bin.gr, geom = "bar", aes(y = CE_P), grid =FALSE, radius = 15) + scale_size(range = c(1, 2))
p
# The next step is to start thinking about what genes are in these bins with different repeat content,
# also how to think about these bins, how much they overlap, and whether to use the union or the intersect.
# When taking measurements, remember to think about which part of each species we are measuring.
#
#
#
#
# In this script, after all the circle work, I can start looking for GO terms.
# It is probably best to loop this somehow to go through each term:
# get the genes that overlap a region
# and find what processes they are enriched for.
# Note that the IRanges and topGO `score` functions mask each other.
# Download all RefSeq data and subset appropriately.
# Download the gzipped UCSC refGene table for hg19 and parse it.
con <- gzcon(url("http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/refGene.txt.gz"))
txt <- readLines(con)
hg19 <- read.table(textConnection(txt))
# Column 2 is the RefSeq accession; map each to an Entrez gene ID via
# org.Hs.eg. Unmapped accessions come back as NA and, after as.character(),
# become the literal string "NA", which is filtered out on the next line.
hg19eg <- unique(as.character(mget(as.character(hg19[,2]), org.Hs.egREFSEQ2EG,ifnotfound = NA)))
hg19eg <- hg19eg[!(hg19eg == "NA")]
# Gene universe for topGO: one 0/1 score per Entrez ID (1 = gene overlaps
# the region of interest; set inside the loop below).
all.genes.eg <- rep(0,length(hg19eg))
names(all.genes.eg) <- hg19eg
# Gene coordinates. NOTE(review): columns 3/5/6 are assumed to be
# chrom/txStart/txEnd of the UCSC refGene schema -- confirm against the dump.
hg.gene.gr <- GRanges(seqnames = Rle(hg19[,3]),
ranges = IRanges(start = hg19[,5], end = hg19[,6]))
# Names of the per-species bin-class index vectors defined above; each is
# looked up with get() inside the enrichment loop.
region <- c("Bov.HH","Bov.HL","Bov.LH","Bov.LL",
"Mou.HH","Mou.HL","Mou.LH","Mou.LL",
"Dog.HH","Dog.HL","Dog.LH","Dog.LL")
# For each region set: score the gene universe 0/1 by overlap with that
# region's bins, run a topGO biological-process enrichment with four Fisher
# algorithms, and save the top-20 tables plus DAG plots for each algorithm.
out_base <- "~/Desktop/Dist_matrix_TE_div/plot_results"
for (i in seq_along(region)) {  # seq_along() instead of seq() on a vector
  # Reset the gene universe to "not in region" (idiomatic whole-vector reset).
  all.genes.eg[] <- 0
  region.loop <- get(region[i])  # look up e.g. Bov.HH by name
  # Genes whose coordinates overlap any bin in the current region set.
  OL <- as.matrix(findOverlaps(hg.gene.gr, hg.bin.gr[region.loop]))
  G <- as.character(hg19[OL[, 1], 2])
  # RefSeq -> Entrez; drop accessions that failed to map (string "NA").
  m <- unique(as.character(mget(G, org.Hs.egREFSEQ2EG, ifnotfound = NA)))
  m <- m[!(m == "NA")]
  all.genes.eg[names(all.genes.eg) %in% m] <- 1
  GOdata <- new("topGOdata",
                description = "Simple session",
                ontology = "BP",
                allGenes = as.factor(all.genes.eg),
                annot = annFUN.org,
                mapping = "org.Hs.eg.db",
                ID = "entrez",
                nodeSize = 10)  # ignore GO terms with < 10 annotated genes
  resultFisher.pc <- runTest(GOdata, algorithm = "parentchild", statistic = "fisher")
  resultFisher.wt <- runTest(GOdata, algorithm = "weight", statistic = "fisher")
  resultFisher.wt1 <- runTest(GOdata, algorithm = "weight01", statistic = "fisher")
  resultFisher.cl <- runTest(GOdata, algorithm = "classic", statistic = "fisher")
  # Top-20 enrichment tables per algorithm.
  allRes.cl <- GenTable(GOdata, classic_Fisher = resultFisher.cl, orderBy = "classic_Fisher", ranksOf = "classic_Fisher", topNodes = 20)
  allRes.wt <- GenTable(GOdata, weight_Fisher = resultFisher.wt, orderBy = "weight_Fisher", ranksOf = "weight_Fisher", topNodes = 20)
  allRes.wt1 <- GenTable(GOdata, weight_Fisher_01 = resultFisher.wt1, orderBy = "weight_Fisher_01", ranksOf = "weight_Fisher_01", topNodes = 20)
  allRes.pc <- GenTable(GOdata, parent_child_Fisher = resultFisher.pc, orderBy = "parent_child_Fisher", ranksOf = "parent_child_Fisher", topNodes = 20)
  # Save tables (file.path/paste0 instead of paste(..., sep = "")).
  write.table(allRes.cl, file = file.path(out_base, "GO_enrich/classic", paste0(region[i], "_enrichment_classic_sig.txt")), quote = FALSE, sep = "\t", row.names = FALSE)
  write.table(allRes.wt, file = file.path(out_base, "GO_enrich/weight", paste0(region[i], "_enrichment_weight_sig.txt")), quote = FALSE, sep = "\t", row.names = FALSE)
  write.table(allRes.wt1, file = file.path(out_base, "GO_enrich/weight01", paste0(region[i], "_enrichment_weight01_sig.txt")), quote = FALSE, sep = "\t", row.names = FALSE)
  write.table(allRes.pc, file = file.path(out_base, "GO_enrich/parent_child", paste0(region[i], "_enrichment_parent_child_sig.txt")), quote = FALSE, sep = "\t", row.names = FALSE)
  # Save DAG plots of the 5 most significant nodes per algorithm.
  pdf(file = file.path(out_base, "GO_DAG/classic", paste0(region[i], "_DAG_classic_plot.pdf")), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.cl), firstSigNodes = 5, useInfo = 'all')
  dev.off()
  pdf(file = file.path(out_base, "GO_DAG/weight", paste0(region[i], "_DAG_weight_plot.pdf")), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.wt), firstSigNodes = 5, useInfo = 'all')
  dev.off()
  pdf(file = file.path(out_base, "GO_DAG/weight01", paste0(region[i], "_DAG_weight01_plot.pdf")), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.wt1), firstSigNodes = 5, useInfo = 'all')
  dev.off()
  # FIX: output filename typo "parent_cild" -> "parent_child".
  pdf(file = file.path(out_base, "GO_DAG/parent_child", paste0(region[i], "_DAG_parent_child_plot.pdf")), onefile = TRUE)
  showSigOfNodes(GOdata, score(resultFisher.pc), firstSigNodes = 5, useInfo = 'all')
  dev.off()
}
# Bins shared across species for the LH class. NOTE: the three-species
# intersection is immediately overwritten by the two-species one below --
# kept as in the original exploratory session.
inter <- intersect(hg.bin.gr[Dog.LH], intersect(hg.bin.gr[Mou.LH], hg.bin.gr[Bov.LH]))
inter <- intersect(hg.bin.gr[Dog.LH], hg.bin.gr[Bov.LH])
# BUG FIX: `allRes` was never created anywhere in this file (the loop builds
# allRes.cl/.wt/.wt1/.pc); use the classic-Fisher table, which holds the
# results of the LAST loop iteration (Dog.LL).
goID <- allRes.cl[1, "GO.ID"]
print(showGroupDensity(GOdata, goID, ranks = TRUE))
showGroupDensity(GOdata, allRes.cl[10, "GO.ID"], ranks = TRUE, rm.one = FALSE)
# It may be worth doing a sanity check with the repeat classes in these bins;
# however, the next step will probably come from thinking about what it all means.
|
#' @name RDASC
#' @title Reproducible Data Analysis of Scientific Cooperations
#' @docType package
#' @description \code{RDASC} is a Reproducible Data Analysis
#'     package: an R package that keeps, in a reproducible and freely
#'     accessible way, the scientific collaborations I have
#'     participated in. The datasets of each cooperation are documented
#'     and their analyses are carried out as package vignettes.
#' @import wzRfun
NULL
|
/R/wzCoop.R
|
no_license
|
walmes/RDASC
|
R
| false
| false
| 430
|
r
|
#' @name RDASC
#' @title Reproducible Data Analysis of Scientific Cooperations
#' @docType package
#' @description \code{RDASC} is a Reproducible Data Analysis
#'     package: an R package that keeps, in a reproducible and freely
#'     accessible way, the scientific collaborations I have
#'     participated in. The datasets of each cooperation are documented
#'     and their analyses are carried out as package vignettes.
#' @import wzRfun
NULL
|
\name{clean.formula}
\alias{clean.formula}
\title{
Makes a shorter version of a formula for display
}
\description{
Used in stepAICc and others. If you want to use formulas from a big AIC analysis in a table, they need to be shorter and prettier. This sorts the terms, replaces the top model form with '...', etc.
}
\usage{
clean.formula(x, left = F, base.form = NULL)
}
\arguments{
\item{x}{
A formula, either as a character string or a factor. Or it could be a vector of formulas.
}
\item{left}{
A logical, do you want the most common/consistent terms on the left side of the formula or the right. How do you want the formulas to line up?
}
\item{base.form}{
I don't think this does anything anymore
}
}
\details{
Takes the formula and turns it into text. Substitutes out spaces and removes the response variable. Splits the strings by terms ('+'). Finds interaction terms and drops repeated terms, switching to '*' notation instead of including both the interaction and the main effects in the formula. Sorts the terms by their frequency of appearance in the vector of formulas. Sorts and returns the cleaned-up vector of formulas.
}
\value{
A much neater formula
}
\author{
Martha
}
\examples{
x<-c("SURV ~ Denali.snow + Denali.snow:initial.weight + initial.weight + summer.rain.yr1",
"SURV ~ Denali.snow + Denali.snow:initial.weight + initial.weight",
"SURV ~ Denali.snow + Denali.snow:initial.weight + initial.weight + summer.rain.yr2"
)
clean.formula(x)
}
|
/HelpfulFunctionsMME/man/clean.formula.Rd
|
no_license
|
mmellis/HelpfulFunctionsMME
|
R
| false
| false
| 1,478
|
rd
|
\name{clean.formula}
\alias{clean.formula}
\title{
Makes a shorter version of a formula for display
}
\description{
Used in stepAICc and others. If you want to use formulas from a big AIC analysis in a table, they need to be shorter and prettier. This sorts the terms, replaces the top model form with '...', etc.
}
\usage{
clean.formula(x, left = F, base.form = NULL)
}
\arguments{
\item{x}{
A formula, either as a character string or a factor. Or it could be a vector of formulas.
}
\item{left}{
A logical, do you want the most common/consistent terms on the left side of the formula or the right. How do you want the formulas to line up?
}
\item{base.form}{
I don't think this does anything anymore
}
}
\details{
Takes the formula and turns it into text. Substitutes out spaces and removes the response variable. Splits the strings by terms ('+'). Finds interaction terms and drops repeated terms, switching to '*' notation instead of including both the interaction and the main effects in the formula. Sorts the terms by their frequency of appearance in the vector of formulas. Sorts and returns the cleaned-up vector of formulas.
}
\value{
A much neater formula
}
\author{
Martha
}
\examples{
x<-c("SURV ~ Denali.snow + Denali.snow:initial.weight + initial.weight + summer.rain.yr1",
"SURV ~ Denali.snow + Denali.snow:initial.weight + initial.weight",
"SURV ~ Denali.snow + Denali.snow:initial.weight + initial.weight + summer.rain.yr2"
)
clean.formula(x)
}
|
context("Testing permci")

test_that("permci_glm parallel works as it should", {
  # Small simulated cluster-randomized trial: 2 arms x 5 clusters x 10 obs.
  ds <- gendata_crt(nclus = c(5, 5), size = c(10, 10), theta = 0, sigma = 1,
                    mu = 0, sd = 1)
  # ncores controls the registered foreach backend; after each call the
  # worker count should match what was requested (1 -> 2 -> back to 1).
  # Idiom fix: TRUE instead of the reassignable shorthand T.
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 10, ncores = 1, quietly = TRUE)
  expect_true(getDoParWorkers() == 1)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 10, ncores = 2, quietly = TRUE)
  expect_true(getDoParWorkers() == 2)
  tmp3 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 10, ncores = 1, quietly = TRUE)
  expect_true(getDoParWorkers() == 1)
})
test_that("permci_glm seed parameter works as it should", {
  # Small simulated cluster-randomized trial: 2 arms x 5 clusters x 10 obs.
  ds <- gendata_crt(nclus = c(5, 5), size = c(10, 10), theta = 0, sigma = 1,
                    mu = 0, sd = 1)
  # Serial runs: identical seed => identical permutation traces; no seed or a
  # different seed => traces differ. (Idiom fix: TRUE instead of T.)
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 123, quietly = TRUE)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 123, quietly = TRUE)
  tmp3 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, quietly = TRUE)
  tmp4 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 124, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
  expect_false(identical(tmp2$trace, tmp3$trace))
  expect_false(identical(tmp2$trace, tmp4$trace))
  # Parallel runs: the same seed semantics must hold across workers.
  ncores <- parallel::detectCores() - 1
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, seed = 123, quietly = TRUE)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, seed = 123, quietly = TRUE)
  tmp3 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, quietly = TRUE)
  tmp4 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, seed = 124, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
  expect_false(identical(tmp2$trace, tmp3$trace))
  expect_false(identical(tmp2$trace, tmp4$trace))
  # Serial vs parallel with the same seed must agree as well.
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 123, quietly = TRUE)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 2, seed = 123, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
})
test_that("permci works like permci_glm", {
  # Small simulated cluster-randomized trial: 2 arms x 5 clusters x 10 obs.
  ds <- gendata_crt(nclus = c(5, 5), size = c(10, 10), theta = 0, sigma = 1,
                    mu = 0, sd = 1)
  # permci_glm fits the model internally; permci takes an already-fitted glm.
  # With identical seeds the two interfaces must produce identical traces.
  # (Idiom fix: TRUE instead of T.)
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 2, seed = 123, quietly = TRUE)
  m1 <- glm(y ~ trt, data = ds)
  tmp2 <- permci(m1, trtname = 'trt', runit = 'clusid', data = ds,
                 nperm = 100, ncores = 2, seed = 123, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
})
# should add other permci_xxx tests, but first need dummy data
|
/tests/testthat/test_permci.R
|
no_license
|
djrabideau/permuter
|
R
| false
| false
| 3,438
|
r
|
context("Testing permci")

test_that("permci_glm parallel works as it should", {
  # Small simulated cluster-randomized trial: 2 arms x 5 clusters x 10 obs.
  ds <- gendata_crt(nclus = c(5, 5), size = c(10, 10), theta = 0, sigma = 1,
                    mu = 0, sd = 1)
  # ncores controls the registered foreach backend; after each call the
  # worker count should match what was requested (1 -> 2 -> back to 1).
  # Idiom fix: TRUE instead of the reassignable shorthand T.
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 10, ncores = 1, quietly = TRUE)
  expect_true(getDoParWorkers() == 1)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 10, ncores = 2, quietly = TRUE)
  expect_true(getDoParWorkers() == 2)
  tmp3 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 10, ncores = 1, quietly = TRUE)
  expect_true(getDoParWorkers() == 1)
})
test_that("permci_glm seed parameter works as it should", {
  # Small simulated cluster-randomized trial: 2 arms x 5 clusters x 10 obs.
  ds <- gendata_crt(nclus = c(5, 5), size = c(10, 10), theta = 0, sigma = 1,
                    mu = 0, sd = 1)
  # Serial runs: identical seed => identical permutation traces; no seed or a
  # different seed => traces differ. (Idiom fix: TRUE instead of T.)
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 123, quietly = TRUE)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 123, quietly = TRUE)
  tmp3 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, quietly = TRUE)
  tmp4 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 124, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
  expect_false(identical(tmp2$trace, tmp3$trace))
  expect_false(identical(tmp2$trace, tmp4$trace))
  # Parallel runs: the same seed semantics must hold across workers.
  ncores <- parallel::detectCores() - 1
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, seed = 123, quietly = TRUE)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, seed = 123, quietly = TRUE)
  tmp3 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, quietly = TRUE)
  tmp4 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = ncores, seed = 124, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
  expect_false(identical(tmp2$trace, tmp3$trace))
  expect_false(identical(tmp2$trace, tmp4$trace))
  # Serial vs parallel with the same seed must agree as well.
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 1, seed = 123, quietly = TRUE)
  tmp2 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 2, seed = 123, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
})
test_that("permci works like permci_glm", {
  # Small simulated cluster-randomized trial: 2 arms x 5 clusters x 10 obs.
  ds <- gendata_crt(nclus = c(5, 5), size = c(10, 10), theta = 0, sigma = 1,
                    mu = 0, sd = 1)
  # permci_glm fits the model internally; permci takes an already-fitted glm.
  # With identical seeds the two interfaces must produce identical traces.
  # (Idiom fix: TRUE instead of T.)
  tmp1 <- permci_glm(y ~ trt, trtname = 'trt', runit = 'clusid', data = ds,
                     nperm = 100, ncores = 2, seed = 123, quietly = TRUE)
  m1 <- glm(y ~ trt, data = ds)
  tmp2 <- permci(m1, trtname = 'trt', runit = 'clusid', data = ds,
                 nperm = 100, ncores = 2, seed = 123, quietly = TRUE)
  expect_true(identical(tmp1$trace, tmp2$trace))
})
# should add other permci_xxx tests, but first need dummy data
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/curve_pair_align.R
\name{curve_pair_align}
\alias{curve_pair_align}
\title{Pairwise align two curves}
\usage{
curve_pair_align(beta1, beta2)
}
\arguments{
\item{beta1}{array describing curve 1 (n,T)}
\item{beta2}{array describing curve 2 (n,T)}
}
\value{
a list containing \item{beta2n}{aligned curve 2 to 1}
\item{q2n}{aligned srvf 2 to 1}
\item{gam}{warping function}
\item{q1}{srvf of curve 1}
}
\description{
This function aligns two curves using the elastic framework
}
\examples{
data("mpeg7")
out = curve_pair_align(beta[,,1,1], beta[,,1,5])
}
\references{
Srivastava, A., Klassen, E., Joshi, S., Jermyn, I., (2011). Shape analysis of elastic curves in euclidean spaces. Pattern Analysis and Machine Intelligence, IEEE Transactions on 33 (7), 1415-1428.
}
\keyword{alignment}
\keyword{srvf}
|
/fuzzedpackages/fdasrvf/man/curve_pair_align.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 872
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/curve_pair_align.R
\name{curve_pair_align}
\alias{curve_pair_align}
\title{Pairwise align two curves}
\usage{
curve_pair_align(beta1, beta2)
}
\arguments{
\item{beta1}{array describing curve 1 (n,T)}
\item{beta2}{array describing curve 2 (n,T)}
}
\value{
a list containing \item{beta2n}{aligned curve 2 to 1}
\item{q2n}{aligned srvf 2 to 1}
\item{gam}{warping function}
\item{q1}{srvf of curve 1}
}
\description{
This function aligns two curves using the elastic framework
}
\examples{
data("mpeg7")
out = curve_pair_align(beta[,,1,1], beta[,,1,5])
}
\references{
Srivastava, A., Klassen, E., Joshi, S., Jermyn, I., (2011). Shape analysis of elastic curves in euclidean spaces. Pattern Analysis and Machine Intelligence, IEEE Transactions on 33 (7), 1415-1428.
}
\keyword{alignment}
\keyword{srvf}
|
# Test-suite entry point: run all testthat tests for the hbbr package.
# library() instead of require(): require() returns FALSE on a missing
# package instead of erroring, which would make test_check() fail later
# with a far less clear message.
library(testthat)
test_check("hbbr")
|
/tests/testthat.R
|
no_license
|
cran/hbbr
|
R
| false
| false
| 36
|
r
|
# Test-suite entry point: run all testthat tests for the hbbr package.
# library() instead of require(): require() returns FALSE on a missing
# package instead of erroring, which would make test_check() fail later
# with a far less clear message.
library(testthat)
test_check("hbbr")
|
library(ggplot2)
# Register a Windows font alias for Korean text in plots (the quoted font
# name below is Korean and appears mojibake-mangled in this copy -- it is
# presumably "Malgun Gothic"; confirm against the original file encoding).
windowsFonts(malgun = "๋ง์ ๊ณ ๋")
theme_update(text = element_text(family = "malgun"))
# No.1: scatter plot of sepal width vs length for the virginica subset of
# iris; jitter() separates overlapping points.
irse <- iris[iris$Species == "virginica",]
plot(jitter(irse$Sepal.Width)~jitter(irse$Sepal.Length))
# No.2: simple linear regression of width on length, overlaid on the plot.
# NOTE(review): `line` shadows the base graphics function lines()'s sibling
# name -- harmless here but a rename would be safer.
line<-lm(irse$Sepal.Width~irse$Sepal.Length)
abline(line, col="red", lwd=2)
summary(line)
# No.3: mean city mileage of the 7 best SUV manufacturers, as a bar chart
# ordered from highest to lowest mean.
library(dplyr)
data <- mpg %>%
filter(class=="suv") %>%
group_by(manufacturer) %>%
summarise(cty_mean=mean(cty)) %>%
arrange(-cty_mean) %>%
head(7)
ggplot(data=data,
aes(x=reorder(manufacturer, -cty_mean), y=cty_mean,
fill=manufacturer )) +
geom_bar(stat = "identity")
# No.4
library(tibble)
states_map <- map_data("state")
crime <- rownames_to_column(USArrests, var="state")
crime$state <-tolower(crime$state)
library(ggiraphExtra)
p1 <- ggChoropleth(data=crime,
aes(fill=Murder,map_id=state),
map = states_map)+
ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ์ด์ธ๋ฒ์ฃ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
xlab("๊ฒฝ๋") +
ylab("์๋") +
theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
color="darkblue"),
plot.subtitle = element_text(hjust=1.0))
p2 <- ggChoropleth(data=crime,
aes(fill=Rape,map_id=state),
map = states_map)+
ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ๊ฐ๊ฐ๋ฒ์ฃ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
xlab("๊ฒฝ๋")+
ylab("์๋") +
theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
color="darkblue"),
plot.subtitle = element_text(hjust=1.0))
p3 <- ggChoropleth(data=crime,
aes(fill=Assault,map_id=state),
map = states_map)+
ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ๊ฐ๋๋ฒ์ฃ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
xlab("๊ฒฝ๋")+
ylab("์๋") +
theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
color="darkblue"),
plot.subtitle = element_text(hjust=1.0))
p4 <- ggChoropleth(data=crime,
aes(fill=UrbanPop,map_id=state),
map = states_map)+
ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ๋์ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
xlab("๊ฒฝ๋")+
ylab("์๋") +
theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
color="darkblue"),
plot.subtitle = element_text(hjust=1.0))
library(gridExtra)
grid.arrange(p1,p2,p3,p4, ncol=2,nrow=2)
# No.5
data <- read.csv("f:/Workspace/R/r_stat/data/GDP.csv")
str(data)
tsdata <- data %>%
ungroup %>%
arrange(๋
๋) %>%
ts(start = c(1988,1))
tsdata <- tsdata[,-c(1)] # ๋
๋ ์ด์ ์ ๊ฑฐ
str(tsdata)
library(dygraphs)
dygraph(tsdata)%>% dyRangeSelector
co <- read.csv("f:/Workspace/R/r_stat/data/GDP.csv",
header=T)
str(co)
ggplot() +
geom_line(data=co, aes(x=๋
๋, y=ํ๊ตญ, colour="green")) +
geom_line(data=co, aes(x=๋
๋, y=๋ฏธ๊ตญ, colour="blue")) +
geom_line(data=co, aes(x=๋
๋, y=์ค๊ตญ, colour="red")) +
ylab("GDP ์ฑ์ฅ๋ฅ ")
library(reshape2)
gdp_cagr <- melt(co, id="๋
๋") # convert to long format
str(gdp_cagr)
names(gdp_cagr) <- c("๋
๋", "๊ตญ๊ฐ", "rate")
ggplot(data=gdp_cagr,
aes(x=๋
๋, y=rate, colour=๊ตญ๊ฐ)) +
geom_line(lwd=1) +
ylab("GDP ์ฑ์ฅ๋ฅ ")
# No.6: Central Limit Theorem demonstration. Draw 1000 sample means from a
# chi-square(df = 3) distribution at sample sizes 4/8/32/64 and overlay the
# normal approximation N(df, sqrt(2*df/n)) on each histogram (a chi-square
# with k degrees of freedom has mean k and variance 2k).
df <-3
# Preallocate the four result vectors (1000 replicate means each).
m4 <- rep(NA, 1000)
m8 <- rep(NA, 1000)
m32 <- rep(NA, 1000)
m64 <- rep(NA, 1000)
set.seed(9)
for(i in 1:1000){
m4[i] <-mean(rchisq(4, df))
m8[i] <-mean(rchisq(8, df))
m32[i] <-mean(rchisq(32, df))
m64[i] <-mean(rchisq(64, df))
}
# 2x2 panel of histograms; titles are Korean ("number of samples: n").
# NOTE(review): prob=T uses the reassignable shorthand T -- prefer TRUE.
par(mfrow = c(2,2))
hist(m4, xlim=c(0, 8), prob=T , main="ํ๋ณธ์ ๊ฐ์ : 4๊ฐ",
xlab="x", ylab="", col="cyan", border = "blue")
# Overlay the limiting normal density for n = 4.
x1 <- seq(min(m4), max(m4),length=1000)
y1 <- dnorm(x= x1, mean=df, sd=sqrt(df*2/4))
lines(x1, y1, lty=2, lwd=2, col="red" )
hist(m8, xlim=c(1, 6), ylim=c(0,0.5), prob=T, main="ํ๋ณธ์ ๊ฐ์ : 8๊ฐ",
xlab="x", ylab="", col="cyan", border = "blue")
x2 <- seq(min(m8), max(m8),length=1000)
y2 <- dnorm(x= x2, mean=df, sd=sqrt(df*2/8))
lines(x2, y2, lty=2, lwd=2, col="red" )
hist(m32, xlim=c(1.5, 4.5), prob=T, main="ํ๋ณธ์ ๊ฐ์ : 32๊ฐ",
xlab="x", ylab="", col="cyan", border = "blue")
x3 <- seq(min(m32), max(m32),length=1000)
y3 <- dnorm(x= x3, mean=df, sd=sqrt(df*2/32))
lines(x3, y3, lty=2, lwd=2, col="red" )
hist(m64, xlim=c(2, 4), ylim=c(0,1.35), prob=T, main="ํ๋ณธ์ ๊ฐ์ : 64๊ฐ",
xlab="x", ylab="", col="cyan", border = "blue")
x4 <- seq(min(m64), max(m64),length=1000)
y4 <- dnorm(x= x4, mean=df, sd=sqrt(df*2/64))
lines(x4, y4, lty=2, lwd=2, col="red" )
# Restore the default single-panel layout.
par(mfrow = c(1, 1))
|
/test2.R
|
no_license
|
ckiekim/BigDataWithR-Lecture
|
R
| false
| false
| 4,665
|
r
|
# Session setup: ggplot2 with a Windows Korean font (the font name string is
# mojibake from an encoding round-trip; presumably "Malgun Gothic" -- TODO
# confirm against the original file).
library(ggplot2)
windowsFonts(malgun = "๋ง์ ๊ณ ๋")
theme_update(text = element_text(family = "malgun"))
# No.1: scatter plot of virginica sepal width vs length, jittered to reduce
# overplotting of duplicated measurements.
irse <- iris[iris$Species == "virginica", ]
plot(jitter(irse$Sepal.Width) ~ jitter(irse$Sepal.Length))
# No.2: least-squares fit of width on length, drawn over the scatter plot.
# Use the data= argument instead of `df$col` formula terms so the model
# carries clean variable names. (NOTE: `line` masks base::line() for the
# rest of this script.)
line <- lm(Sepal.Width ~ Sepal.Length, data = irse)
abline(line, col = "red", lwd = 2)
summary(line)
# No.3: top-7 SUV manufacturers by mean city mileage, as a bar chart.
library(dplyr)
data <- mpg %>%
  filter(class == "suv") %>%
  group_by(manufacturer) %>%
  summarise(cty_mean = mean(cty)) %>%   # one row per manufacturer
  arrange(desc(cty_mean)) %>%           # idiomatic descending sort (same order as -cty_mean)
  head(7)
ggplot(data = data,
       aes(x = reorder(manufacturer, -cty_mean), y = cty_mean,
           fill = manufacturer)) +
  geom_col()                            # geom_col() == geom_bar(stat = "identity")
# No.4
# Four US state choropleths from USArrests (murder, rape, assault, urban
# population), drawn with ggiraphExtra::ggChoropleth over map_data("state").
# NOTE(review): the Korean title/axis strings below are mojibake with line
# breaks embedded *inside* the string literals by an encoding round-trip;
# they are preserved byte-for-byte here and should be restored from the
# original encoding before this section is run.
library(tibble)
states_map <- map_data("state")
# USArrests keeps states in rownames; move them into a lowercase "state"
# column so they match map_data()'s region names.
crime <- rownames_to_column(USArrests, var="state")
crime$state <-tolower(crime$state)
library(ggiraphExtra)
# p1: murder rate per state.
p1 <- ggChoropleth(data=crime,
                   aes(fill=Murder,map_id=state),
                   map = states_map)+
  ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ์ด์ธ๋ฒ์ฃ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
  xlab("๊ฒฝ๋") +
  ylab("์๋") +
  theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
                                  color="darkblue"),
        plot.subtitle = element_text(hjust=1.0))
# p2: rape rate per state.
p2 <- ggChoropleth(data=crime,
                   aes(fill=Rape,map_id=state),
                   map = states_map)+
  ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ๊ฐ๊ฐ๋ฒ์ฃ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
  xlab("๊ฒฝ๋")+
  ylab("์๋") +
  theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
                                  color="darkblue"),
        plot.subtitle = element_text(hjust=1.0))
# p3: assault rate per state.
p3 <- ggChoropleth(data=crime,
                   aes(fill=Assault,map_id=state),
                   map = states_map)+
  ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ๊ฐ๋๋ฒ์ฃ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
  xlab("๊ฒฝ๋")+
  ylab("์๋") +
  theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
                                  color="darkblue"),
        plot.subtitle = element_text(hjust=1.0))
# p4: urban population share per state.
p4 <- ggChoropleth(data=crime,
                   aes(fill=UrbanPop,map_id=state),
                   map = states_map)+
  ggtitle("๋ฏธ๊ตญ ์ฃผ๋ณ ๋์ ๋น์จ","(๋จ์: ์ธ๊ตฌ 10๋ง๋ช
๋ง ๊ฑด์)") +
  xlab("๊ฒฝ๋")+
  ylab("์๋") +
  theme(plot.title = element_text(face="bold", hjust=0.5, size=15,
                                  color="darkblue"),
        plot.subtitle = element_text(hjust=1.0))
library(gridExtra)
grid.arrange(p1,p2,p3,p4, ncol=2,nrow=2)
# No.5
# GDP time series: interactive dygraph of all countries, then per-country
# ggplot line charts (wide format), then the same chart from long format
# via reshape2::melt().
# NOTE(review): the Korean identifiers/strings below were mangled by an
# encoding round-trip; stray line breaks now split identifiers (e.g. the
# arrange() column spans two lines), so this section will not parse until
# the file is restored to its original encoding.
data <- read.csv("f:/Workspace/R/r_stat/data/GDP.csv")
str(data)
# Sort by year column and convert to a ts object starting at 1988.
tsdata <- data %>%
  ungroup %>%
  arrange(๋
๋) %>%
  ts(start = c(1988,1))
tsdata <- tsdata[,-c(1)] # ๋
๋ ์ด์ ์ ๊ฑฐ
str(tsdata)
# Interactive time-series chart with a draggable range selector.
library(dygraphs)
dygraph(tsdata)%>% dyRangeSelector
co <- read.csv("f:/Workspace/R/r_stat/data/GDP.csv",
               header=T)
str(co)
# One geom_line() per country column (wide-format plotting).
ggplot() +
  geom_line(data=co, aes(x=๋
๋, y=ํ๊ตญ, colour="green")) +
  geom_line(data=co, aes(x=๋
๋, y=๋ฏธ๊ตญ, colour="blue")) +
  geom_line(data=co, aes(x=๋
๋, y=์ค๊ตญ, colour="red")) +
  ylab("GDP ์ฑ์ฅ๋ฅ ")
# Reshape to long format: one row per (year, country, rate).
library(reshape2)
gdp_cagr <- melt(co, id="๋
๋") # convert to long format
str(gdp_cagr)
names(gdp_cagr) <- c("๋
๋", "๊ตญ๊ฐ", "rate")
ggplot(data=gdp_cagr,
       aes(x=๋
๋, y=rate, colour=๊ตญ๊ฐ)) +
  geom_line(lwd=1) +
  ylab("GDP ์ฑ์ฅ๋ฅ ")
# No.6: Central Limit Theorem demo -- distribution of 1000 sample means of a
# chi-square(df = 3) variable for sample sizes 4/8/32/64, each histogram
# overlaid with its normal approximation N(df, sqrt(2*df/n)).
df <- 3
n_rep <- 1000
m4  <- rep(NA, n_rep)
m8  <- rep(NA, n_rep)
m32 <- rep(NA, n_rep)
m64 <- rep(NA, n_rep)
set.seed(9)
# All four sample sizes are drawn interleaved inside one loop. Keep this
# exact draw order: restructuring it would change the RNG stream and thus
# the histograms.
for (i in seq_len(n_rep)) {
  m4[i]  <- mean(rchisq(4, df))
  m8[i]  <- mean(rchisq(8, df))
  m32[i] <- mean(rchisq(32, df))
  m64[i] <- mean(rchisq(64, df))
}
# Draw one histogram of sample means plus its normal-approximation curve.
#   m    : vector of sample means
#   n    : sample size used to compute the CLT standard deviation
#   main : plot title (passed through to hist())
#   xlim, ylim : axis limits; ylim = NULL lets hist() choose.
plot_clt <- function(m, n, main, xlim, ylim = NULL) {
  if (is.null(ylim)) {
    hist(m, xlim = xlim, prob = TRUE, main = main,
         xlab = "x", ylab = "", col = "cyan", border = "blue")
  } else {
    hist(m, xlim = xlim, ylim = ylim, prob = TRUE, main = main,
         xlab = "x", ylab = "", col = "cyan", border = "blue")
  }
  x <- seq(min(m), max(m), length = 1000)
  y <- dnorm(x = x, mean = df, sd = sqrt(df * 2 / n))  # CLT: sd = sqrt(var(X)/n)
  lines(x, y, lty = 2, lwd = 2, col = "red")
}
par(mfrow = c(2, 2))
plot_clt(m4,  4,  main="ํ๋ณธ์ ๊ฐ์ : 4๊ฐ",  xlim = c(0, 8))
plot_clt(m8,  8,  main="ํ๋ณธ์ ๊ฐ์ : 8๊ฐ",  xlim = c(1, 6),   ylim = c(0, 0.5))
plot_clt(m32, 32, main="ํ๋ณธ์ ๊ฐ์ : 32๊ฐ", xlim = c(1.5, 4.5))
plot_clt(m64, 64, main="ํ๋ณธ์ ๊ฐ์ : 64๊ฐ", xlim = c(2, 4),   ylim = c(0, 1.35))
par(mfrow = c(1, 1))
|
#Program to count inspection violations data by block group and year (reshape needed)
#Output to CSV for joining with 'MetroBostonBG.shp' shapefile and inputing into PostgreSQL
# Libraries
library(RPostgreSQL)
library(gdata)   # rename.vars()
#Path
setwd('/home/tom/dataswap_tax/analysis')
# Connect to database
# SECURITY: host/user/password are hard-coded below; move them into
# environment variables or a config file before sharing this script.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, host="66.228.36.34",user='dataswap',password = 'taxtaxtax',dbname="dataswap", port="5432")
# Pull one row per violation: block group id, date, description.
query = dbSendQuery(con, paste("select geoid10, violationdate, description from joined.inspection_violations"))
df = fetch(query, n = -1); dbClearResult(query)
df$year <- substr(df[,2], 1,4) #creating a 'year' variable for aggregation
# Count violations per (block group, year); length of any column is the count.
countval = aggregate(df, by = list(df$geoid10, df$year), FUN = length)
countval$geoid10 <- countval$description <- countval$year <- NULL
countval = rename.vars(countval, from = c('Group.1','Group.2','violationdate'), to = c('GEOID10','year','violation_count'))
# Sort by block group then year. Index the columns directly rather than the
# attach()/detach() idiom, which pollutes the search path and is fragile if
# an error fires between the two calls.
countval_sorted <- countval[order(countval$GEOID10, countval$year), ]
row.names(countval_sorted) <- NULL
#Save data as .csv
write.csv(countval_sorted, "inspections_count.csv", row.names = F)
#Write dataframe to Postgres (drop any stale copy first)
dbRemoveTable(con, c("aggregated","violation_inspections"))
dbWriteTable(con, c("aggregated","violation_inspections"), value=countval_sorted)
dbDisconnect(con)
|
/analysis/aggregated_inspections.R
|
permissive
|
mjrouser/dataswapping
|
R
| false
| false
| 1,399
|
r
|
#Program to count inspection violations data by block group and year (reshape needed)
#Output to CSV for joining with 'MetroBostonBG.shp' shapefile and inputing into PostgreSQL
# Libraries
library(RPostgreSQL)
library(gdata)   # rename.vars()
#Path
setwd('/home/tom/dataswap_tax/analysis')
# Connect to database
# SECURITY: host/user/password are hard-coded below; move them into
# environment variables or a config file before sharing this script.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, host="66.228.36.34",user='dataswap',password = 'taxtaxtax',dbname="dataswap", port="5432")
# Pull one row per violation: block group id, date, description.
query = dbSendQuery(con, paste("select geoid10, violationdate, description from joined.inspection_violations"))
df = fetch(query, n = -1); dbClearResult(query)
df$year <- substr(df[,2], 1,4) #creating a 'year' variable for aggregation
# Count violations per (block group, year); length of any column is the count.
countval = aggregate(df, by = list(df$geoid10, df$year), FUN = length)
countval$geoid10 <- countval$description <- countval$year <- NULL
countval = rename.vars(countval, from = c('Group.1','Group.2','violationdate'), to = c('GEOID10','year','violation_count'))
# Sort by block group then year. Index the columns directly rather than the
# attach()/detach() idiom, which pollutes the search path and is fragile if
# an error fires between the two calls.
countval_sorted <- countval[order(countval$GEOID10, countval$year), ]
row.names(countval_sorted) <- NULL
#Save data as .csv
write.csv(countval_sorted, "inspections_count.csv", row.names = F)
#Write dataframe to Postgres (drop any stale copy first)
dbRemoveTable(con, c("aggregated","violation_inspections"))
dbWriteTable(con, c("aggregated","violation_inspections"), value=countval_sorted)
dbDisconnect(con)
|
# Shiny UI definition: a four-tab navbar app demonstrating random forests on
# the highly collinear `longley` data set. The matching server must render
# outputs "pairplot", "lmsum", "scanplot" and "plot", and read inputs
# "plotScan", "plotType", "ntree" and "mtry".
shinyUI(navbarPage("Random Forest",
           # Tab 1: static introduction / navigation help.
           tabPanel("About",
                    h4("Introduction"),
                    hr(),
                    p("This application use random forest as machine learning algorithm
                      to study the highly linear data set Longley. In this shiny app,
                      firstly, the data set is discussed. Secondly, random forest parameter
                      mtry is discussed. Lastly, parameters mtry and ntree are discussed."),
                    hr(),
                    p(strong("Please click the bar above to get details of topic below.")),
                    hr(),
                    h4("Data"),
                    p("Data set (longley) is discussed with pairwise plot between variables.
                      In addition, linear regression is also performanced to show high
                      collinearity of the data set."),
                    hr(),
                    h4("mtry"),
                    p("One important parameter to build random forest model is mtry, nubmer
                      of variables to split at each node. In order to pick the best one, mtry
                      will be scanned. "),
                    hr(),
                    h4("Plot"),
                    p("Two parameters ntree and mtry can be changed to show the difference of
                      results in the plot.")
           ),
           # Tab 2: data-set description, pairwise plot and lm() summary.
           tabPanel("Data",
                    sidebarLayout(
                      sidebarPanel(
                        h3("Data Description")
                      ),
                      mainPanel(
                        h3("Longley's Economic Regression Data"),
                        p("A macroeconomic data set which provides a
                          well-known example for a highly collinear regression."),
                        p("It is a data frame with 7 economical variables, observed
                          yearly from 1947 to 1962 (n=16). The figure below shows the
                          pairwise plot between variables and it is known to be",
                          strong("highly collinear"),"."),
                        plotOutput("pairplot"),
                        p("Linear regression by ",code("lm(Employed ~ ., data=longley)"),
                          "results below can also see the highly collinear of the data"),
                        hr(),
                        verbatimTextOutput("lmsum"),
                        hr(),
                        p(strong("For high collinear data set, Random Forest is good algorithm due
                                 to random select subset of variables to build each tree.")),
                        hr(),
                        h3("Source"),
                        p("J. W. Longley (1967) An appraisal of least-squares programs
                          from the point of view of the user.
                          Journal of the American Statistical Association 62, 819โ841.
                          "),
                        h3("References"),
                        p("Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)
                          The New S Language. Wadsworth & Brooks/Cole.")
                      )
                    )
           ),
           # Tab 3: mtry scan; radio button picks RMSE vs R-squared metric.
           tabPanel("mtry",
                    sidebarLayout(
                      sidebarPanel(
                        h4("Metric for measuring performance of Random Forest"),
                        hr(),
                        radioButtons("plotScan", "Metric",
                                     c("RMSE"="rmse", "Rsquare"="rsq")
                        )
                      ),
                      mainPanel(
                        h3("mtry selelction"),
                        hr(),
                        p( strong("mtry: Number of variables randomly sampled as candidates at
                                  each split.")),
                        hr(),
                        p("Note that the default values are different
                          for classification (sqrt(p) where p is number of variables in x)
                          and regression (p/3)."),
                        p("It can also be selected by evaluating
                          all possible mtry from 2 to number of variables in RMSE or Rsquare.",
                          em("caret"), "package can be used for this purpose."),
                        hr(),
                        p("The figure below shows the performance of random forest on Longley data
                          with different", strong("mtry"), "from 2 to 6."),
                        plotOutput("scanplot")
                      )
                    )
           ),
           # Tab 4: interactive ntree/mtry sliders with a performance plot.
           tabPanel("Plot",
                    sidebarLayout(
                      sidebarPanel(
                        h4("Metric for measuring performance of Random Forest"),
                        hr(),
                        radioButtons("plotType", "Metric",
                                     c("MSE (mean square error)"="mse", "Rsquare"="rsq")
                        ),
                        hr(),
                        hr(),
                        hr(),
                        h4("Two Parameters of Random Forest"),
                        hr(),
                        sliderInput("ntree",
                                    "Number of trees:",
                                    min = 100, max = 5000, value = 3000),
                        hr(),
                        sliderInput("mtry",
                                    "mtry:",
                                    min = 2, max = 6, value = 4)
                      ),
                      mainPanel(
                        h4("The random forest performance with two parameters ntree and mtry"),
                        p("With ntree increasing, the performance of random forest will converage."),
                        plotOutput("plot")
                      )
                    )
           )
))
|
/app/ui.R
|
no_license
|
courseracheng/DataProductProject
|
R
| false
| false
| 4,690
|
r
|
# Shiny UI definition: a four-tab navbar app demonstrating random forests on
# the highly collinear `longley` data set. The matching server must render
# outputs "pairplot", "lmsum", "scanplot" and "plot", and read inputs
# "plotScan", "plotType", "ntree" and "mtry".
shinyUI(navbarPage("Random Forest",
           # Tab 1: static introduction / navigation help.
           tabPanel("About",
                    h4("Introduction"),
                    hr(),
                    p("This application use random forest as machine learning algorithm
                      to study the highly linear data set Longley. In this shiny app,
                      firstly, the data set is discussed. Secondly, random forest parameter
                      mtry is discussed. Lastly, parameters mtry and ntree are discussed."),
                    hr(),
                    p(strong("Please click the bar above to get details of topic below.")),
                    hr(),
                    h4("Data"),
                    p("Data set (longley) is discussed with pairwise plot between variables.
                      In addition, linear regression is also performanced to show high
                      collinearity of the data set."),
                    hr(),
                    h4("mtry"),
                    p("One important parameter to build random forest model is mtry, nubmer
                      of variables to split at each node. In order to pick the best one, mtry
                      will be scanned. "),
                    hr(),
                    h4("Plot"),
                    p("Two parameters ntree and mtry can be changed to show the difference of
                      results in the plot.")
           ),
           # Tab 2: data-set description, pairwise plot and lm() summary.
           tabPanel("Data",
                    sidebarLayout(
                      sidebarPanel(
                        h3("Data Description")
                      ),
                      mainPanel(
                        h3("Longley's Economic Regression Data"),
                        p("A macroeconomic data set which provides a
                          well-known example for a highly collinear regression."),
                        p("It is a data frame with 7 economical variables, observed
                          yearly from 1947 to 1962 (n=16). The figure below shows the
                          pairwise plot between variables and it is known to be",
                          strong("highly collinear"),"."),
                        plotOutput("pairplot"),
                        p("Linear regression by ",code("lm(Employed ~ ., data=longley)"),
                          "results below can also see the highly collinear of the data"),
                        hr(),
                        verbatimTextOutput("lmsum"),
                        hr(),
                        p(strong("For high collinear data set, Random Forest is good algorithm due
                                 to random select subset of variables to build each tree.")),
                        hr(),
                        h3("Source"),
                        p("J. W. Longley (1967) An appraisal of least-squares programs
                          from the point of view of the user.
                          Journal of the American Statistical Association 62, 819โ841.
                          "),
                        h3("References"),
                        p("Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)
                          The New S Language. Wadsworth & Brooks/Cole.")
                      )
                    )
           ),
           # Tab 3: mtry scan; radio button picks RMSE vs R-squared metric.
           tabPanel("mtry",
                    sidebarLayout(
                      sidebarPanel(
                        h4("Metric for measuring performance of Random Forest"),
                        hr(),
                        radioButtons("plotScan", "Metric",
                                     c("RMSE"="rmse", "Rsquare"="rsq")
                        )
                      ),
                      mainPanel(
                        h3("mtry selelction"),
                        hr(),
                        p( strong("mtry: Number of variables randomly sampled as candidates at
                                  each split.")),
                        hr(),
                        p("Note that the default values are different
                          for classification (sqrt(p) where p is number of variables in x)
                          and regression (p/3)."),
                        p("It can also be selected by evaluating
                          all possible mtry from 2 to number of variables in RMSE or Rsquare.",
                          em("caret"), "package can be used for this purpose."),
                        hr(),
                        p("The figure below shows the performance of random forest on Longley data
                          with different", strong("mtry"), "from 2 to 6."),
                        plotOutput("scanplot")
                      )
                    )
           ),
           # Tab 4: interactive ntree/mtry sliders with a performance plot.
           tabPanel("Plot",
                    sidebarLayout(
                      sidebarPanel(
                        h4("Metric for measuring performance of Random Forest"),
                        hr(),
                        radioButtons("plotType", "Metric",
                                     c("MSE (mean square error)"="mse", "Rsquare"="rsq")
                        ),
                        hr(),
                        hr(),
                        hr(),
                        h4("Two Parameters of Random Forest"),
                        hr(),
                        sliderInput("ntree",
                                    "Number of trees:",
                                    min = 100, max = 5000, value = 3000),
                        hr(),
                        sliderInput("mtry",
                                    "mtry:",
                                    min = 2, max = 6, value = 4)
                      ),
                      mainPanel(
                        h4("The random forest performance with two parameters ntree and mtry"),
                        p("With ntree increasing, the performance of random forest will converage."),
                        plotOutput("plot")
                      )
                    )
           )
))
|
# nocov start
# Register the "gen_additive_reg" model specification with parsnip's model
# registry; must run before any engine is attached to the model.
make_gen_additive_reg <- function() {
  parsnip::set_new_model("gen_additive_reg")
}
# Register the "stan" engine (brms-backed) for gen_additive_reg in regression
# mode: package dependencies, tunable arguments, data-encoding options, the
# fit interface, and the numeric prediction method.
make_gen_additive_reg_stan <- function() {
  #### REGRESSION
  model  <- "gen_additive_reg"      # use <- for assignment, per R convention
  mode   <- "regression"
  engine <- "stan"
  parsnip::set_model_engine(model = model, mode = mode, eng = engine)
  parsnip::set_dependency(model = model, eng = engine, pkg = "brms")
  parsnip::set_dependency(model = model, eng = engine, pkg = "bayesmodels")
  # Tunable arguments: parsnip name -> native argument of the fit function.
  # `eng = engine` replaces the repeated literal "stan" for consistency with
  # the other registry calls in this function.
  parsnip::set_model_arg(
    model = model,
    eng = engine,
    parsnip = "markov_chains",
    original = "chains",
    func = list(pkg = "bayesmodels", fun = "markov_chains"),
    has_submodel = FALSE
  )
  parsnip::set_model_arg(
    model = model,
    eng = engine,
    parsnip = "chain_iter",
    original = "iter",
    func = list(pkg = "bayesmodels", fun = "chain_iter"),
    has_submodel = FALSE
  )
  parsnip::set_model_arg(
    model = model,
    eng = engine,
    parsnip = "warmup_iter",
    original = "warmup",
    func = list(pkg = "bayesmodels", fun = "warmup_iter"),
    has_submodel = FALSE
  )
  # No dummy-variable expansion; intercept handling is left to the engine.
  parsnip::set_encoding(
    model = model,
    eng = engine,
    mode = mode,
    options = list(
      predictor_indicators = "none",
      compute_intercept = FALSE,
      remove_intercept = FALSE,
      allow_sparse_x = FALSE
    )
  )
  # Formula interface; formula and data must not be overridden by the user.
  parsnip::set_fit(
    model = model,
    eng = engine,
    mode = mode,
    value = list(
      interface = "formula",
      protect = c("formula", "data"),
      func = c(fun = "gen_additive_stan_fit_impl"),
      defaults = list()
    )
  )
  parsnip::set_pred(
    model = model,
    eng = engine,
    mode = mode,
    type = "numeric",
    value = list(
      pre = NULL,
      post = NULL, #function(x, object) res<-tibble::as_tibble(x) %>% dplyr::pull(1) %>% as.numeric(),
      func = c(fun = "predict"),
      args = list(
        object = rlang::expr(object$fit),
        new_data = rlang::expr(new_data)
      )
    )
  )
}
# nocov end
|
/R/parsnip-gen_additive_mod_data.R
|
no_license
|
cran/bayesmodels
|
R
| false
| false
| 2,479
|
r
|
# nocov start
# Register the "gen_additive_reg" model specification with parsnip's model
# registry; must run before any engine is attached to the model.
make_gen_additive_reg <- function() {
  parsnip::set_new_model("gen_additive_reg")
}
# Register the "stan" engine (brms-backed) for gen_additive_reg in regression
# mode: package dependencies, tunable arguments, data-encoding options, the
# fit interface, and the numeric prediction method.
make_gen_additive_reg_stan <- function() {
  #### REGRESSION
  model  <- "gen_additive_reg"      # use <- for assignment, per R convention
  mode   <- "regression"
  engine <- "stan"
  parsnip::set_model_engine(model = model, mode = mode, eng = engine)
  parsnip::set_dependency(model = model, eng = engine, pkg = "brms")
  parsnip::set_dependency(model = model, eng = engine, pkg = "bayesmodels")
  # Tunable arguments: parsnip name -> native argument of the fit function.
  # `eng = engine` replaces the repeated literal "stan" for consistency with
  # the other registry calls in this function.
  parsnip::set_model_arg(
    model = model,
    eng = engine,
    parsnip = "markov_chains",
    original = "chains",
    func = list(pkg = "bayesmodels", fun = "markov_chains"),
    has_submodel = FALSE
  )
  parsnip::set_model_arg(
    model = model,
    eng = engine,
    parsnip = "chain_iter",
    original = "iter",
    func = list(pkg = "bayesmodels", fun = "chain_iter"),
    has_submodel = FALSE
  )
  parsnip::set_model_arg(
    model = model,
    eng = engine,
    parsnip = "warmup_iter",
    original = "warmup",
    func = list(pkg = "bayesmodels", fun = "warmup_iter"),
    has_submodel = FALSE
  )
  # No dummy-variable expansion; intercept handling is left to the engine.
  parsnip::set_encoding(
    model = model,
    eng = engine,
    mode = mode,
    options = list(
      predictor_indicators = "none",
      compute_intercept = FALSE,
      remove_intercept = FALSE,
      allow_sparse_x = FALSE
    )
  )
  # Formula interface; formula and data must not be overridden by the user.
  parsnip::set_fit(
    model = model,
    eng = engine,
    mode = mode,
    value = list(
      interface = "formula",
      protect = c("formula", "data"),
      func = c(fun = "gen_additive_stan_fit_impl"),
      defaults = list()
    )
  )
  parsnip::set_pred(
    model = model,
    eng = engine,
    mode = mode,
    type = "numeric",
    value = list(
      pre = NULL,
      post = NULL, #function(x, object) res<-tibble::as_tibble(x) %>% dplyr::pull(1) %>% as.numeric(),
      func = c(fun = "predict"),
      args = list(
        object = rlang::expr(object$fit),
        new_data = rlang::expr(new_data)
      )
    )
  )
}
# nocov end
|
install.packages("Rfacebook")
# install.packages("contrib.url")  # fixed: '//' is not a comment in R (was a parse error)
library(devtools)
install_github("Rfacebook", "pablobarbera", subdir = "Rfacebook")
library(Rfacebook)
token <- "XXXXXXXXXXXXXX"  # placeholder OAuth access token -- supply your own
me <- getUsers("pablobarbera", token, private_info = TRUE)
me$name
# Fetch up to `n` posts from a public page via the Graph API, following the
# paging cursor, with optional since/until date filters, feed mode, and
# reaction counts. Returns a data frame (empty if no public posts found).
# NOTE(review): the non-feed URL concatenates `page` and 'posts?...' with no
# '/' separator -- looks like a bug carried over from the original; confirm
# against the Graph API before relying on the non-feed branch.
getPage <- function(page, token, n=25, since=NULL, until=NULL, feed=FALSE, reactions=FALSE) {
  url <- paste0('https://graph.facebook.com/', page, 'posts?fields=from,message,created_time,type,link,story,comments.summary(true)','likes.summary(true),shares')
  if(feed) {
    # Fixed: this brace originally never closed, swallowing the
    # until/since/limit handling below into the feed branch.
    url <- paste0('https://graph.facebook.com/', page, '/feed?fields=from,message,created_time,type,link,story,comments.summary(true)', 'likes.summary(true),shares')
  }
  if(!is.null(until)) {
    url <- paste0(url, '&until=', until)
  }
  if(!is.null(since)) {
    url <- paste0(url, '&since=', since)
  }
  if(n<=25) {
    url <- paste0(url, "&limit=", n)
  }
  content <- callAPI(url=url, token=token)
  l <- length(content$data); cat(l, "posts")
  # Retry up to 3 times on an API error before giving up.
  error <- 0
  while(length(content$error_code)>0) {
    cat("Error!\n")
    Sys.sleep(0.5)
    error <- error + 1
    content <- callAPI(url=url, token=token)
    if(error==3) {
      stop(content$error_msg)
    }
  }
  if(length(content$data)==0) {
    message("No public posts were found")
    return(data.frame())
  }
  df <- pageDataToDF(content$data)
  # Track the earliest date seen so paging can stop once `since` is passed.
  if(!is.null(since)) {
    dates <- formatFbDate(df$created_time, 'date')
    mindate <- min(dates)
    sincedate <- as.Date(since)
  }
  if(is.null(since)) {
    sincedate <- as.Date('1970/01/01')
    mindate <- as.Date(Sys.time())
  }
  if(n > 25) {
    df.list <- list(df)
    # Fixed: the original while() condition had unbalanced parentheses.
    while(l < n & length(content$data) > 0 & !is.null(content$paging$`next`) & sincedate <= mindate) {
      Sys.sleep(0.5)
      url <- content$paging$`next`
      content <- callAPI(url=url, token=token)
      l <- l + length(content$data)
      if(length(content$data) > 0) { cat(l, "posts") }
      error <- 0
      while(length(content$error_code) > 0) {
        cat("Error!\n")
        Sys.sleep(0.5)
        error <- error + 1
        content <- callAPI(url=url, token=token)
        if(error==3) { stop(content$error_msg) }
      }
      new.df <- pageDataToDF(content$data)
      df.list <- c(df.list, list(new.df))
      if(!is.null(since) & nrow(new.df) > 0) {
        dates <- formatFbDate(new.df$created_time, 'date')
        mindate <- min(dates)
      }
    }
    # Bind once after paging instead of on every iteration.
    df <- do.call(rbind, df.list)
  }
  if(nrow(df) > n) {
    df <- df[1:n,]
  }
  if(!is.null(since)) {
    dates <- formatFbDate(df$created_time, 'date')
    df <- df[dates>=sincedate,]
  }
  if(reactions == TRUE) {
    re = getReactions(df$id, token=token, verbose=FALSE)
    df <- merge(df, re, all.x=TRUE)
    df <- df[order(df$created_time),]
  }
  return (df)
}
# Parse Graph API timestamps (reported in GMT) into POSIXct.
format.facebook.date <- function(datestring) {
  date <- as.POSIXct(datestring, format = "%Y-%m-%dT%H:%M:%S+0000", tz = "GMT")
}
# Monthly mean of one per-post count column ("likes"/"comments"/"shares");
# reads `page` from the calling environment.
aggregate.metric <- function(metric) {
  m <- aggregate(page[[paste0(metric, "_count")]], list(month = page$month), mean)
  m$month <- as.Date(paste0(m$month, "-15"))
  m$metric <- metric
  return(m)
}
# Fixed: fetch the page BEFORE deriving month/metric summaries from it (the
# original used `page` before it was created), and the trailing stray '}' of
# the original script was removed.
page <- getPage("DonaldTrump", token, n = 5000, since='2015/01/01', until='2015/12/31')
page$datetime <- format.facebook.date(page$created_time)
page$month <- format(page$datetime, "%Y-%m")
df.list <- lapply(c("likes", "comments", "shares"), aggregate.metric)
df <- do.call(rbind, df.list)
library(ggplot2)
library(scales)
ggplot(df, aes(x = month, y = x, group = metric)) + geom_line(aes(color = metric)) +
  scale_x_date(breaks = "years", labels = date_format("%Y")) + scale_y_log10("Average count per post",
  breaks = c(10, 100, 1000, 10000, 50000)) + theme_bw() + theme(axis.title.x = element_blank())
post_id <- head(page$id, n = 1)
post <- getPost(post_id, token, n = 1000, likes = TRUE, comments = FALSE)
|
/FacebookRetrieval.R
|
no_license
|
RohanGupta24/Hello-World
|
R
| false
| false
| 4,256
|
r
|
install.packages("Rfacebook")
# install.packages("contrib.url")  # fixed: '//' is not a comment in R (was a parse error)
library(devtools)
install_github("Rfacebook", "pablobarbera", subdir = "Rfacebook")
library(Rfacebook)
token <- "XXXXXXXXXXXXXX"  # placeholder OAuth access token -- supply your own
me <- getUsers("pablobarbera", token, private_info = TRUE)
me$name
# Fetch up to `n` posts from a public page via the Graph API, following the
# paging cursor, with optional since/until date filters, feed mode, and
# reaction counts. Returns a data frame (empty if no public posts found).
# NOTE(review): the non-feed URL concatenates `page` and 'posts?...' with no
# '/' separator -- looks like a bug carried over from the original; confirm
# against the Graph API before relying on the non-feed branch.
getPage <- function(page, token, n=25, since=NULL, until=NULL, feed=FALSE, reactions=FALSE) {
  url <- paste0('https://graph.facebook.com/', page, 'posts?fields=from,message,created_time,type,link,story,comments.summary(true)','likes.summary(true),shares')
  if(feed) {
    # Fixed: this brace originally never closed, swallowing the
    # until/since/limit handling below into the feed branch.
    url <- paste0('https://graph.facebook.com/', page, '/feed?fields=from,message,created_time,type,link,story,comments.summary(true)', 'likes.summary(true),shares')
  }
  if(!is.null(until)) {
    url <- paste0(url, '&until=', until)
  }
  if(!is.null(since)) {
    url <- paste0(url, '&since=', since)
  }
  if(n<=25) {
    url <- paste0(url, "&limit=", n)
  }
  content <- callAPI(url=url, token=token)
  l <- length(content$data); cat(l, "posts")
  # Retry up to 3 times on an API error before giving up.
  error <- 0
  while(length(content$error_code)>0) {
    cat("Error!\n")
    Sys.sleep(0.5)
    error <- error + 1
    content <- callAPI(url=url, token=token)
    if(error==3) {
      stop(content$error_msg)
    }
  }
  if(length(content$data)==0) {
    message("No public posts were found")
    return(data.frame())
  }
  df <- pageDataToDF(content$data)
  # Track the earliest date seen so paging can stop once `since` is passed.
  if(!is.null(since)) {
    dates <- formatFbDate(df$created_time, 'date')
    mindate <- min(dates)
    sincedate <- as.Date(since)
  }
  if(is.null(since)) {
    sincedate <- as.Date('1970/01/01')
    mindate <- as.Date(Sys.time())
  }
  if(n > 25) {
    df.list <- list(df)
    # Fixed: the original while() condition had unbalanced parentheses.
    while(l < n & length(content$data) > 0 & !is.null(content$paging$`next`) & sincedate <= mindate) {
      Sys.sleep(0.5)
      url <- content$paging$`next`
      content <- callAPI(url=url, token=token)
      l <- l + length(content$data)
      if(length(content$data) > 0) { cat(l, "posts") }
      error <- 0
      while(length(content$error_code) > 0) {
        cat("Error!\n")
        Sys.sleep(0.5)
        error <- error + 1
        content <- callAPI(url=url, token=token)
        if(error==3) { stop(content$error_msg) }
      }
      new.df <- pageDataToDF(content$data)
      df.list <- c(df.list, list(new.df))
      if(!is.null(since) & nrow(new.df) > 0) {
        dates <- formatFbDate(new.df$created_time, 'date')
        mindate <- min(dates)
      }
    }
    # Bind once after paging instead of on every iteration.
    df <- do.call(rbind, df.list)
  }
  if(nrow(df) > n) {
    df <- df[1:n,]
  }
  if(!is.null(since)) {
    dates <- formatFbDate(df$created_time, 'date')
    df <- df[dates>=sincedate,]
  }
  if(reactions == TRUE) {
    re = getReactions(df$id, token=token, verbose=FALSE)
    df <- merge(df, re, all.x=TRUE)
    df <- df[order(df$created_time),]
  }
  return (df)
}
# Parse Graph API timestamps (reported in GMT) into POSIXct.
format.facebook.date <- function(datestring) {
  date <- as.POSIXct(datestring, format = "%Y-%m-%dT%H:%M:%S+0000", tz = "GMT")
}
# Monthly mean of one per-post count column ("likes"/"comments"/"shares");
# reads `page` from the calling environment.
aggregate.metric <- function(metric) {
  m <- aggregate(page[[paste0(metric, "_count")]], list(month = page$month), mean)
  m$month <- as.Date(paste0(m$month, "-15"))
  m$metric <- metric
  return(m)
}
# Fixed: fetch the page BEFORE deriving month/metric summaries from it (the
# original used `page` before it was created), and the trailing stray '}' of
# the original script was removed.
page <- getPage("DonaldTrump", token, n = 5000, since='2015/01/01', until='2015/12/31')
page$datetime <- format.facebook.date(page$created_time)
page$month <- format(page$datetime, "%Y-%m")
df.list <- lapply(c("likes", "comments", "shares"), aggregate.metric)
df <- do.call(rbind, df.list)
library(ggplot2)
library(scales)
ggplot(df, aes(x = month, y = x, group = metric)) + geom_line(aes(color = metric)) +
  scale_x_date(breaks = "years", labels = date_format("%Y")) + scale_y_log10("Average count per post",
  breaks = c(10, 100, 1000, 10000, 50000)) + theme_bw() + theme(axis.title.x = element_blank())
post_id <- head(page$id, n = 1)
post <- getPost(post_id, token, n = 1000, likes = TRUE, comments = FALSE)
|
# Team count per country.
compt_pays=pays %>%
  group_by(country) %>%
  summarise(nb_team=n())
# Normalise country names (strip "(...)" suffixes, US naming) and join the
# team counts plus the top-ranked team and its points.
map_countries@data=map_countries@data %>%
  mutate(COUNTRY=trimws(sapply(strsplit(as.character(COUNTRY), "\\("), function(x) x[1]))) %>%
  mutate(COUNTRY=ifelse(COUNTRY=='United States','USA',COUNTRY)) %>%
  left_join(compt_pays %>% rename(COUNTRY=country),by='COUNTRY') %>%
  left_join(pays %>% filter(class_pays==1) %>% select(team,nb_pts,country) %>% rename(COUNTRY=country) ,by='COUNTRY')
# Label point (lng/lat) of each country polygon. Build the matrix in one
# pass instead of growing it with rbind() inside a loop (O(n^2) copies).
coord_pays <- do.call(rbind, lapply(seq_along(map_countries$COUNTRY),
                                    function(i) map_countries@polygons[[i]]@labpt))
coord_pays = coord_pays %>%
  as.data.frame() %>%
  rename(lng=V1,lat=V2)
map_countries@data=map_countries@data %>%
  cbind(coord_pays)
# Per-country HTML tooltip: team count and best team / points.
label <- paste(
  "<bold> Country : <bold/> ", map_countries$COUNTRY, "<br/>",
  "Nb teams : ", map_countries$nb_team, "<br/>","Best Team : ",map_countries$team ," / ",map_countries$nb_pts," pts",sep="") %>%
  lapply(htmltools::HTML)
# Leaflet map: one circle per country, radius scaled by sqrt(team count).
output$country_map=renderLeaflet(
  leaflet(map_countries) %>% addTiles() %>%
    addCircleMarkers(label = label,lat= ~ lat,lng= ~ lng,radius = ~ sqrt(nb_team*7),stroke = FALSE, fillOpacity = 0.5)
)
# Companion DT table: sortable/filterable, with column chooser and CSV/PDF export.
output$tabl_map <-
  DT::renderDataTable(
    DT::datatable({
      map_countries@data %>%
        select(COUNTRY,nb_team,team,nb_pts) %>%
        arrange(desc(nb_team)) %>%
        rename("Nb teams"=nb_team,"Best Team"=team,"Nb pts"=nb_pts)
    }
    ,extensions = c(
      #"FixedColumns",
      "FixedHeader",
      "Scroller",
      "ColReorder",
      "Buttons"), options = list(
        columnDefs = list(list(className = 'dt-center',targets="_all")),
        #scroller = TRUE,
        colReorder = TRUE,
        scrollX = TRUE,
        initComplete = JS(
          "function(settings, json) {",
          "$(this.api().table().header()).css({'background-color': 'rgba(255, 255, 128, .5)', 'color': 'hsl(50, 33%, 25%)'});",
          "}"),
        autoWidth = TRUE,
        dom = 'BClfrtip',
        buttons = c(I('colvis'), 'csv', 'pdf'),
        #scrollY = 200,
        #fixedHeader = TRUE,
        #class = 'cell-border stripe',
        # fixedColumns = list(
        #   leftColumns = 2,
        #   heightMatch = 'none'
        # ),
        paging = TRUE ,orderClasses = TRUE),rownames= FALSE, filter = list(position = 'top'))
  )
|
/_SERVER/SHAPES_TEAM/SHAPES_TEAM.R
|
no_license
|
pazzini10/Shiny_Cyclisme
|
R
| false
| false
| 2,381
|
r
|
# Team count per country.
compt_pays=pays %>%
  group_by(country) %>%
  summarise(nb_team=n())
# Normalise country names (strip "(...)" suffixes, US naming) and join the
# team counts plus the top-ranked team and its points.
map_countries@data=map_countries@data %>%
  mutate(COUNTRY=trimws(sapply(strsplit(as.character(COUNTRY), "\\("), function(x) x[1]))) %>%
  mutate(COUNTRY=ifelse(COUNTRY=='United States','USA',COUNTRY)) %>%
  left_join(compt_pays %>% rename(COUNTRY=country),by='COUNTRY') %>%
  left_join(pays %>% filter(class_pays==1) %>% select(team,nb_pts,country) %>% rename(COUNTRY=country) ,by='COUNTRY')
# Label point (lng/lat) of each country polygon. Build the matrix in one
# pass instead of growing it with rbind() inside a loop (O(n^2) copies).
coord_pays <- do.call(rbind, lapply(seq_along(map_countries$COUNTRY),
                                    function(i) map_countries@polygons[[i]]@labpt))
coord_pays = coord_pays %>%
  as.data.frame() %>%
  rename(lng=V1,lat=V2)
map_countries@data=map_countries@data %>%
  cbind(coord_pays)
# Per-country HTML tooltip: team count and best team / points.
label <- paste(
  "<bold> Country : <bold/> ", map_countries$COUNTRY, "<br/>",
  "Nb teams : ", map_countries$nb_team, "<br/>","Best Team : ",map_countries$team ," / ",map_countries$nb_pts," pts",sep="") %>%
  lapply(htmltools::HTML)
# Leaflet map: one circle per country, radius scaled by sqrt(team count).
output$country_map=renderLeaflet(
  leaflet(map_countries) %>% addTiles() %>%
    addCircleMarkers(label = label,lat= ~ lat,lng= ~ lng,radius = ~ sqrt(nb_team*7),stroke = FALSE, fillOpacity = 0.5)
)
# Companion DT table: sortable/filterable, with column chooser and CSV/PDF export.
output$tabl_map <-
  DT::renderDataTable(
    DT::datatable({
      map_countries@data %>%
        select(COUNTRY,nb_team,team,nb_pts) %>%
        arrange(desc(nb_team)) %>%
        rename("Nb teams"=nb_team,"Best Team"=team,"Nb pts"=nb_pts)
    }
    ,extensions = c(
      #"FixedColumns",
      "FixedHeader",
      "Scroller",
      "ColReorder",
      "Buttons"), options = list(
        columnDefs = list(list(className = 'dt-center',targets="_all")),
        #scroller = TRUE,
        colReorder = TRUE,
        scrollX = TRUE,
        initComplete = JS(
          "function(settings, json) {",
          "$(this.api().table().header()).css({'background-color': 'rgba(255, 255, 128, .5)', 'color': 'hsl(50, 33%, 25%)'});",
          "}"),
        autoWidth = TRUE,
        dom = 'BClfrtip',
        buttons = c(I('colvis'), 'csv', 'pdf'),
        #scrollY = 200,
        #fixedHeader = TRUE,
        #class = 'cell-border stripe',
        # fixedColumns = list(
        #   leftColumns = 2,
        #   heightMatch = 'none'
        # ),
        paging = TRUE ,orderClasses = TRUE),rownames= FALSE, filter = list(position = 'top'))
  )
|
#San Francisco Crime Classifcation
#Initialize libraries
library(MASS)
library(readr)
#library (rpart)
library(caret)
library(lubridate)
# Read a train/test CSV from Data/ and derive date and location features.
# NOTE(review): Dates1$year is years since 1900 and Dates1$mon is 0-based
# (POSIXlt semantics). The same offsets apply to train and test, so the model
# is unaffected, but Year/Month are not calendar values.
treatment = function(fname){
  df = read.csv(paste0('Data/',fname))
  Dates1 = strptime(as.character(df$Dates),"%Y-%m-%d %H:%M:%S")
  df$Year = Dates1$year
  df$Month = Dates1$mon
  df$Day = as.numeric(format(ymd_hms(Dates1), "%d"))
  df$Hour = as.numeric(format(ymd_hms(Dates1), "%H"))
  # Rounded (X, Y) pair as a coarse location factor.
  df$Loc = as.factor(paste(round(df$X,2), round(df$Y,2), sep= " "))
  # "... of ..." in the address distinguishes block addresses (AddType 2)
  # from intersections (AddType 1).
  df$AddOf = sapply(df$Address, FUN=function(x) {strsplit(as.character(x), split="of ")[[1]][2]})
  df$AddType = as.factor(ifelse(is.na(df$AddOf ),1,2))
  return(df)
}
#Reads the data
train = treatment('train.csv')
test<-treatment('test.csv')
## Subsample the training data (memory space constraint)
inTrain = createDataPartition(train$Category,p=0.55,list=F)
train.sub = train[inTrain,]
# One-vs-rest logistic regression per crime category; returns a data frame of
# per-category probabilities keyed by test Id. (Name kept for compatibility
# with existing callers even though it fits glm, not rpart.)
rpart.train = function(train,test){
  submission = data.frame(Id=test$Id)
  response = data.frame(Cat=train$Category)
  crime = as.character(unique(train$Category))
  crime = sort(crime)
  for (i in crime){
    response[i] = 0
    response[i][response$Cat==i,] = 1
    fit = glm(response[,i]~PdDistrict+X+Y+AddType+DayOfWeek+Year+Hour+Month+Day,data=train, family = binomial)
    pred = predict(fit,test, type = "response")
    submission[i] = pred
    # Fixed: exclude the Id column from the count so progress ends at 100%
    # (the original reported >100% because ncol includes Id).
    print(paste0((ncol(submission)-1)/length(crime)*100,'% completed'))
  }
  return(submission)
}
submission = rpart.train(train.sub,test)
#Saves the data
write.csv(submission, file="crimes.csv", row.names=FALSE)
|
/San Francisco Crime Classification/crime_classifier.R
|
no_license
|
Anithaponnuru/Kaggle
|
R
| false
| false
| 1,695
|
r
|
#San Francisco Crime Classifcation
#Initialize libraries
library(MASS)
library(readr)
#library (rpart)
library(caret)
library(lubridate)
# Read a train/test CSV from Data/ and derive date and location features.
# NOTE(review): Dates1$year is years since 1900 and Dates1$mon is 0-based
# (POSIXlt semantics). The same offsets apply to train and test, so the model
# is unaffected, but Year/Month are not calendar values.
treatment = function(fname){
  df = read.csv(paste0('Data/',fname))
  Dates1 = strptime(as.character(df$Dates),"%Y-%m-%d %H:%M:%S")
  df$Year = Dates1$year
  df$Month = Dates1$mon
  df$Day = as.numeric(format(ymd_hms(Dates1), "%d"))
  df$Hour = as.numeric(format(ymd_hms(Dates1), "%H"))
  # Rounded (X, Y) pair as a coarse location factor.
  df$Loc = as.factor(paste(round(df$X,2), round(df$Y,2), sep= " "))
  # "... of ..." in the address distinguishes block addresses (AddType 2)
  # from intersections (AddType 1).
  df$AddOf = sapply(df$Address, FUN=function(x) {strsplit(as.character(x), split="of ")[[1]][2]})
  df$AddType = as.factor(ifelse(is.na(df$AddOf ),1,2))
  return(df)
}
#Reads the data
train = treatment('train.csv')
test<-treatment('test.csv')
## Subsample the training data (memory space constraint)
inTrain = createDataPartition(train$Category,p=0.55,list=F)
train.sub = train[inTrain,]
# One-vs-rest logistic regression per crime category; returns a data frame of
# per-category probabilities keyed by test Id. (Name kept for compatibility
# with existing callers even though it fits glm, not rpart.)
rpart.train = function(train,test){
  submission = data.frame(Id=test$Id)
  response = data.frame(Cat=train$Category)
  crime = as.character(unique(train$Category))
  crime = sort(crime)
  for (i in crime){
    response[i] = 0
    response[i][response$Cat==i,] = 1
    fit = glm(response[,i]~PdDistrict+X+Y+AddType+DayOfWeek+Year+Hour+Month+Day,data=train, family = binomial)
    pred = predict(fit,test, type = "response")
    submission[i] = pred
    # Fixed: exclude the Id column from the count so progress ends at 100%
    # (the original reported >100% because ncol includes Id).
    print(paste0((ncol(submission)-1)/length(crime)*100,'% completed'))
  }
  return(submission)
}
submission = rpart.train(train.sub,test)
#Saves the data
write.csv(submission, file="crimes.csv", row.names=FALSE)
|
# Load the PTT board data: non-unix platforms use the two .rda dumps,
# unix platforms a combined file.
if(.Platform$OS.type != "unix"){
load("hatepolitics.rda")
load("gossiping.rda")
} else{
load("articles_big5.rds")
}
library(jiebaR)
# Default jiebaR worker for Chinese word segmentation.
mixseg = worker()
## clean NULL ##
# Replace missing post titles with "" so segmentation does not fail.
for (i in c(1:length(hatepolitics))){
print(hatepolitics[[i]]$postData$Title)
if( is.null(hatepolitics[[i]]$postData$Title) ) hatepolitics[[i]]$postData$Title <- ""
}
# jiebaR's `<=` operator segments each title into a word vector.
segRes.GD <- lapply(gossiping, function(msg) mixseg <= msg$postData$Title)
segRes.HP <- lapply(hatepolitics, function(msg) mixseg <= msg$postData$Title)
paste(segRes.GD[[1]],collapse = " ")
segRes <- append(segRes.HP,segRes.GD)
library(tm)
# Re-join segmented words with spaces so tm can tokenize them.
tmWordsVec = sapply(segRes,function(ws) paste(ws,collapse = " "))
corpus <- Corpus(VectorSource(tmWordsVec))
# wordLengths = c(1, Inf) keeps single-character (Chinese) terms,
# which tm would otherwise drop.
tdm = TermDocumentMatrix(corpus,control = list(wordLengths = c(1, Inf)))
inspect(tdm)
library(rpart)
library(rpart.plot)
library(maptree)
# Documents as rows, terms as columns; labels: HP = hatepolitics,
# G = gossiping board.
Features = t(as.matrix(tdm))
Labels = c(rep("HP",length(segRes.HP)),rep("G",length(segRes.GD)))
df = data.frame(Y=Labels,Features)
# View(df)
# Classification tree predicting the source board from term counts.
model = rpart(Y~.,data=df)
draw.tree(model)
draw.tree(model, nodeinfo = TRUE, cases = "articles")
rpart.plot(model)
model
|
/MachineLearning.R
|
no_license
|
vickynaiyi/Sinopac_RCourse
|
R
| false
| false
| 1,109
|
r
|
# Load the PTT board data: non-unix platforms use the two .rda dumps,
# unix platforms a combined file.
if(.Platform$OS.type != "unix"){
load("hatepolitics.rda")
load("gossiping.rda")
} else{
load("articles_big5.rds")
}
library(jiebaR)
# Default jiebaR worker for Chinese word segmentation.
mixseg = worker()
## clean NULL ##
# Replace missing post titles with "" so segmentation does not fail.
for (i in c(1:length(hatepolitics))){
print(hatepolitics[[i]]$postData$Title)
if( is.null(hatepolitics[[i]]$postData$Title) ) hatepolitics[[i]]$postData$Title <- ""
}
# jiebaR's `<=` operator segments each title into a word vector.
segRes.GD <- lapply(gossiping, function(msg) mixseg <= msg$postData$Title)
segRes.HP <- lapply(hatepolitics, function(msg) mixseg <= msg$postData$Title)
paste(segRes.GD[[1]],collapse = " ")
segRes <- append(segRes.HP,segRes.GD)
library(tm)
# Re-join segmented words with spaces so tm can tokenize them.
tmWordsVec = sapply(segRes,function(ws) paste(ws,collapse = " "))
corpus <- Corpus(VectorSource(tmWordsVec))
# wordLengths = c(1, Inf) keeps single-character (Chinese) terms,
# which tm would otherwise drop.
tdm = TermDocumentMatrix(corpus,control = list(wordLengths = c(1, Inf)))
inspect(tdm)
library(rpart)
library(rpart.plot)
library(maptree)
# Documents as rows, terms as columns; labels: HP = hatepolitics,
# G = gossiping board.
Features = t(as.matrix(tdm))
Labels = c(rep("HP",length(segRes.HP)),rep("G",length(segRes.GD)))
df = data.frame(Y=Labels,Features)
# View(df)
# Classification tree predicting the source board from term counts.
model = rpart(Y~.,data=df)
draw.tree(model)
draw.tree(model, nodeinfo = TRUE, cases = "articles")
rpart.plot(model)
model
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAvailableGenes.R
\name{getAvailableGenes}
\alias{getAvailableGenes}
\title{Get Available Genes (helper function)}
\usage{
getAvailableGenes(pool = NULL)
}
\arguments{
\item{pool}{an object created by pool::dbPool for accessing the SQL database.
It will be created if not supplied.}
}
\value{
A vector of genes with associated correlation data
}
\description{
Finds available genes within correlation data
}
\examples{
genes <- correlationAnalyzeR::getAvailableGenes()
}
|
/man/getAvailableGenes.Rd
|
no_license
|
ViolaYing/correlationAnalyzeR
|
R
| false
| true
| 547
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAvailableGenes.R
\name{getAvailableGenes}
\alias{getAvailableGenes}
\title{Get Available Genes (helper function)}
\usage{
getAvailableGenes(pool = NULL)
}
\arguments{
\item{pool}{an object created by pool::dbPool for accessing the SQL database.
It will be created if not supplied.}
}
\value{
A vector of genes with associated correlation data
}
\description{
Finds available genes within correlation data
}
\examples{
genes <- correlationAnalyzeR::getAvailableGenes()
}
|
library(yaml)
config = yaml.load_file("/Volumes/SSD2go_tw/conafor/reporting/config/database.yml")
# S4 container for one computed result: the data frame itself plus
# module/variable identifiers and a success flag.
# Uses the current `slots=` argument; `representation()` is the
# legacy spelling of the same definition.
setClass(Class="ResultSet",
  slots = c(
    result="data.frame",
    module="character",
    variable="character",
    status="logical"
  )
)
# Map each top-level element of `base` to its positional index.
#
# Args:
#   base: a list (or data frame); element names are carried over.
# Returns:
#   A list the same length as `base`; element i holds the integer i
#   and keeps the i-th name of `base`. Empty input yields an empty
#   list (the original `1:length(...)` loop iterated over c(1, 0)
#   and grew a spurious element).
getAllVariables <- function(base) {
  BaseVars = vector(mode="list", length=length(base))
  names(BaseVars) = names(base)
  # seq_along() is safe for zero-length input, unlike 1:length(x).
  for (i in seq_along(BaseVars)) {
    BaseVars[[i]] = i
  }
  return (BaseVars)
}
|
/old/cliente_delta_carbon/tools.R
|
no_license
|
twehrmann/reporting
|
R
| false
| false
| 506
|
r
|
library(yaml)
config = yaml.load_file("/Volumes/SSD2go_tw/conafor/reporting/config/database.yml")
# S4 container for one computed result: the data frame itself plus
# module/variable identifiers and a success flag.
# Uses the current `slots=` argument; `representation()` is the
# legacy spelling of the same definition.
setClass(Class="ResultSet",
  slots = c(
    result="data.frame",
    module="character",
    variable="character",
    status="logical"
  )
)
# Map each top-level element of `base` to its positional index.
#
# Args:
#   base: a list (or data frame); element names are carried over.
# Returns:
#   A list the same length as `base`; element i holds the integer i
#   and keeps the i-th name of `base`. Empty input yields an empty
#   list (the original `1:length(...)` loop iterated over c(1, 0)
#   and grew a spurious element).
getAllVariables <- function(base) {
  BaseVars = vector(mode="list", length=length(base))
  names(BaseVars) = names(base)
  # seq_along() is safe for zero-length input, unlike 1:length(x).
  for (i in seq_along(BaseVars)) {
    BaseVars[[i]] = i
  }
  return (BaseVars)
}
|
# 3/4/2019
#****************************************************************************************************
# Includes ####
#****************************************************************************************************
source("./r/includes/libraries.r")
source("./r/includes/globals.r")
#****************************************************************************************************
# Libraries ####
#****************************************************************************************************
library("BEAData")
library("fredr")
library("purrr")
library("ggrepel")
library("grid")
library("gridExtra")
library("broom")
#****************************************************************************************************
# Globals ####
#****************************************************************************************************
pop65p <- c("pop65_69", "pop70_74", "pop75_79", "pop80_84", "pop85plus")
#****************************************************************************************************
# Functions ####
#****************************************************************************************************
# Shaded vertical band annotation for ggplot2 charts.
# xmin/xmax bound the band; ymin/ymax default to the full plot
# height. Larger alpha values give a darker rectangle.
gband <- function(xmin, xmax, fill="grey", alpha=.5, ymin=-Inf, ymax=Inf) {
  annotate(
    "rect",
    xmin = xmin,
    xmax = xmax,
    ymin = ymin,
    ymax = ymax,
    fill = fill,
    alpha = alpha
  )
}
#****************************************************************************************************
# Population projections data ####
#****************************************************************************************************
wpop <- readRDS("./data/popproj/wc_popproj.rds") # Weldon Cooper pop projections
ht(wpop)
count(wpop, stabbr) # 52 includes DC, US, but not PR
count(wpop, sex) # "Total"
count(wpop, year) # 2010, 2020, 2030, 2040
count(wpop, popgroup) # pop00_04, pop05_09, ..., pop80_84, pop85plus, poptot
#****************************************************************************************************
# Labor force data ####
#****************************************************************************************************
# install.packages("fredr")
# library("apitools")
#.. labor force participation by age group ----
# BLS Civilian labor force participation rates
# https://fred.stlouisfed.org/series/LNS11300060 25-54
# https://fred.stlouisfed.org/series/LNS11324230 55+
fredr_set_key(globals$fred_apikey)
rateids <- c("LNS11300060", "LNS11324230")
lfpr <- map_dfr(rateids, fredr, frequency="a") %>%
mutate(year=year(date),
vname=factor(series_id, levels=rateids, labels=c("lfpr2554", "lfpr55p"))) %>%
select(year, series_id, vname, value)
ht(lfpr)
p <- lfpr %>%
ggplot(aes(year, value, colour=vname)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks=seq(1940, 2020, 5)) +
theme_bw()
capt1 <- "U.S. Bureau of Labor Statistics, Civilian Labor Force Participation Rates, retrieved from FRED, Federal Reserve Bank of St. Louis, March 5, 2019."
capt2 <- "25 to 54 years (https://fred.stlouisfed.org/series/LNS11300060)"
capt3 <- "55 years and over (https://fred.stlouisfed.org/series/LNS11324230)"
capt <- paste0("Source: ", capt1, "\n", capt2, "\n", capt3)
p <- lfpr %>%
mutate(vname=factor(vname, levels=c("lfpr2554", "lfpr55p"), labels=c("25-54", "55+"))) %>%
ggplot(aes(year, value, colour=vname)) +
geom_line(size=1.1) +
geom_point() +
scale_x_continuous(name=NULL, breaks=seq(1920, 2050, 5)) +
scale_y_continuous(name="Participation rate (%)") +
ggtitle("U.S. Civilian labor force participation rates") +
labs(caption=capt) +
labs(colour="Age group") +
theme_bw() +
theme(plot.caption = element_text(hjust=0, size=10))
p
ggsave(plot=p, filename="./results/uslfpr.png", width=8, height=6, units="in")
# p + gband(1980, 2005, alpha=.3)
#.. graph of U.S. pop growth ----
glimpse(spi.a_all)
glimpse(nipa)
uspop <- nipa %>%
filter(freq=="A", vname=="B230RC")
ht(uspop)
usgr <- uspop %>%
select(year, pop=value) %>%
mutate(lpop=lag(pop), lyear=lag(year), gr=(pop / lpop)^(1 / (year - lyear))*100 - 100)
ht(usgr)
# get projected US growth rates
ht(wpop)
pgr <- wpop %>%
filter(sex=="Total", popgroup=="poptot", stabbr=="US") %>%
mutate(pop=value / 1000, lpop=lag(pop), lyear=lag(year),
gr=(pop / lpop)^(1 / (year - lyear))*100 - 100)
pgr2 <- tibble(year=min(pgr$year):max(pgr$year)) %>%
left_join(pgr %>% select(year, gr))
ht(pgr2)
allgr <- usgr %>%
select(year, gr) %>%
bind_rows(pgr2 %>% filter(year > max(usgr$year)) %>% select(year, gr)) %>%
mutate(gr=na.approx(gr,na.rm =FALSE)) # fill in missing growth rates by interpolation
ht(allgr)
allgr %>% filter(year %in% 2010:2020)
p <- allgr %>%
mutate(proj=ifelse(year>=2019, "proj", "hist")) %>%
ggplot(aes(year, gr, linetype=proj)) +
geom_line(size=1.1, colour="blue") +
# scale_colour_manual(values=c("blue", "green")) +
scale_x_continuous(name=NULL, breaks=seq(1920, 2050, 10)) +
scale_y_continuous(name="Annualized growth rate", limits=c(0, NA)) +
geom_hline(yintercept = 1, linetype="dashed") +
geom_hline(yintercept = 0.5, linetype="dashed") +
theme_bw()
p + gband(1946, 1964, alpha=.4)
capt1 <- "2018 and earlier: U.S. Bureau of the Census, obtained from Bureau of Economic analysis variable B230RC"
capt2 <- "2019+: Author's interpolation of projections from the University of Virginia, Weldon Cooper Center for Public Service"
capt <- paste0(capt1, "\n", capt2)
p <- allgr %>%
filter(!year %in% c(2021:2029, 2031:2039)) %>%
ggplot(aes(year, gr)) +
geom_line(size=1.1, colour="blue", data=. %>% filter(year <= 2020)) +
geom_curve(aes(x = 2020, y = gr[year==2020],
xend = 2030, yend = gr[year==2030]),
data = allgr, size=1.1, linetype="dotted", colour="blue", curvature=0) +
geom_curve(aes(x = 2030, y = gr[year==2030],
xend = 2040, yend = gr[year==2040]),
data = allgr, size=1.1, linetype="dotted", colour="blue", curvature=0) +
scale_x_continuous(name=NULL, breaks=seq(1920, 2050, 10)) +
scale_y_continuous(name="Percentage change from prior year", limits=c(0, NA)) +
geom_hline(yintercept = 1, linetype="dashed") +
geom_hline(yintercept = 0.5, linetype="dashed") +
ggtitle("U.S. Population growth rate") +
labs(caption=capt) +
theme_bw() +
theme(plot.caption = element_text(hjust=0, size=10))
p2 <- p + gband(1946, 1964, alpha=.4) +
annotate("text", x=1949, y=0.7,
hjust=0,
vjust=0,
label="Baby boom\nbirth years")
# 1946 โ 1964 baby boom birth years
p2
ggsave(plot=p2, filename="./results/uspopgr.png", width=8, height=6, units="in")
#****************************************************************************************************
# SOI ####
#****************************************************************************************************
# D:\Data\bdata_package_sourcedata\soi
data(package="bdata")
glimpse(soiall)
count(soiall, vname)
count(soiall, year)
# retirement income: iradist, txblpension, txblsocsec
count(soiall, incgrp)
# quick check on growth rates
vars <- c("agi", "wages", "iradist", "txblpension", "txblsocsec", "txblinterest", "netcgll", "busprofinc", "odividends")
soi <- soiall %>%
filter(stabbr=="US", incgrp=="all", vname %in% vars) %>%
select(year, stabbr, vname, value) %>%
spread(vname, value) %>%
mutate(retinc=iradist + txblpension + txblsocsec) %>%
gather(vname, value, -year, -stabbr) %>%
group_by(vname) %>%
arrange(year) %>%
mutate(pch=value / lag(value) * 100 - 100)
soi
# Create a table of US retirement income
soi %>% filter(stabbr=="US", year==2016) %>%
write_csv("./results/agius2016.csv")
df %>% select(-stabbr, -value) %>%
spread(vname, pch)
df %>% filter(vname %in% c("wages", "retinc")) %>%
ggplot(aes(year, pch, colour=vname)) +
geom_line() +
geom_point()
df %>% filter(vname %in% c("wages", "retinc")) %>%
select(-pch) %>%
spread(vname, value) %>%
mutate(share=retinc / (retinc + wages) * 100) %>%
ggplot(aes(year, share)) +
geom_line()
df <- soiall %>% filter(incgrp=="all", vname %in% vars) %>%
select(year, stabbr, vname, value) %>%
spread(vname, value) %>%
mutate(retinc=irapay + txblpension + txblsocsec,
share=retinc / (retinc + wages) * 100)
df %>% filter(year %in% c(2004, 2014)) %>%
select(stabbr, year, share) %>%
spread(year, share) %>%
mutate(change=`2014` - `2004`) %>%
arrange(-change)
df %>% filter(stabbr %in% c("US", "DE", "MI", "OK", "ND")) %>%
ggplot(aes(year, share, colour=stabbr)) +
geom_line()
#.. State tables on retirement income ---
#****************************************************************************************************
# ONETIME: CPS - create perdf - person data frame ####
#****************************************************************************************************
# March supplement as obtained from Census
# General info https://www.census.gov/programs-surveys/saipe/guidance/model-input-data/cpsasec.html
# ftp download
# State tax variables discussed at http://answers.popdata.org/Negative-values-fedtax-statetax-q1469058.aspx
cpsdir <- "D:/Data/CensusCPS/splitfiles/"
# Convert state FIPS codes to postal abbreviations using the global
# `stcodes` lookup table (assumed to have stfips/stabbr columns).
stabfips <- function(fips){
  idx <- match(fips, as.character(stcodes$stfips))
  as.character(stcodes$stabbr[idx])
}
# households - good for 2015, 2016, 2017, 2018
hvars <- read_csv("vname, start, end, type
hseq, 2, 6, i
gestfips, 42, 43, c")
hvars
# Read the household records of one year's ASEC fixed-width file,
# using the column layout in `hvars` (vname/start/end/type).
# Relies on the global `cpsdir` path; n_max = -1 reads all rows.
gethh <- function(year, hvars) {
  hh_path <- paste0(cpsdir, "asec", year, "_hh.dat")
  read_fwf(
    hh_path,
    fwf_positions(hvars$start, hvars$end, col_names = hvars$vname),
    col_types = paste0(hvars$type, collapse = ""),
    n_max = -1
  )
}
# hh2015 <- gethh(2015, hvars) %>%
# mutate(year=2015,
# stabbr=stabfips(gestfips))
# glimpse(hh2015)
# count(hh2015, gestfips, stabbr)
#
# hh2016 <- gethh(2016, hvars) %>%
# mutate(year=2016,
# stabbr=stabfips(gestfips))
# glimpse(hh2016)
# count(hh2016, gestfips, stabbr)
hh2017 <- gethh(2017, hvars) %>%
mutate(year=2017,
stabbr=stabfips(gestfips))
glimpse(hh2017)
count(hh2017, gestfips, stabbr)
hh2018 <- gethh(2018, hvars) %>%
mutate(year=2018,
stabbr=stabfips(gestfips))
glimpse(hh2018)
count(hh2018, gestfips, stabbr)
# persons - also good for 2015, 2016, 2017
# ptotval Total persons income
# D FILESTAT 1 733 (1:6)
# Tax Filer status
# V 1 .Joint, both <65
# V 2 .Joint, one <65 & one 65+
# V 3 .Joint, both 65+
# V 4 .Head of household
# V 5 .Single
# V 6 .Nonfiler
# D STATETAX_AC 6 790 (-9999:999999)
# State income tax liability, after all
# credits
# V 0 .None
# V -9999 - .Dollar amount
# V 999999 .
pvars <- read_csv("vname, start, end, type
hseq, 2, 6, i
age, 19, 20, i
sex, 24, 24, i
marsupwt, 155, 162, d
ptotval, 580, 587, d
filestat, 733, 733, i
statetax_ac, 790, 795, d
")
pvars
# Read the person records of one year's ASEC fixed-width file,
# using the column layout in `pvars` (vname/start/end/type).
# Relies on the global `cpsdir` path; n_max = -1 reads all rows.
getp <- function(year, pvars){
  per_path <- paste0(cpsdir, "asec", year, "_person.dat")
  read_fwf(
    per_path,
    fwf_positions(pvars$start, pvars$end, col_names = pvars$vname),
    col_types = paste0(pvars$type, collapse = ""),
    n_max = -1
  )
}
# p2015 <- getp(2015, pvars) %>%
# mutate(year=2015,
# marsupwt=marsupwt / 100,
# stabbr=hh2015$stabbr[match(hseq, hh2015$hseq)])
# glimpse(p2015)
#
# p2016 <- getp(2016, pvars) %>%
# mutate(year=2016,
# marsupwt=marsupwt / 100,
# stabbr=hh2016$stabbr[match(hseq, hh2016$hseq)])
# glimpse(p2016)
p2017 <- getp(2017, pvars) %>%
mutate(year=2017,
marsupwt=marsupwt / 100,
stabbr=hh2017$stabbr[match(hseq, hh2017$hseq)])
glimpse(p2017)
p2018 <- getp(2018, pvars) %>%
mutate(year=2018,
marsupwt=marsupwt / 100,
stabbr=hh2018$stabbr[match(hseq, hh2018$hseq)])
glimpse(p2018)
# create a combined file
# put real tax in 2015 dollars -- that is, the 2016 file has 2015 $ (tax in the prior year)
# so adjust the 2015-file amount forward from 2014$ to 2015$
# the CPI-U was up only 0.1% between 2014 and 2015!
# it was up 2.1% from 2016 to 2017
perdf <- bind_rows(p2017, p2018) %>%
mutate(rincome=ifelse(year==2017, ptotval * 1.021, ptotval),
rtax=ifelse(year==2017, statetax_ac * 1.021, statetax_ac))
glimpse(perdf)
saveRDS(perdf, "./data/perdf.rds")
#****************************************************************************************************
# CPS - get/analyze perdf - person data frame ####
#****************************************************************************************************
#.. graph pop distribution -- density ----
perdf <- readRDS("./data/perdf.rds")
glimpse(perdf)
popsum <- perdf %>%
group_by(stabbr, age) %>%
summarise(pop=sum(marsupwt) / 2)
usvals <- popsum %>%
ungroup %>%
mutate(stabbr="US", weight = pop/sum(pop))
allpop <- popsum %>%
bind_rows(usvals) %>%
ungroup
sts <- c("US", "NH", "TX")
stnames <- c("U.S.", "New Hampshire", "Texas")
p <- allpop %>%
filter(stabbr %in% sts) %>%
mutate(stabbr=factor(stabbr,
levels=sts,
labels=stnames)) %>%
group_by(stabbr) %>%
mutate(weight = pop/sum(pop)) %>%
ggplot(aes(age, weight = weight, colour=stabbr)) +
geom_line(stat="density", size=1) +
# get rid of the box in the legend by using show.legend=FALSE and then stat_density
# geom_density(size=1, show.legend = FALSE) +
# stat_density(aes(x=age, colour=stabbr),
# geom="line",
# position="identity", size=1) +
# geom_hline(yintercept=0, colour="white", size=1) +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="Density", breaks=seq(0, 1, .001)) +
theme_bw() +
labs(colour=NULL) +
ggtitle("Age distribution of population, selected states")
p
ggsave(p, file=paste0("./results/popdist_density.png"), width=10, height=6, units="in")
# stat_density(geom="line", size=1.5)
allpop %>%
filter(stabbr %in% c("US", "ME", "TX", "FL")) %>%
group_by(stabbr) %>%
mutate(weight = pop/sum(pop)) %>%
ggplot(aes(age, y = ..density.., weight = weight, fill=stabbr)) +
geom_histogram(size=1.5, bins=20) +
facet_wrap(~stabbr, ncol=1)
count(perdf, stabbr, year) %>%
spread(year, n) %>%
mutate(diff=`2018` - `2017`, pdiff=diff / `2017` * 100) %>%
arrange(desc(pdiff)) %>%
ht
perdf %>% filter(age>=18, age<65) %>% # even here we have half paying no state tax!
group_by(year) %>%
do(qtiledf(.$rtax))
perdf %>% filter(age>=18, !stabbr %in% globals$nonpit_states) %>% # still half paying no state tax!
group_by(year) %>%
do(qtiledf(.$rtax))
# D FILESTAT 1 733 (1:6)
# Tax Filer status
# V 1 .Joint, both <65
# V 2 .Joint, one <65 & one 65+
# V 3 .Joint, both 65+
# V 4 .Head of household
# V 5 .Single
# V 6 .Nonfiler
perdf %>% filter(age >= 18, age < 65, !stabbr %in% globals$nonpit_states) %>%
group_by(filestat) %>%
do(qtiledf(.$rtax))
ibrks <- c(-1e9, 0, 10e3, 25e3, 50e3, 100e3, 1e9)
perdf %>% filter(age >= 18, !stabbr %in% globals$nonpit_states) %>%
mutate(ygroup=cut(rincome, ibrks)) %>%
group_by(ygroup) %>%
do(qtiledf(.$rtax))
# get weighted mean rtax by age groups
abrks <- c(-100, 0, 18, 25, 35, 45, 65, 1e9)
# counts
perdf %>% filter(age > 16) %>%
mutate(agegrp=cut(age, breaks=abrks)) %>%
group_by(stabbr, agegrp) %>%
summarise(n=n()) %>%
spread(agegrp, n)
tmp <- perdf %>% filter(age > 16) %>%
mutate(agegrp=cut(age, breaks=abrks)) %>%
group_by(stabbr, agegrp) %>%
summarise(n=sum(marsupwt) / 1e6,
rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE))
tmp
tmp2 <- tmp %>% select(-n) %>%
group_by(stabbr) %>%
spread(agegrp, rtax) %>%
mutate(ratio=`(65,1e+09]` / `(45,65]`,
diff=`(65,1e+09]` - `(45,65]`)
tmp2 %>% arrange(ratio)
tmp2 %>% arrange(diff)
# Now do age-tax profiles (regressions)
mod2 <- perdf %>% filter(age >= 18, stabbr=="MS") %>%
mutate(age2=age^2, age3=age^3, rincome2=rincome^2) %>%
lm(rtax ~ age + age2 + age3 + rincome + rincome2, data=.)
summary(mod2)
mod <- perdf %>% filter(age>=18, stabbr=="MS") %>%
mutate(age2=age^2, age3=age^3) %>%
lm(rtax ~ age + age2 + age3, data=.)
summary(mod)
tibble(age=18:75) %>%
mutate(rtax=coef(mod)["age"]*age +
coef(mod)["age2"]*age^2 +
coef(mod)["age3"]*age^3,
rtax=rtax - rtax[age==18]) %>%
ggplot(aes(age, rtax)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-1000, 2000, 100))
perdf %>% filter(stabbr=="MD", age >= 18) %>%
group_by(age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE)) %>%
ggplot(aes(age, rtax)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-1000, 10000, 100))
st <- "MD"
byage <- perdf %>% filter(age>=18, stabbr==st) %>%
group_by(age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE))
byage %>%
ggplot(aes(age, rincome)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-10e3, 500e3, 10e3)) +
ggtitle(st)
st <- "CA"
mod <- perdf %>% filter(age>=18, stabbr==st) %>%
mutate(age2=age^2, age3=age^3, age4=age^4) %>%
lm(rtax ~ age + age2 + age3 + age4, data=.)
summary(mod)
tibble(age=18:75) %>%
mutate(rtax.est=coef(mod)["(Intercept)"] +
coef(mod)["age"]*age +
coef(mod)["age2"]*age^2 +
coef(mod)["age3"]*age^3 +
coef(mod)["age4"]*age^4) %>%
left_join(byage) %>%
gather(variable, value, -age) %>%
ggplot(aes(age, value, colour=variable)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-1000, 10000, 100)) +
ggtitle(st)
#****************************************************************************************************
# Construct CPS age-tax profiles ####
#****************************************************************************************************
# Now do age-tax profiles (regressions) for real ####
# perdf
# perdf2 <- perdf %>% filter(!(stabbr=="MD" & year==2016)) # remove bad data
# saveRDS(perdf2, "./data/perdf2.rds")
# perdf2 <- readRDS("./data/perdf2.rds")
#.. stage -- state age analysis build the data ----
stage.st <- perdf %>%
filter(age>=18) %>%
group_by(stabbr, age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE))
stage.us <- perdf %>%
filter(age>=18) %>%
group_by(age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE)) %>%
mutate(stabbr="US")
stage <- bind_rows(stage.st, stage.us)
st <- "CA"
#.. build the model ----
# Fit a quartic-in-age regression of real state income tax (rtax)
# on the adult (age >= 18) records of `df`, and return the
# coefficients as a one-row tibble whose columns are prefixed with
# "tx." ("tx.intercept", "tx.age", "tx.age2", ...).
f <- function(df){
  fitdat <- df %>%
    filter(age >= 18) %>%
    mutate(age2=age^2, age3=age^3, age4=age^4)
  mod <- lm(rtax ~ age + age2 + age3 + age4, data=fitdat)
  coefs <- as_tibble(t(coef(mod)))
  nms <- names(coefs)
  nms[1] <- "intercept"
  names(coefs) <- paste0("tx.", nms)
  coefs
}
tx.us <- perdf %>% do(f(.)) %>% mutate(stabbr="US")
tx.df <- perdf %>% group_by(stabbr) %>%
do(f(.)) %>%
bind_rows(tx.us)
# Fit a quartic-in-age regression of real income (rincome) on the
# adult (age >= 18) records of `df`, and return the coefficients as
# a one-row tibble whose columns are prefixed with "inc."
# ("inc.intercept", "inc.age", "inc.age2", ...). Mirrors f(), which
# does the same for rtax.
g <- function(df){
  fitdat <- df %>%
    filter(age >= 18) %>%
    mutate(age2=age^2, age3=age^3, age4=age^4)
  mod <- lm(rincome ~ age + age2 + age3 + age4, data=fitdat)
  coefs <- as_tibble(t(coef(mod)))
  nms <- names(coefs)
  nms[1] <- "intercept"
  names(coefs) <- paste0("inc.", nms)
  coefs
}
inc.us <- perdf %>% do(g(.)) %>% mutate(stabbr="US")
inc.df <- perdf %>% group_by(stabbr) %>%
do(g(.)) %>%
bind_rows(inc.us)
st2 <- stage %>%
left_join(tx.df) %>%
left_join(inc.df) %>%
mutate(rtax.est=tx.intercept +
tx.age * age +
tx.age2 * age^2 +
tx.age3 * age^3 +
tx.age4 * age^4,
rincome.est=inc.intercept +
inc.age * age +
inc.age2 * age^2 +
inc.age3 * age^3 +
inc.age4 * age^4) %>%
select(stabbr, age, rtax, rincome, rtax.est, rincome.est) %>%
gather(variable, value, -stabbr, -age)
#.. look at the profiles ----
st2
#.. set up graph info ----
cps_src <- "Source: Annual Social and Economic (ASEC) supplement to the Current Population Survey, pooled 2017 and 2018"
note <- "Smoothed values estimated as a 4th degree polynomial of age"
capt <- paste0(cps_src, "\n", note)
#.. graphs of income and tax by age, US, 2 panels ----
p1 <- st2 %>%
filter(stabbr=="US", str_sub(variable, 1, 4)=="rinc") %>%
mutate(varf=factor(variable, levels=c("rincome", "rincome.est"), labels=c("Actual", "Smoothed"))) %>%
ggplot(aes(x=age, y=value, colour=varf)) +
theme_bw() +
geom_line(size=rel(1.3)) +
geom_point() +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="Income", breaks=seq(0, 200e3, 10e3), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=c("darkgreen", "blue")) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average income by age in the United States", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.5), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p1
p2 <- st2 %>%
filter(stabbr=="US", str_sub(variable, 1, 4)=="rtax") %>%
mutate(varf=factor(variable, levels=c("rtax", "rtax.est"), labels=c("Actual", "Smoothed"))) %>%
ggplot(aes(x=age, y=value, colour=varf)) +
theme_bw() +
geom_line(size=rel(1.3)) +
geom_point() +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="State income tax", breaks=seq(0, 50e3, 200), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=c("darkgreen", "blue")) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average state income tax by age in the United States", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.5), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p2
ml <- marrangeGrob(list(p1, p2), nrow=2, ncol=1, top=NULL)
ml
ggsave("./results/income_and_tax_byage.png", ml, width=8, height=11, units="in")
#.. graphs of income and tax by age, case study states ####
sts <- c(globals$case_study_states, "US"); clrs <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c', "black")
sts <- c(globals$case_study_states); clrs <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c')
p1 <- st2 %>%
filter(stabbr %in% sts, variable=="rincome.est") %>%
ungroup %>%
mutate(stabbr=factor(stabbr, levels=sts)) %>%
ggplot(aes(x=age, y=value, colour=stabbr)) +
theme_bw() +
geom_line(size=rel(1.3)) +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="Income", breaks=seq(0, 200e3, 10e3), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=clrs) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average income by age in case study states", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.3), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p1
p2 <- st2 %>% filter(stabbr %in% sts, variable=="rtax.est") %>%
ungroup %>%
mutate(stabbr=factor(stabbr, levels=sts)) %>%
ggplot(aes(x=age, y=value, colour=stabbr)) +
theme_bw() +
geom_line(size=rel(1.3)) +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="State income tax", breaks=seq(0, 50e3, 200), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=clrs) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average state income tax by age in case study states", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.3), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p2
ml <- marrangeGrob(list(p1, p2), nrow=2, ncol=1, top=NULL)
ml
# use one or the other of these
ggsave("./results/income_and_tax_byage_css_xus.png", ml, width=8, height=11, units="in")
ggsave("./results/income_and_tax_byage_css.png", ml, width=8, height=11, units="in")
#.. DON'T NEED graphs of income and tax by age indexed to age 50, individual case study states ####
# sts <- c(globals$case_study_states, "US")
# st <- sts[1]
# p1 <- st2 %>%
# filter(stabbr %in% sts, variable %in% c("rincome.est", "rtax.est")) %>%
# mutate(variable=factor(variable, levels=c("rincome.est", "rtax.est"), labels=c("Income", "State income tax"))) %>%
# group_by(stabbr, variable) %>%
# mutate(ivalue=value / value[age==50] * 100) %>%
# ungroup %>%
# ggplot(aes(x=age, y=ivalue, colour=variable)) +
# theme_bw() +
# geom_line(size=rel(1.3)) +
# geom_hline(yintercept = 100, linetype="dashed") +
# scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
# scale_y_continuous(name="Indexed value (age 50=100)", breaks=seq(0, 100, 10), limits=c(0, NA)) +
# scale_colour_manual(values=c("blue", "darkgreen")) +
# guides(colour=guide_legend(title=NULL)) +
# ggtitle("Average income and state income tax by age", subtitle="Indexed to values at age 50") +
# labs(caption=capt) +
# theme(plot.title = element_text(size=rel(1.3), face="bold")) +
# theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
# theme(axis.title = element_text(face="bold", size=rel(1))) +
# theme(axis.text = element_text(face="bold", size=rel(1))) +
# theme(plot.caption = element_text(hjust=0, size=rel(.8)))
# p1 + facet_wrap(~stabbr)
# ggsave("./results/income_and_tax_byage_indexed.png", p1, width=8, height=11, units="in")
#.. DON'T NEED graph of pch in avg tax vs avg income, from 2nd-top to top income range ----
# abrks <- c(-100, 0, 18, 25, 35, 45, 65, 1e9)
# pchdf <- perdf %>% filter(age>=18, !stabbr %in% globals$nonpit_states) %>%
# mutate(agegrp=cut(age, breaks=abrks)) %>%
# group_by(stabbr, agegrp) %>%
# summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
# rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE)) %>%
# filter(agegrp %in% c("(45,65]", "(65,1e+09]")) %>%
# gather(variable, value, rtax, rincome) %>%
# spread(agegrp, value) %>%
# mutate(pch=`(65,1e+09]` / `(45,65]` - 1) %>%
# select(stabbr, variable, pch) %>%
# spread(variable, pch) %>%
# mutate(diff=rtax - rincome)
#
# uspch <- pchdf %>% ungroup %>%
# summarise(rincome=median(rincome), rtax=median(rtax), diff=median(diff))
#
#
# p <- pchdf %>% mutate(stype=ifelse(stabbr %in% globals$case_study_states, "Case study\nstates", "Other states"),
# lsize=ifelse(stype=="Other states", 3, 4)) %>%
# ggplot(aes(x=rincome, y=rtax, label=stabbr)) +
# theme_bw() +
# # geom_text(colour="blue", size=3, position=position_jitter(width=.004, height=.004)) +
# # geom_text(aes(colour=stype, size=stype), fontface = "bold", position=position_nudge(x=.002, y=.002)) +
# geom_text_repel(aes(colour=stype, size=stype), fontface = "bold", point.padding = NA) +
# #geom_text(aes(colour=stype, size=stype), fontface = "bold") +
# scale_x_continuous(name="Percent change in income (greater declines are to left)", labels = scales::percent,
# breaks=seq(-1, 1, .05)) +
# scale_y_continuous(name="Percent change in state income tax (greater declines are lower)",
# labels = scales::percent,
# breaks=seq(-1, 1, .05)) +
# scale_colour_manual(values=c("darkred", "blue", "#636363")) +
# scale_size_manual(values=c(3.5, 3.5, 3)) +
# geom_hline(yintercept=uspch$rtax) +
# geom_vline(xintercept=uspch$rincome) +
# ggtitle(label="Decline in income and in state income tax, age 65+ cohort relative to age 45-64 cohort",
# subtitle="Horizontal and vertical lines show United States medians") +
# labs(caption="\nSource: Annual Social and Economic (ASEC) supplement to the Current Population Survey, pooled 2017 and 2018") +
# theme(plot.title = element_text(size=rel(1.3), face="bold")) +
# theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
# theme(axis.title = element_text(face="bold", size=rel(1))) +
# theme(axis.text = element_text(face="bold", size=rel(1))) +
# theme(plot.caption = element_text(hjust=0, size=8)) +
# guides(colour=guide_legend(title=NULL), size=guide_legend(title=NULL))
# p
# ggsave(p, file=paste0("./results/taxpch_vs_incpch_65vs45to64_scatter.png"), width=10, height=6.25, units="in")
#.. DON'T NEED graph of tax ratios ----
# get average tax by pop group
abrks <- c(-100, 0, 18, 25, 35, 45, 65, 1e9)
taxratio <- perdf %>% filter(age>=18, !stabbr %in% globals$nonpit_states) %>%
mutate(agegrp=cut(age, breaks=abrks)) %>%
group_by(stabbr, agegrp) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE)) %>%
spread(agegrp, rtax) %>%
mutate(diff=`(65,1e+09]` - `(45,65]`,
pch=diff / `(45,65]`) %>%
arrange(pch)
ustr <- taxratio %>%
ungroup %>%
summarise(diff=median(diff), pch=median(pch))
# Scatter: % change (x) vs dollar difference (y) in per-capita tax, 65+ vs 45-64.
p <- taxratio %>%
  filter(!stabbr %in% globals$nonpit_states) %>%
  mutate(stype=ifelse(stabbr %in% globals$case_study_states, "Case study\nstates", "Other states"),
         lsize=ifelse(stype=="Other states", 3, 4)) %>%
  ggplot(aes(x=pch, y=diff, label=stabbr)) +
  theme_bw() +
  # geom_text(colour="blue", size=3, position=position_jitter(width=.004, height=.004)) +
  # geom_text(aes(colour=stype, size=stype), fontface = "bold", position=position_nudge(x=.002, y=.002)) +
  geom_text_repel(aes(colour=stype, size=stype), fontface = "bold", point.padding = NA) +
  #geom_text(aes(colour=stype, size=stype), fontface = "bold") +
  scale_x_continuous(name="Percent change", labels = scales::percent,
                     breaks=seq(-1, 1, .05)) +
  scale_y_continuous(name="Dollar difference, 65+ minus 45-64 age group",
                     labels = scales::dollar,
                     breaks=seq(-3e3, 1e3, 100)) +
  scale_colour_manual(values=c("darkred", "blue", "#636363")) +
  scale_size_manual(values=c(3.5, 3.5, 3)) +
  # reference lines at the US medians computed in ustr above
  geom_hline(yintercept=ustr$diff) +
  geom_vline(xintercept=ustr$pch) +
  # geom_hline(yintercept=0, linetype="dashed", colour="darkblue") +
  ggtitle(label="Change in tax per-capita for 65+ age group relative to 45-64 age group",
          subtitle="Horizontal and vertical lines show United States medians") +
  labs(caption="\nSource: Annual Social and Economic (ASEC) supplement to the Current Population Survey, pooled 2017 and 2018") +
  theme(plot.title = element_text(size=rel(1.5), face="bold")) +
  theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
  theme(axis.title = element_text(face="bold", size=rel(1))) +
  theme(axis.text = element_text(face="bold", size=rel(1))) +
  theme(plot.caption = element_text(hjust=0, size=8)) +
  guides(colour=guide_legend(title=NULL), size=guide_legend(title=NULL))
p
ggsave(p, file=paste0("./results/age65ptax_vs45to64_scatter.png"), width=10, height=6.25, units="in")
#****************************************************************************************************
# Now do Watkins-like calculation of potential revenue loss ####
#****************************************************************************************************
wpop <- readRDS("./data/popproj/wc_popproj.rds") # Weldon Cooper pop projections
glimpse(wpop)
count(wpop, popgroup) # get age cuts needed
popgroup <- count(wpop, popgroup) %>%
  filter(popgroup!="poptot")
unique(wpop$popgroup)
# make a data frame to link popgroups on
# agebrks mirrors the Weldon Cooper 5-year groups: 0-4, 5-9, ..., 80-84, 85+
agebrks <- c(-1, seq(4, 84, 5), 1e9)
agegroups <- tibble(age=1:100, agebrk=cut(age, agebrks)) %>%
  group_by(agebrk) %>%
  summarise(n=n()) %>%
  # positional join: assumes cut() yields exactly one interval per row of
  # popgroup, in the same (sorted) order as popgroup$popgroup -- TODO confirm
  mutate(popgroup=popgroup$popgroup) %>%
  select(-n)
agegroups # good, we can use this for linking
# get the tax data from CPS AND CUT THE WEIGHT IN HALF TO REFLECT 2 years !!!! ----
perdf <- readRDS("./data/perdf.rds") %>%
  mutate(avgweight=marsupwt / 2) # halve weights because 2017 and 2018 are pooled
glimpse(perdf)
# perdf has one rec per person in 2017 and one per 2018; rtax is real state tax (2017 dollars)
# it has ~180k obs per year; marsupwt is the weight
# prepare cps data for merging against pop projections data
# total population and mean rtax by state and age group, plus total pop [2017, 2018 pooled]
# thus, all the pops are 2-year sums; the taxes are average annual tax
spop.age <- perdf %>%
  mutate(agebrk=cut(age, agebrks)) %>%
  group_by(stabbr, agebrk) %>%
  summarise(wtdn=sum(avgweight),
            rtax=weighted.mean(rtax, avgweight)) %>%
  left_join(agegroups) %>% # this allows us to link to Weldon Cooper projections
  group_by(stabbr) %>%
  mutate(totpop=sum(wtdn)) %>% # per-state total population
  ungroup
spop.age
spop.age %>% filter(stabbr=="NY")
# prepare the Weldon Cooper popshares data - for each state and income group, get share of state pop different years
wpop2 <- wpop %>%
  filter(sex=="Total", popgroup!="poptot") %>%
  group_by(year, stabbr) %>%
  mutate(totproj=sum(value),
         popshare=value / totproj,
         shareyear=paste0("share", year)) %>%
  ungroup %>%
  select(stabbr, popgroup, shareyear, popshare) %>%
  spread(shareyear, popshare) # wide: one share20xx column per projection year
wpop2 %>% group_by(stabbr) %>% summarise_at(vars(starts_with("share")), sum) # make sure shares add to 1
# put the shares on the tax data
ptaxshares <- spop.age %>%
  group_by(stabbr) %>%
  mutate(sharecps=wtdn / sum(wtdn)) %>% # CPS-based age share, for comparison
  ungroup %>%
  left_join(wpop2, by=c("stabbr", "popgroup")) %>%
  # taxNNNN: total tax if current per-capita tax by age were applied to the
  # state's population with the age distribution projected for year NNNN
  mutate(taxcps=rtax * wtdn,
         tax2010=rtax * totpop * share2010,
         tax2020=rtax * totpop * share2020,
         tax2030=rtax * totpop * share2030,
         tax2040=rtax * totpop * share2040)
#.. quick check compare implied pit in the cps data vs cps per census years don't match quite correctly of course ----
# sgcomp uses level==2 records from slgfin; slgcomp uses level==1; the two
# pipelines are otherwise identical
sgcomp <- ptaxshares %>%
  group_by(stabbr) %>%
  summarise(taxcps=sum(taxcps) / 1e6) %>%
  left_join(slgfin %>% filter(level==2, aggvar=="iit", year==2016) %>% select(stabbr, taxcen=value) %>% mutate(taxcen=taxcen / 1000)) %>%
  mutate(pdiff=taxcps / taxcen * 100 - 100,
         level="sg")
slgcomp <- ptaxshares %>%
  group_by(stabbr) %>%
  summarise(taxcps=sum(taxcps) / 1e6) %>%
  left_join(slgfin %>% filter(level==1, aggvar=="iit", year==2016) %>% select(stabbr, taxcen=value) %>% mutate(taxcen=taxcen / 1000)) %>%
  mutate(pdiff=taxcps / taxcen * 100 - 100,
         level="slg")
taxcomp <- bind_rows(sgcomp, slgcomp)
taxcomp %>% arrange(-abs(pdiff))
taxcomp %>% filter(stabbr=="NY")
taxcomp %>% filter(stabbr %in% globals$case_study_states) %>% arrange(stabbr, level)
#.. end check ----
# NY illustration: tax by age group under each projected age distribution ($ millions)
ptaxshares %>%
  filter(stabbr=="NY") %>%
  select(-agebrk, -totpop) %>%
  select(stabbr, popgroup, everything()) %>%
  mutate_at(vars(starts_with("tax")), ~ . / 1e6) %>%
  janitor::adorn_totals() %>%
  mutate(pch=tax2040 / tax2020 * 100 - 100) %>%
  kable(digits=c(rep(0, 4), rep(3, 5), rep(0, 5), 1), format.args=list(big.mark=","))
write_csv(ptaxshares, "./results/ptaxshares.csv") # so I can calc by hand
#.. compute total state income tax after credits in $ millions at different pop shares and change ----
totchange <- ptaxshares %>%
  group_by(stabbr) %>%
  summarise_at(vars(tax2010, tax2020, tax2030, tax2040), ~(sum(.) / 1e6)) %>%
  mutate(diff.tot=tax2040 - tax2020,
         pch.tot=tax2040 / tax2020 * 100 - 100)
# alternative approach - per capita tax, and change - should yield the same pch
percapchange <- ptaxshares %>%
  group_by(stabbr) %>%
  summarise(rtax2010=weighted.mean(rtax, share2010, na.rm=TRUE),
            rtax2020=weighted.mean(rtax, share2020, na.rm=TRUE),
            rtax2030=weighted.mean(rtax, share2030, na.rm=TRUE),
            rtax2040=weighted.mean(rtax, share2040, na.rm=TRUE)) %>%
  mutate(diff.pc=rtax2040 - rtax2020,
         pch.pc=rtax2040 / rtax2020 * 100 - 100)
taxdiff <- totchange %>%
  left_join(percapchange) %>%
  arrange(diff.pc, pch.pc)
taxdiff
write_csv(taxdiff, "./results/pitchanges.csv")
saveRDS(taxdiff, "./results/pitchanges.rds")
taxdiff <- readRDS("./results/pitchanges.rds")
# plot data: drop DC/US and non-PIT states; pch.pc to proportion for scales::percent
pdata <- taxdiff %>% filter(!stabbr %in% c("DC", "US", globals$nonpit_states)) %>% mutate(pch.pc=pch.pc / 100)
usvals <- pdata %>% summarise_at(vars(pch.pc, diff.pc), ~median(., na.rm=TRUE))
l1 <- "Source: Author's analysis of data from Current Population Survey and from University of Virginia, Weldon Cooper Center for Public Service"
l2 <- "Notes: (1) States without broad-based income taxes excluded. (2) Analysis is similar to Felix & Watkins 2013, with updated information."
srcnote <- paste0("\n", l1, "\n", l2)
# Scatter of per-capita PIT change (x, %) vs dollar change (y), 2020 -> 2040 shares.
# BUG FIX: the original tested stype=="Other", which never matches the values
# assigned above ("Case study\nstates" / "Other states"), so lsize was always 4.
# Use "Other states", consistent with the equivalent plot earlier in the file.
# (lsize is not mapped below -- the size aesthetic is driven by stype -- so this
# only matters if lsize is used later.)
p <- pdata %>%
  mutate(stype=ifelse(stabbr %in% globals$case_study_states, "Case study\nstates", "Other states"),
         lsize=ifelse(stype=="Other states", 3, 4)) %>%
  ggplot(aes(x=pch.pc, y=diff.pc, label=stabbr)) +
  theme_bw() +
  # geom_text_repel(aes(colour=stype, size=stype), fontface = "bold") +
  geom_text(aes(colour=stype, size=stype), fontface = "bold") +
  scale_x_continuous(name="Percent change in per-capita income tax",
                     labels = scales::percent,
                     breaks=seq(-.5, .5, .01)) +
  scale_y_continuous(name="Dollar change in per-capita income tax",
                     labels = scales::dollar,
                     breaks=seq(-100, 100, 20)) +
  scale_colour_manual(values=c("darkred", "blue", "#636363")) +
  scale_size_manual(values=c(3.5, 3.5, 3)) +
  # reference lines at the medians for income-tax states (usvals above)
  geom_hline(yintercept=usvals$diff.pc) +
  geom_vline(xintercept=usvals$pch.pc) +
  geom_hline(yintercept=0, linetype="dashed", colour="darkblue") +
  ggtitle(label="Impact on per-capita income tax of moving from projected 2020 to 2040 age distribution (2017 dollars)",
          subtitle="Horizontal and vertical lines show medians for income-tax states") +
  labs(caption=srcnote) +
  theme(plot.title = element_text(size=rel(1.3), face="bold")) +
  theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
  theme(axis.title = element_text(face="bold", size=rel(1))) +
  theme(axis.text = element_text(face="bold", size=rel(1))) +
  theme(plot.caption = element_text(hjust=0, size=8)) +
  guides(colour=guide_legend(title=NULL), size=guide_legend(title=NULL))
p
ggsave(p, file=paste0("./results/pit_impact_scatter.png"), width=10, height=6.25, units="in")
taxdiff %>% select(stabbr, diff.pc, pch.pc) %>%
  arrange(pch.pc)
# change in the 65+ population share, 2020 -> 2040, by state
ptaxshares %>% select(stabbr, popgroup, starts_with("share")) %>%
  filter(popgroup %in% pop65p) %>%
  group_by(stabbr) %>%
  summarise_at(vars(starts_with("share")), funs(sum)) %>%
  mutate(diff=share2040 - share2020)
#.. get pit change as % of own-source revenue ----
totchange %>% arrange(desc(abs(pch.tot)))
# count(slgfin, aggvar)
osrpct <- slgfin %>%
  filter(year==2016, level==1, aggvar %in% c("iit", "osr")) %>%
  mutate(value=value / 1e3) %>% # put in $ millions
  spread(aggvar, value) %>%
  select(stabbr, iit, osr) %>%
  left_join(totchange %>% select(stabbr, pch.iit=pch.tot)) %>%
  mutate(iitchange=iit * pch.iit / 100, # projected IIT change, $ millions
         osrpct=iitchange / osr * 100)  # ... as % of own-source revenue
osrpct %>% arrange(desc(abs(osrpct)))
#****************************************************************************************************
# Consumer expenditure survey ####
#****************************************************************************************************
# prepare the popshares data
popproj <- readRDS("./data/popproj.rds")
glimpse(popproj)
count(popproj, popgroup) # get age cuts needed
popgroup <- count(popproj, popgroup) %>%
  filter(popgroup!="pop")
# pop2: for each state and age group, share of state population in each
# projection year (wide: one share20xx column per year).
# FIX: the original computed this identical pipeline twice back-to-back;
# the duplicate has been removed -- the result is unchanged.
pop2 <- popproj %>%
  filter(sex=="Total", popgroup!="pop") %>%
  group_by(year, stabbr) %>%
  mutate(totproj=sum(value),
         popshare=value / totproj,
         shareyear=paste0("share", year)) %>%
  ungroup %>%
  select(stabbr, popgroup, shareyear, popshare) %>%
  spread(shareyear, popshare)
# check
pop2 %>% group_by(stabbr) %>% summarise_at(vars(starts_with("share")), sum) # make sure they sum to 1
# calculate taxable consumption as % of income at each age group
age <- c(21.4, 29.6, 39.5, 49.6, 59.3, 68.9, 81.6) # avg age of reference person per CEX
txpct <- c(51.7, 41.1, 38.4, 34.7, 38.4, 46.5, 48.9) # txbl consumption as % of income per my analysis of CEX
agepct <- tibble(age, txpct)
# get coefs that allow us to estimate txpct at any age
# (cubic polynomial in age, fit to the 7 CEX points above)
mod <- agepct %>% mutate(age2=age*age, age3=age2*age) %>%
  lm(txpct ~ age + age2 + age3, data=.)
summary(mod)
# estimate txpct of income at any age
agespend <- tibble(age=18:85,
                   txpct=coef(mod)["(Intercept)"] +
                     coef(mod)["age"] * age +
                     coef(mod)["age2"] * age^2 +
                     coef(mod)["age3"] * age^3) %>%
  mutate(type="est") %>%
  bind_rows(agepct %>% mutate(type="data")) # keep original points for comparison
# check that it looks plausible
agespend %>% ggplot(aes(age, txpct, colour=type)) +
  geom_line()
# make a data frame to link popgroups on proj file with pop on CPS
agebrks <- c(-1, seq(4, 84, 5), 1e9)
agegroups <- tibble(age=1:100, agebrk=cut(age, agebrks)) %>%
  group_by(agebrk) %>%
  summarise(n=n()) %>%
  # positional join: assumes intervals line up 1:1, in order, with
  # popgroup$popgroup -- TODO confirm
  mutate(popgroup=popgroup$popgroup) %>%
  select(-n)
agegroups # good, we can use this for linking
# calc txbl consumption for each person in pooled CPS
perdf2 <- readRDS("./data/perdf2.rds")
glimpse(perdf2)
txc <- perdf2 %>%
  left_join(agespend %>% filter(type=="est") %>%
              select(-type)) %>%
  mutate(txblsales=rincome * txpct / 100, # estimated taxable consumption per person
         agebrk=cut(age, agebrks)) %>%
  left_join(agegroups) %>%
  group_by(stabbr, popgroup) %>%
  summarise(wtdn=sum(marsupwt),
            rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE),
            txblsales=weighted.mean(txblsales, marsupwt, na.rm=TRUE)) %>%
  ungroup
txc %>% filter(stabbr=="NY") %>%
  select(popgroup, rincome, txblsales) %>%
  gather(variable, value, -popgroup) %>%
  # must use group=variable because popgroup is a factor!
  ggplot(aes(popgroup, value, colour=variable, group=variable)) +
  geom_point() +
  geom_line() +
  theme(axis.text.x=element_text(angle=45,hjust=.9,vjust=0.9))
# now get state averages under different shares
# State totals of taxable consumption under the age shares of different years.
# BUG FIX: the original computed totpop=sum(wtdn) WITHOUT grouping, so totpop
# was the national (all-state) sum of weights; the parallel PIT calculation
# above uses a per-state totpop (spop.age). Group by state before summing so
# each state's taxable-sales totals scale by its own population -- TODO confirm
# intent with the author.
txcshares <- txc %>% left_join(pop2) %>%
  group_by(stabbr) %>%
  mutate(totpop=sum(wtdn)) %>%
  ungroup %>%
  mutate(txc2010=txblsales * totpop * share2010,
         txc2020=txblsales * totpop * share2020,
         txc2030=txblsales * totpop * share2030,
         txc2040=txblsales * totpop * share2040)
write_csv(txcshares, "./results/txcshares.csv") # so I can calc by hand
# compute total tax, and change
txcchange <- txcshares %>% group_by(stabbr) %>%
  summarise_at(vars(txc2010, txc2020, txc2030, txc2040), funs(sum(., na.rm=TRUE) / 1e6)) %>%
  mutate(diff.tot=txc2030 - txc2010,
         pch.tot=txc2030 / txc2010 * 100 - 100)
txcchange
# put the shares on the tax data
# NOTE(review): `p4` is not defined anywhere in this file as shown; this block
# looks like a leftover from an earlier version (cf. the spop.age-based
# ptaxshares built above, which this overwrites) and would fail if run fresh
# -- TODO confirm and remove or repair.
ptaxshares <- p4 %>% left_join(pop2) %>%
  mutate(tax2010=rtax * totpop * share2010,
         tax2020=rtax * totpop * share2020,
         tax2030=rtax * totpop * share2030,
         tax2040=rtax * totpop * share2040)
write_csv(ptaxshares, "./results/ptaxshares.csv") # so I can calc by hand
# compute total tax, and change
totchange <- ptaxshares %>% group_by(stabbr) %>%
  summarise_at(vars(tax2010, tax2020, tax2030, tax2040), funs(sum(.) / 1e6)) %>%
  mutate(diff.tot=tax2030 - tax2010,
         pch.tot=tax2030 / tax2010 * 100 - 100)
#****************************************************************************************************
# Consumer expenditure survey - PLAN B ####
#****************************************************************************************************
popproj <- readRDS("./data/popproj.rds")
# CEX 2015 age groupings
# inline data: pre-tax income, total expenditures, consumption ex-finance, and
# commonly taxable expenditures per household, by age of reference person
txc.age <- read_csv("agegrp.cex, ageref.cex, incpretax, expend, consxfin, txblexp
00to24, 21.4, 31606, 32797, 29836, 16330
25to34, 29.6, 64472, 52062, 45182, 26480
35to44, 39.5, 84938, 65334, 55954, 32641
45to54, 49.6, 95248, 69753, 57578, 33047
55to64, 59.3, 75262, 58781, 49051, 28889
65to74, 68.9, 54067, 49477, 43518, 25167
75+, 81.6, 36408, 38123, 34392, 17816")
txc.age
# human-readable age labels for the x axis
txc.age <- txc.age %>% mutate(agelabs=agegrp.cex %>% str_replace_all("to", " to "),
                              agelabs=agelabs %>% str_replace("00 to", "Up to"))
pdata <- txc.age %>% select(-consxfin) %>%
  gather(variable, value, -agegrp.cex, -ageref.cex, -agelabs) %>%
  mutate(varf=factor(variable,
                     levels=c("incpretax", "expend", "txblexp"),
                     labels=c("Income before tax", "Total expenditures", "Commonly taxable\nexpenditures")))
# Base plot of income and expenditures by householder age; p1/p2 below add
# titles/legends for two different output sizes.
p <- pdata %>%
  ggplot(aes(ageref.cex, value, colour=varf)) +
  geom_line(size=rel(1.3)) +
  geom_point(size=rel(1.2)) +
  scale_y_continuous(name=NULL, breaks=seq(0, 100e3, 10e3), limits=c(0, NA), labels=scales::dollar) +
  scale_x_continuous(name="Age of householder", breaks=txc.age$ageref.cex, labels=txc.age$agelabs) +
  theme_bw() +
  scale_colour_manual(values=c("red", "blue", "darkgreen")) +
  theme(plot.title = element_text(size=rel(2), face="bold")) +
  theme(axis.title = element_text(face="bold", size=rel(1))) +
  theme(axis.text = element_text(face="bold", size=rel(1)))
p1 <- p +
  ggtitle("Income and expenditures by householder age") +
  guides(color = guide_legend(title=NULL, nrow = 3)) +
  labs(caption="\nSource: U.S. Bureau of the Census, Consumer Expenditure Survey 2015, Table 1300 plus author's calculations") +
  theme(plot.caption = element_text(hjust=0, size=rel(.8))) +
  theme(legend.direction = 'horizontal',
        legend.position = 'right',
        legend.key = element_rect(size = 5),
        legend.key.size = unit(1.5, 'lines')) # put space between legend lines of text
p1
ggsave(p1, file=paste0("./results/expend_hhage.png"), width=10, height=6.25, units="in")
p2 <- p +
  ggtitle("Household income and expenditures\nby householder age") +
  guides(color = guide_legend(title=NULL, nrow = 1)) +
  labs(caption="\nSource: U.S. Bureau of the Census, Consumer Expenditure Survey 2015\nTable 1300 plus author's calculations") +
  theme(plot.caption = element_text(hjust=0, size=rel(.8))) +
  theme(legend.direction = 'horizontal',
        legend.position = 'bottom',
        legend.key = element_rect(size = 5),
        legend.key.size = unit(1, 'lines')) # put space between legend lines of text
p2
ggsave(p2, file=paste0("./results/expend_hhage_alt.png"), width=7, height=8, units="in")
popgroup
# CEX 2create a linking file ####
pop2 <- popproj %>%
  filter(sex=="Total", popgroup!="pop") %>%
  group_by(year, stabbr) %>%
  mutate(totproj=sum(value), popshare=value / totproj,
         shareyear=paste0("share", year)) %>%
  ungroup %>%
  select(stabbr, popgroup, shareyear, popshare) %>%
  spread(shareyear, popshare)
popgroup <- count(popproj, popgroup) %>%
  filter(popgroup!="pop")
# Map projection age groups to CEX age groups using the first two characters.
# NOTE(review): this assumes popgroup values BEGIN with a two-digit age (e.g.
# "00to04"); if they carry a prefix such as "pop00_04", str_sub(popgroup, 1, 2)
# returns "po" and every group would land in "75+" -- TODO confirm the popgroup
# coding in popproj.rds. Also note c2 is character, so `c2 < 25` and
# `c2 >= 75` are string comparisons against coerced numbers.
popgroup.cex <- popgroup %>% mutate(c2=str_sub(popgroup, 1, 2),
                                    agegrp.cex=ifelse(c2 < 25, "00to24", NA),
                                    agegrp.cex=ifelse(c2 %in% 25:34, "25to34", agegrp.cex),
                                    agegrp.cex=ifelse(c2 %in% 35:44, "35to44", agegrp.cex),
                                    agegrp.cex=ifelse(c2 %in% 45:54, "45to54", agegrp.cex),
                                    agegrp.cex=ifelse(c2 %in% 55:64, "55to64", agegrp.cex),
                                    agegrp.cex=ifelse(c2 %in% 65:74, "65to74", agegrp.cex),
                                    agegrp.cex=ifelse(c2 >= 75, "75+", agegrp.cex)) %>%
  select(-c2)
popgroup.cex
# get total population of each state in base year
perdf2 <- readRDS("./data/perdf2.rds")
totpop2015 <- perdf2 %>% filter(year==2015) %>%
  group_by(stabbr) %>%
  summarise(pop=sum(marsupwt, na.rm=TRUE))
# collapse pop proj shares by CEX age groups
# txc2010 / txc2030: taxable expenditures ($ millions) under 2010 vs 2030 age shares
cex_change <- pop2 %>%
  filter(stabbr!="US") %>%
  left_join(popgroup.cex) %>%
  group_by(stabbr, agegrp.cex) %>%
  summarise_at(vars(starts_with("share")), funs(sum)) %>%
  left_join(totpop2015) %>%
  left_join(txc.age) %>%
  mutate(txc2010=share2010 * pop * txblexp / 1e6,
         txc2030=share2030 * pop * txblexp / 1e6) %>%
  group_by(stabbr) %>%
  summarise_at(vars(txc2010, txc2030), funs(sum)) %>%
  mutate(diff=txc2030 - txc2010,
         pch=diff / txc2010 * 100) %>%
  left_join(totpop2015) %>%
  mutate_at(vars(txc2010, txc2030, diff), funs(pc=. * 1e6 / pop)) # per-capita versions
cex_change
precis(cex_change)
precis(cex_change %>% filter(stabbr != "DC"))
cex_change %>% arrange(pch)
write_csv(cex_change, "./results/cex_change.csv")
quantile(cex_change$pch, probs=c(0, .1, .25, .5, .75, .9, 1))
# horizontal bar chart of % change in taxable sales by state (DC excluded)
xgst <- c("DC")
p <- cex_change %>% filter(!stabbr %in% xgst) %>%
  arrange(desc(pch)) %>%
  mutate(stname=factor(stabbr, levels=stcodes$stabbr, labels=stcodes$stname),
         stname2=factor(stname, levels=stname, labels=stname, ordered=TRUE),
         pchround=round(pch, 1)) %>%
  ggplot(aes(x=stname2, y=pch)) +
  geom_bar(stat="identity", fill="blue") +
  scale_y_continuous(name=NULL, breaks=seq(-3, 3, .2)) +
  scale_x_discrete(name=NULL) +
  geom_text(aes(label=pchround), nudge_y=0.1, size=3) +
  theme_bw() +
  ggtitle(label="Taxable sales") +
  coord_flip() +
  geom_vline(xintercept=0) +
  theme(axis.text.y=element_text(hjust=0.5))
# apparently must adjust the state labels AFTER the flip, as y axis (I think)
p
#****************************************************************************************************
# Median pit changes ####
#****************************************************************************************************
# median year-over-year PIT % change across states in the Great Recession years
sgtax.a %>% filter(year %in% 2007:2010, vname=="iit", stabbr!="US", value>0) %>%
  select(stabbr, year, value) %>%
  spread(year, value) %>%
  mutate(pch1=`2009` / `2008` * 100 - 100,
         pch2=`2010` / `2009` * 100 - 100) %>%
  summarise_at(vars(pch1, pch2), funs(median))
#****************************************************************************************************
# Consumer expenditure survey - exp breakdown ####
#****************************************************************************************************
# read the CEX export sheet from a presentation workbook (absolute local path)
fn <- "D:/Dropbox/Open Projects/NCSL Boston Feb 2017/Presentation/BoydNCSLBoston2017(10).xlsx"
df <- read_excel(fn, sheet="CEX_export", skip=1)
names(df)
df <- df[, which(!is.na(names(df)))] # fix bad data
names(df)
# get df with agerp and agegrp
# first data row holds the average age of reference person for each column
agevals <- tibble(agegrp=names(df)[-c(1:2)], agerp=df[1, -c(1:2)] %>% t %>% as.vector)
vars <- c("exp_total", "exp_txblcons", "food_home", "food_away", "alcohol", "housing", "housing_shelter",
          "apparel", "transport", "healthcare", "entertainment", "perscare", "reading", "education", "tobacco")
ages <- c("agerp_all", "agerp_00to24", "agerp_25to34", "agerp_35to44", "agerp_45to54",
          "agerp_55to64", "agerp_65to74", "agerp_75+", "agerp_65+")
# long -> wide -> long so derived categories (alctob, housing_xshelter) can be added
df2 <- df %>%
  select(-description) %>%
  filter(vname %in% vars) %>%
  gather(agegrp, value, -vname) %>%
  spread(vname, value) %>%
  mutate(alctob=alcohol + tobacco,
         housing_xshelter=housing - housing_shelter) %>%
  gather(vname, value, -agegrp) %>%
  left_join(agevals)
voi <- c("exp_total", "exp_txblcons", "food_home", "food_away", "alctob", "housing", "apparel", "transport",
         "healthcare", "entertainment", "perscare", "reading", "education")
voilabs <- c("Total", "Taxable", "Food at home", "Food away", "Alcohol & tobacco", "Housing", "Apparel", "Transportation",
             "Health care", "Entertainment", "Personal care", "Reading", "Education")
cbind(voi, voilabs)
srcnote <- "\nSource: U.S. Bureau of the Census, Consumer Expenditure Survey 2015 Table 1300, plus author's calculations"
note <- "Inspired by Oregon Office of Economic Analysis, https://oregoneconomicanalysis.com/2014/03/19/aging-revenues/"
capt <- paste0(srcnote, "\n", note)
capt <- srcnote # NOTE(review): overwrites the combined caption; only srcnote is used
# bar chart: spending by category, 65+ households relative to 55-64 households
p <- df2 %>% filter(agegrp %in% c("agerp_55to64", "agerp_65+"),
                    vname %in% voi) %>%
  select(-agerp) %>%
  spread(agegrp, value) %>%
  mutate(pch=`agerp_65+` / agerp_55to64 - 1,
         vlab=factor(vname, levels=voi, labels=voilabs)) %>%
  arrange(pch) %>%
  mutate(vlab2=factor(vlab, levels=vlab, labels=vlab, ordered=TRUE)) %>%
  ggplot(aes(vlab2, pch)) +
  geom_bar(stat="identity", fill="blue") +
  scale_y_continuous(name=NULL,
                     breaks=seq(-2, 2, .1),
                     labels = scales::percent) +
  scale_x_discrete(name=NULL) +
  geom_hline(aes(yintercept = pch[vname=="exp_total"]), linetype="dashed", size=1) + # reference: total-spending change
  geom_hline(yintercept = 0, linetype="solid", size=1) +
  theme_bw() +
  ggtitle(label="Expenditures per household, 65+ households relative to 55-64 year-old households") +
  labs(caption=capt) +
  coord_flip() +
  theme(axis.text.y=element_text(hjust=1)) +
  theme(plot.title = element_text(size=rel(1.3), face="bold")) +
  theme(axis.title = element_text(face="bold", size=rel(1))) +
  theme(axis.text = element_text(face="bold", size=rel(1))) +
  theme(plot.caption = element_text(hjust=0, size=11))
p
ggsave(p, file=paste0("./results/CEX_by_item_hbar.png"), width=10, height=6.25, units="in")
|
/r/PIT_analysis(2).r
|
no_license
|
donboyd5/PewAging
|
R
| false
| false
| 54,251
|
r
|
# 3/4/2019
#****************************************************************************************************
# Includes ####
#****************************************************************************************************
source("./r/includes/libraries.r")
source("./r/includes/globals.r")
#****************************************************************************************************
# Libraries ####
#****************************************************************************************************
library("BEAData")
library("fredr")
library("purrr")
library("ggrepel")
library("grid")
library("gridExtra")
library("broom")
#****************************************************************************************************
# Globals ####
#****************************************************************************************************
# age groups making up the 65-and-over population in the Weldon Cooper data
pop65p <- c("pop65_69", "pop70_74", "pop75_79", "pop80_84", "pop85plus")
#****************************************************************************************************
# Functions ####
#****************************************************************************************************
# gband: shaded rectangular band annotation for ggplot2 charts.
# Spans [xmin, xmax] on the x axis; by default covers the panel's full height
# (ymin/ymax infinite). Returns an annotate("rect", ...) layer to add with `+`.
gband <- function(xmin, xmax, fill="grey", alpha=.5, ymin=-Inf, ymax=Inf) {
  annotate("rect",
           xmin = xmin, xmax = xmax,
           ymin = ymin, ymax = ymax,
           fill = fill,
           alpha = alpha) # larger alpha gives a darker rectangle
}
#****************************************************************************************************
# Population projections data ####
#****************************************************************************************************
wpop <- readRDS("./data/popproj/wc_popproj.rds") # Weldon Cooper pop projections
# interactive sanity checks on the projection file's structure
ht(wpop)
count(wpop, stabbr) # 52 includes DC, US, but not PR
count(wpop, sex) # "Total"
count(wpop, year) # 2010, 2020, 2030, 2040
count(wpop, popgroup) # pop00_04, pop05_09, ..., pop80_84, pop85plus, poptot
#****************************************************************************************************
# Labor force data ####
#****************************************************************************************************
# install.packages("fredr")
# library("apitools")
#.. labor force participation by age group ----
# BLS Civilian labor force participation rates
# https://fred.stlouisfed.org/series/LNS11300060 25-54
# https://fred.stlouisfed.org/series/LNS11324230 55+
fredr_set_key(globals$fred_apikey)
rateids <- c("LNS11300060", "LNS11324230")
# pull both series at annual frequency and attach readable names
lfpr <- map_dfr(rateids, fredr, frequency="a") %>%
  mutate(year=year(date),
         vname=factor(series_id, levels=rateids, labels=c("lfpr2554", "lfpr55p"))) %>%
  select(year, series_id, vname, value)
ht(lfpr)
# quick exploratory plot
p <- lfpr %>%
  ggplot(aes(year, value, colour=vname)) +
  geom_line() +
  geom_point() +
  scale_x_continuous(breaks=seq(1940, 2020, 5)) +
  theme_bw()
capt1 <- "U.S. Bureau of Labor Statistics, Civilian Labor Force Participation Rates, retrieved from FRED, Federal Reserve Bank of St. Louis, March 5, 2019."
capt2 <- "25 to 54 years (https://fred.stlouisfed.org/series/LNS11300060)"
capt3 <- "55 years and over (https://fred.stlouisfed.org/series/LNS11324230)"
capt <- paste0("Source: ", capt1, "\n", capt2, "\n", capt3)
# polished version for the report, saved below
p <- lfpr %>%
  mutate(vname=factor(vname, levels=c("lfpr2554", "lfpr55p"), labels=c("25-54", "55+"))) %>%
  ggplot(aes(year, value, colour=vname)) +
  geom_line(size=1.1) +
  geom_point() +
  scale_x_continuous(name=NULL, breaks=seq(1920, 2050, 5)) +
  scale_y_continuous(name="Participation rate (%)") +
  ggtitle("U.S. Civilian labor force participation rates") +
  labs(caption=capt) +
  labs(colour="Age group") +
  theme_bw() +
  theme(plot.caption = element_text(hjust=0, size=10))
p
ggsave(plot=p, filename="./results/uslfpr.png", width=8, height=6, units="in")
# p + gband(1980, 2005, alpha=.3)
#.. graph of U.S. pop growth ----
glimpse(spi.a_all)
glimpse(nipa)
# B230RC is the population series in the NIPA data
uspop <- nipa %>%
  filter(freq=="A", vname=="B230RC")
ht(uspop)
# historical annualized growth rates (handles gaps via (year - lyear) exponent)
usgr <- uspop %>%
  select(year, pop=value) %>%
  mutate(lpop=lag(pop), lyear=lag(year), gr=(pop / lpop)^(1 / (year - lyear))*100 - 100)
ht(usgr)
# get projected US growth rates
ht(wpop)
pgr <- wpop %>%
  filter(sex=="Total", popgroup=="poptot", stabbr=="US") %>%
  mutate(pop=value / 1000, lpop=lag(pop), lyear=lag(year),
         gr=(pop / lpop)^(1 / (year - lyear))*100 - 100)
# expand decennial projections to annual rows (gr is NA between decades)
pgr2 <- tibble(year=min(pgr$year):max(pgr$year)) %>%
  left_join(pgr %>% select(year, gr))
ht(pgr2)
# splice history and projections into one annual series
allgr <- usgr %>%
  select(year, gr) %>%
  bind_rows(pgr2 %>% filter(year > max(usgr$year)) %>% select(year, gr)) %>%
  mutate(gr=na.approx(gr,na.rm =FALSE)) # fill in missing growth rates by interpolation
ht(allgr)
allgr %>% filter(year %in% 2010:2020)
# first-cut plot distinguishing history from projection by linetype
p <- allgr %>%
  mutate(proj=ifelse(year>=2019, "proj", "hist")) %>%
  ggplot(aes(year, gr, linetype=proj)) +
  geom_line(size=1.1, colour="blue") +
  # scale_colour_manual(values=c("blue", "green")) +
  scale_x_continuous(name=NULL, breaks=seq(1920, 2050, 10)) +
  scale_y_continuous(name="Annualized growth rate", limits=c(0, NA)) +
  geom_hline(yintercept = 1, linetype="dashed") +
  geom_hline(yintercept = 0.5, linetype="dashed") +
  theme_bw()
p + gband(1946, 1964, alpha=.4)
capt1 <- "2018 and earlier: U.S. Bureau of the Census, obtained from Bureau of Economic analysis variable B230RC"
capt2 <- "2019+: Author's interpolation of projections from the University of Virginia, Weldon Cooper Center for Public Service"
capt <- paste0(capt1, "\n", capt2)
# final plot: solid line through 2020, then dotted straight segments connecting
# the 2020/2030/2040 projection points (intermediate years filtered out)
p <- allgr %>%
  filter(!year %in% c(2021:2029, 2031:2039)) %>%
  ggplot(aes(year, gr)) +
  geom_line(size=1.1, colour="blue", data=. %>% filter(year <= 2020)) +
  geom_curve(aes(x = 2020, y = gr[year==2020],
                 xend = 2030, yend = gr[year==2030]),
             data = allgr, size=1.1, linetype="dotted", colour="blue", curvature=0) +
  geom_curve(aes(x = 2030, y = gr[year==2030],
                 xend = 2040, yend = gr[year==2040]),
             data = allgr, size=1.1, linetype="dotted", colour="blue", curvature=0) +
  scale_x_continuous(name=NULL, breaks=seq(1920, 2050, 10)) +
  scale_y_continuous(name="Percentage change from prior year", limits=c(0, NA)) +
  geom_hline(yintercept = 1, linetype="dashed") +
  geom_hline(yintercept = 0.5, linetype="dashed") +
  ggtitle("U.S. Population growth rate") +
  labs(caption=capt) +
  theme_bw() +
  theme(plot.caption = element_text(hjust=0, size=10))
p2 <- p + gband(1946, 1964, alpha=.4) +
  annotate("text", x=1949, y=0.7,
           hjust=0,
           vjust=0,
           label="Baby boom\nbirth years")
# 1946-1964 baby boom birth years
p2
ggsave(plot=p2, filename="./results/uspopgr.png", width=8, height=6, units="in")
#****************************************************************************************************
# SOI ####
#****************************************************************************************************
# D:\Data\bdata_package_sourcedata\soi
data(package="bdata")
glimpse(soiall)
count(soiall, vname)
count(soiall, year)
# retirement income: iradist, txblpension, txblsocsec
count(soiall, incgrp)
# quick check on growth rates
vars <- c("agi", "wages", "iradist", "txblpension", "txblsocsec", "txblinterest", "netcgll", "busprofinc", "odividends")
# US totals: derive retirement income, then year-over-year % change per variable
soi <- soiall %>%
  filter(stabbr=="US", incgrp=="all", vname %in% vars) %>%
  select(year, stabbr, vname, value) %>%
  spread(vname, value) %>%
  mutate(retinc=iradist + txblpension + txblsocsec) %>%
  gather(vname, value, -year, -stabbr) %>%
  group_by(vname) %>%
  arrange(year) %>%
  mutate(pch=value / lag(value) * 100 - 100)
soi
# Create a table of US retirement income
soi %>% filter(stabbr=="US", year==2016) %>%
  write_csv("./results/agius2016.csv")
# NOTE(review): `df` is not defined until further below in this file; these
# statements appear to depend on interactive execution order (or on the later
# df assignment having been run first) -- TODO confirm before running fresh.
df %>% select(-stabbr, -value) %>%
  spread(vname, pch)
df %>% filter(vname %in% c("wages", "retinc")) %>%
  ggplot(aes(year, pch, colour=vname)) +
  geom_line() +
  geom_point()
# retirement income as a share of (retirement income + wages), over time
df %>% filter(vname %in% c("wages", "retinc")) %>%
  select(-pch) %>%
  spread(vname, value) %>%
  mutate(share=retinc / (retinc + wages) * 100) %>%
  ggplot(aes(year, share)) +
  geom_line()
# State-level retirement income and its share of (retirement income + wages).
# BUG FIX: the original used `irapay`, but the vars vector above and the
# US-level calculation both use `iradist`; after spread() there is no irapay
# column, so this pipeline would fail. Use iradist.
df <- soiall %>% filter(incgrp=="all", vname %in% vars) %>%
  select(year, stabbr, vname, value) %>%
  spread(vname, value) %>%
  mutate(retinc=iradist + txblpension + txblsocsec,
         share=retinc / (retinc + wages) * 100)
# change in the retirement-income share between 2004 and 2014, by state
df %>% filter(year %in% c(2004, 2014)) %>%
  select(stabbr, year, share) %>%
  spread(year, share) %>%
  mutate(change=`2014` - `2004`) %>%
  arrange(-change)
df %>% filter(stabbr %in% c("US", "DE", "MI", "OK", "ND")) %>%
  ggplot(aes(year, share, colour=stabbr)) +
  geom_line()
#.. State tables on retirement income ---
#****************************************************************************************************
# ONETIME: CPS - create perdf - person data frame ####
#****************************************************************************************************
# March supplement as obtained from Census
# General info https://www.census.gov/programs-surveys/saipe/guidance/model-input-data/cpsasec.html
# ftp download
# State tax variables discussed at http://answers.popdata.org/Negative-values-fedtax-statetax-q1469058.aspx
cpsdir <- "D:/Data/CensusCPS/splitfiles/" # local directory with split ASEC fixed-width files
# stabfips: map state FIPS codes to postal abbreviations via the stcodes
# lookup table (defined elsewhere in the project environment).
# Unmatched codes yield NA, as with match().
stabfips <- function(fips){
  idx <- match(fips, as.character(stcodes$stfips))
  as.character(stcodes$stabbr[idx])
}
# households - good for 2015, 2016, 2017, 2018
# column layout for the household file: variable name, start/end positions, type
hvars <- read_csv("vname, start, end, type
hseq, 2, 6, i
gestfips, 42, 43, c")
hvars
# gethh: read one year's ASEC household fixed-width file from cpsdir, keeping
# only the columns described in hvars (vname/start/end/type). Returns a tibble.
gethh <- function(year, hvars) {
  path <- paste0(cpsdir, "asec", year, "_hh.dat")
  read_fwf(path,
           fwf_positions(hvars$start, hvars$end, col_names=hvars$vname),
           col_types = paste0(hvars$type, collapse = ""),
           n_max=-1) # read all records
}
# hh2015 <- gethh(2015, hvars) %>%
#   mutate(year=2015,
#          stabbr=stabfips(gestfips))
# glimpse(hh2015)
# count(hh2015, gestfips, stabbr)
#
# hh2016 <- gethh(2016, hvars) %>%
#   mutate(year=2016,
#          stabbr=stabfips(gestfips))
# glimpse(hh2016)
# count(hh2016, gestfips, stabbr)
# read the 2017 and 2018 household files; add year and state abbreviation
hh2017 <- gethh(2017, hvars) %>%
  mutate(year=2017,
         stabbr=stabfips(gestfips))
glimpse(hh2017)
count(hh2017, gestfips, stabbr)
hh2018 <- gethh(2018, hvars) %>%
  mutate(year=2018,
         stabbr=stabfips(gestfips))
glimpse(hh2018)
count(hh2018, gestfips, stabbr)
# persons - also good for 2015, 2016, 2017
# ptotval Total persons income
# D FILESTAT 1 733 (1:6)
# Tax Filer status
# V 1 .Joint, both <65
# V 2 .Joint, one <65 & one 65+
# V 3 .Joint, both 65+
# V 4 .Head of household
# V 5 .Single
# V 6 .Nonfiler
# D STATETAX_AC 6 790 (-9999:999999)
# State income tax liability, after all
# credits
# V 0 .None
# V -9999 - .Dollar amount
# V 999999 .
# column layout for the person file (positions per the ASEC data dictionary above)
pvars <- read_csv("vname, start, end, type
hseq, 2, 6, i
age, 19, 20, i
sex, 24, 24, i
marsupwt, 155, 162, d
ptotval, 580, 587, d
filestat, 733, 733, i
statetax_ac, 790, 795, d
")
pvars
# Read one year's CPS ASEC person fixed-width file from `cpsdir`.
#
# year:  4-digit survey year (used to build the file name asec<year>_person.dat).
# pvars: layout data frame with columns vname, start, end, type.
# Returns the person tibble as read by readr::read_fwf.
# NOTE(review): depends on the global `cpsdir` and on readr being attached.
getp <- function(year, pvars){
path <- paste0(cpsdir, "asec", year, "_person.dat")
layout <- fwf_positions(pvars$start, pvars$end, col_names = pvars$vname)
read_fwf(path, layout,
col_types = paste0(pvars$type, collapse = ""),
n_max = -1)
}
# Earlier years kept for reference but not used in the pooled analysis below.
# p2015 <- getp(2015, pvars) %>%
# mutate(year=2015,
# marsupwt=marsupwt / 100,
# stabbr=hh2015$stabbr[match(hseq, hh2015$hseq)])
# glimpse(p2015)
#
# p2016 <- getp(2016, pvars) %>%
# mutate(year=2016,
# marsupwt=marsupwt / 100,
# stabbr=hh2016$stabbr[match(hseq, hh2016$hseq)])
# glimpse(p2016)
# Person files for the two pooled years. marsupwt is scaled (2 implied
# decimals) and state is attached by linking to the household file via hseq.
p2017 <- getp(2017, pvars) %>%
mutate(year=2017,
marsupwt=marsupwt / 100,
stabbr=hh2017$stabbr[match(hseq, hh2017$hseq)])
glimpse(p2017)
p2018 <- getp(2018, pvars) %>%
mutate(year=2018,
marsupwt=marsupwt / 100,
stabbr=hh2018$stabbr[match(hseq, hh2018$hseq)])
glimpse(p2018)
# create a combined file
# put real tax in 2015 dollars -- that is, the 2016 file has 2015 $ (tax in the prior year)
# so adjust the 2015-file amount forward from 2014$ to 2015$
# the CPI-U was up only 0.1% between 2014 and 2015!
# it was up 2.1% from 2016 to 2017
# Pool the two years; rincome/rtax put 2017-file amounts (which report
# prior-year dollars) into the same real dollars as the 2018 file via the
# 2.1% CPI-U adjustment noted above.
perdf <- bind_rows(p2017, p2018) %>%
mutate(rincome=ifelse(year==2017, ptotval * 1.021, ptotval),
rtax=ifelse(year==2017, statetax_ac * 1.021, statetax_ac))
glimpse(perdf)
# Cache the pooled person file so later sections can skip the ONETIME build.
saveRDS(perdf, "./data/perdf.rds")
#****************************************************************************************************
# CPS - get/analyze perdf - person data frame ####
#****************************************************************************************************
#.. graph pop distribution -- density ----
perdf <- readRDS("./data/perdf.rds")
glimpse(perdf)
# Weighted population by state and single year of age; divide by 2 because
# perdf pools two survey years.
popsum <- perdf %>%
group_by(stabbr, age) %>%
summarise(pop=sum(marsupwt) / 2)
# National totals, tagged as "US" so they can sit alongside the states.
usvals <- popsum %>%
ungroup %>%
mutate(stabbr="US", weight = pop/sum(pop))
allpop <- popsum %>%
bind_rows(usvals) %>%
ungroup
sts <- c("US", "NH", "TX")
stnames <- c("U.S.", "New Hampshire", "Texas")
# Weighted age-density curves for the selected states.
p <- allpop %>%
filter(stabbr %in% sts) %>%
mutate(stabbr=factor(stabbr,
levels=sts,
labels=stnames)) %>%
group_by(stabbr) %>%
mutate(weight = pop/sum(pop)) %>%
ggplot(aes(age, weight = weight, colour=stabbr)) +
geom_line(stat="density", size=1) +
# get rid of the box in the legend by using show.legend=FALSE and then stat_density
# geom_density(size=1, show.legend = FALSE) +
# stat_density(aes(x=age, colour=stabbr),
# geom="line",
# position="identity", size=1) +
# geom_hline(yintercept=0, colour="white", size=1) +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="Density", breaks=seq(0, 1, .001)) +
theme_bw() +
labs(colour=NULL) +
ggtitle("Age distribution of population, selected states")
p
ggsave(p, file=paste0("./results/popdist_density.png"), width=10, height=6, units="in")
# stat_density(geom="line", size=1.5)
# Faceted weighted histograms as a cross-check on the density curves.
# NOTE(review): `..density..` is deprecated in newer ggplot2 (use
# after_stat(density)) -- confirm the ggplot2 version in use.
allpop %>%
filter(stabbr %in% c("US", "ME", "TX", "FL")) %>%
group_by(stabbr) %>%
mutate(weight = pop/sum(pop)) %>%
ggplot(aes(age, y = ..density.., weight = weight, fill=stabbr)) +
geom_histogram(size=1.5, bins=20) +
facet_wrap(~stabbr, ncol=1)
# Unweighted record counts by state and year; pdiff shows the year-over-year
# sample-size change. (spread is superseded by pivot_wider in newer tidyr.)
count(perdf, stabbr, year) %>%
spread(year, n) %>%
mutate(diff=`2018` - `2017`, pdiff=diff / `2017` * 100) %>%
arrange(desc(pdiff)) %>%
ht
# Quantiles of real state tax among working-age adults, by year.
# NOTE(review): qtiledf and globals$nonpit_states are defined elsewhere in the file.
perdf %>% filter(age>=18, age<65) %>% # even here we have half paying no state tax!
group_by(year) %>%
do(qtiledf(.$rtax))
# Same, restricted to states that levy a broad-based income tax.
perdf %>% filter(age>=18, !stabbr %in% globals$nonpit_states) %>% # still half paying no state tax!
group_by(year) %>%
do(qtiledf(.$rtax))
# D FILESTAT 1 733 (1:6)
# Tax Filer status
# V 1 .Joint, both <65
# V 2 .Joint, one <65 & one 65+
# V 3 .Joint, both 65+
# V 4 .Head of household
# V 5 .Single
# V 6 .Nonfiler
# Tax quantiles by filer status (see FILESTAT codes above).
perdf %>% filter(age >= 18, age < 65, !stabbr %in% globals$nonpit_states) %>%
group_by(filestat) %>%
do(qtiledf(.$rtax))
# Tax quantiles by income group.
ibrks <- c(-1e9, 0, 10e3, 25e3, 50e3, 100e3, 1e9)
perdf %>% filter(age >= 18, !stabbr %in% globals$nonpit_states) %>%
mutate(ygroup=cut(rincome, ibrks)) %>%
group_by(ygroup) %>%
do(qtiledf(.$rtax))
# get weighted mean rtax by age groups
abrks <- c(-100, 0, 18, 25, 35, 45, 65, 1e9)
# counts
perdf %>% filter(age > 16) %>%
mutate(agegrp=cut(age, breaks=abrks)) %>%
group_by(stabbr, agegrp) %>%
summarise(n=n()) %>%
spread(agegrp, n)
# Weighted population (millions) and mean real tax by state and age group.
tmp <- perdf %>% filter(age > 16) %>%
mutate(agegrp=cut(age, breaks=abrks)) %>%
group_by(stabbr, agegrp) %>%
summarise(n=sum(marsupwt) / 1e6,
rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE))
tmp
# Compare 65+ mean tax with the 45-64 group, as a ratio and a difference.
tmp2 <- tmp %>% select(-n) %>%
group_by(stabbr) %>%
spread(agegrp, rtax) %>%
mutate(ratio=`(65,1e+09]` / `(45,65]`,
diff=`(65,1e+09]` - `(45,65]`)
tmp2 %>% arrange(ratio)
tmp2 %>% arrange(diff)
# Now do age-tax profiles (regressions)
# Exploratory single-state fits: tax as a polynomial of age (and income).
mod2 <- perdf %>% filter(age >= 18, stabbr=="MS") %>%
mutate(age2=age^2, age3=age^3, rincome2=rincome^2) %>%
lm(rtax ~ age + age2 + age3 + rincome + rincome2, data=.)
summary(mod2)
mod <- perdf %>% filter(age>=18, stabbr=="MS") %>%
mutate(age2=age^2, age3=age^3) %>%
lm(rtax ~ age + age2 + age3, data=.)
summary(mod)
# Predicted tax-age shape (intercept dropped, re-centered at age 18).
tibble(age=18:75) %>%
mutate(rtax=coef(mod)["age"]*age +
coef(mod)["age2"]*age^2 +
coef(mod)["age3"]*age^3,
rtax=rtax - rtax[age==18]) %>%
ggplot(aes(age, rtax)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-1000, 2000, 100))
# Raw weighted mean tax by single year of age for one state.
perdf %>% filter(stabbr=="MD", age >= 18) %>%
group_by(age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE)) %>%
ggplot(aes(age, rtax)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-1000, 10000, 100))
st <- "MD"
# Weighted means by age for the chosen state (used again further down).
byage <- perdf %>% filter(age>=18, stabbr==st) %>%
group_by(age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE))
byage %>%
ggplot(aes(age, rincome)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-10e3, 500e3, 10e3)) +
ggtitle(st)
st <- "CA"
# 4th-degree polynomial fit for the new state.
mod <- perdf %>% filter(age>=18, stabbr==st) %>%
mutate(age2=age^2, age3=age^3, age4=age^4) %>%
lm(rtax ~ age + age2 + age3 + age4, data=.)
summary(mod)
# NOTE(review): `byage` joined below was built for MD (st was "MD" when it
# was created) while `mod` is for CA -- the actual/estimated comparison mixes
# states. Confirm whether byage should be rebuilt for CA first.
tibble(age=18:75) %>%
mutate(rtax.est=coef(mod)["(Intercept)"] +
coef(mod)["age"]*age +
coef(mod)["age2"]*age^2 +
coef(mod)["age3"]*age^3 +
coef(mod)["age4"]*age^4) %>%
left_join(byage) %>%
gather(variable, value, -age) %>%
ggplot(aes(age, value, colour=variable)) +
geom_line() +
scale_x_continuous(breaks=seq(0, 100, 5)) +
scale_y_continuous(breaks=seq(-1000, 10000, 100)) +
ggtitle(st)
#****************************************************************************************************
# Construct CPS age-tax profiles ####
#****************************************************************************************************
# Now do age-tax profiles (regressions) for real ####
# perdf
# perdf2 <- perdf %>% filter(!(stabbr=="MD" & year==2016)) # remove bad data
# saveRDS(perdf2, "./data/perdf2.rds")
# perdf2 <- readRDS("./data/perdf2.rds")
#.. stage -- state age analysis build the data ----
# Weighted mean tax and income by state and single year of age.
stage.st <- perdf %>%
filter(age>=18) %>%
group_by(stabbr, age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE))
# Same computed nationally, tagged "US".
stage.us <- perdf %>%
filter(age>=18) %>%
group_by(age) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE)) %>%
mutate(stabbr="US")
stage <- bind_rows(stage.st, stage.us)
st <- "CA"
#.. build the model ----
# Fit a 4th-degree polynomial of age to a per-person dollar variable and
# return the coefficients as a one-row tibble with prefixed names.
#
# df     data frame containing `age` and the dependent variable
# yvar   name of the dependent variable (default "rtax", the original behavior)
# prefix prefix applied to output coefficient columns (default "tx.")
# Returns a 1-row tibble: <prefix>intercept, <prefix>age, ..., <prefix>age4.
# Backward compatible: f(df) is identical to the original rtax/"tx." fit.
f <- function(df, yvar = "rtax", prefix = "tx."){
mod <- df %>%
filter(age >= 18) %>%
mutate(age2=age^2, age3=age^3, age4=age^4) %>%
# build the formula from yvar so the same fit works for any dollar variable
lm(as.formula(paste0(yvar, " ~ age + age2 + age3 + age4")), data=.)
c.df <- as_tibble(t(coef(mod)))
vnames <- names(c.df)
vnames[1] <- "intercept" # rename "(Intercept)" for cleaner downstream columns
vnames <- paste0(prefix, vnames)
c.df <- c.df %>% setNames(vnames)
return(c.df)
}
# Tax-age polynomial coefficients: one national row plus one row per state.
tx.us <- perdf %>% do(f(.)) %>% mutate(stabbr="US")
tx.df <- perdf %>% group_by(stabbr) %>%
do(f(.)) %>%
bind_rows(tx.us)
# Fit a 4th-degree polynomial of age to a per-person dollar variable and
# return the coefficients as a one-row tibble with prefixed names.
# Parallel to f() above, but defaults target income rather than tax.
#
# df     data frame containing `age` and the dependent variable
# yvar   name of the dependent variable (default "rincome", the original behavior)
# prefix prefix applied to output coefficient columns (default "inc.")
# Returns a 1-row tibble: <prefix>intercept, <prefix>age, ..., <prefix>age4.
# Backward compatible: g(df) is identical to the original rincome/"inc." fit.
g <- function(df, yvar = "rincome", prefix = "inc."){
mod <- df %>%
filter(age >= 18) %>%
mutate(age2=age^2, age3=age^3, age4=age^4) %>%
# build the formula from yvar so the same fit works for any dollar variable
lm(as.formula(paste0(yvar, " ~ age + age2 + age3 + age4")), data=.)
c.df <- as_tibble(t(coef(mod)))
vnames <- names(c.df)
vnames[1] <- "intercept" # rename "(Intercept)" for cleaner downstream columns
vnames <- paste0(prefix, vnames)
c.df <- c.df %>% setNames(vnames)
return(c.df)
}
# Income-age polynomial coefficients: one national row plus one row per state.
inc.us <- perdf %>% do(g(.)) %>% mutate(stabbr="US")
inc.df <- perdf %>% group_by(stabbr) %>%
do(g(.)) %>%
bind_rows(inc.us)
# Combine actuals with smoothed (polynomial) estimates, long format:
# one row per state x age x variable (rtax, rincome, rtax.est, rincome.est).
st2 <- stage %>%
left_join(tx.df) %>%
left_join(inc.df) %>%
mutate(rtax.est=tx.intercept +
tx.age * age +
tx.age2 * age^2 +
tx.age3 * age^3 +
tx.age4 * age^4,
rincome.est=inc.intercept +
inc.age * age +
inc.age2 * age^2 +
inc.age3 * age^3 +
inc.age4 * age^4) %>%
select(stabbr, age, rtax, rincome, rtax.est, rincome.est) %>%
gather(variable, value, -stabbr, -age)
#.. look at the profiles ----
st2
#.. set up graph info ----
# Shared caption text for the age-profile graphs below.
cps_src <- "Source: Annual Social and Economic (ASEC) supplement to the Current Population Survey, pooled 2017 and 2018"
note <- "Smoothed values estimated as a 4th degree polynomial of age"
capt <- paste0(cps_src, "\n", note)
#.. graphs of income and tax by age, US, 2 panels ----
# Panel 1: actual vs smoothed income by age, national.
p1 <- st2 %>%
filter(stabbr=="US", str_sub(variable, 1, 4)=="rinc") %>%
mutate(varf=factor(variable, levels=c("rincome", "rincome.est"), labels=c("Actual", "Smoothed"))) %>%
ggplot(aes(x=age, y=value, colour=varf)) +
theme_bw() +
geom_line(size=rel(1.3)) +
geom_point() +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="Income", breaks=seq(0, 200e3, 10e3), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=c("darkgreen", "blue")) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average income by age in the United States", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.5), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p1
# Panel 2: actual vs smoothed state income tax by age, national.
p2 <- st2 %>%
filter(stabbr=="US", str_sub(variable, 1, 4)=="rtax") %>%
mutate(varf=factor(variable, levels=c("rtax", "rtax.est"), labels=c("Actual", "Smoothed"))) %>%
ggplot(aes(x=age, y=value, colour=varf)) +
theme_bw() +
geom_line(size=rel(1.3)) +
geom_point() +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="State income tax", breaks=seq(0, 50e3, 200), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=c("darkgreen", "blue")) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average state income tax by age in the United States", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.5), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p2
# Stack the two panels on one page and save.
ml <- marrangeGrob(list(p1, p2), nrow=2, ncol=1, top=NULL)
ml
ggsave("./results/income_and_tax_byage.png", ml, width=8, height=11, units="in")
#.. graphs of income and tax by age, case study states ####
# Two alternative state/colour sets; the SECOND assignment overwrites the
# first (with-US version is effectively dead code -- comment one out as needed).
sts <- c(globals$case_study_states, "US"); clrs <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c', "black")
sts <- c(globals$case_study_states); clrs <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c')
# Panel 1: smoothed income by age, one line per case-study state.
p1 <- st2 %>%
filter(stabbr %in% sts, variable=="rincome.est") %>%
ungroup %>%
mutate(stabbr=factor(stabbr, levels=sts)) %>%
ggplot(aes(x=age, y=value, colour=stabbr)) +
theme_bw() +
geom_line(size=rel(1.3)) +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="Income", breaks=seq(0, 200e3, 10e3), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=clrs) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average income by age in case study states", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.3), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p1
# Panel 2: smoothed state income tax by age, one line per case-study state.
p2 <- st2 %>% filter(stabbr %in% sts, variable=="rtax.est") %>%
ungroup %>%
mutate(stabbr=factor(stabbr, levels=sts)) %>%
ggplot(aes(x=age, y=value, colour=stabbr)) +
theme_bw() +
geom_line(size=rel(1.3)) +
scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
scale_y_continuous(name="State income tax", breaks=seq(0, 50e3, 200), limits=c(0, NA), labels=scales::dollar) +
scale_colour_manual(values=clrs) +
guides(colour=guide_legend(title=NULL)) +
ggtitle("Average state income tax by age in case study states", subtitle="2017 dollars") +
labs(caption=capt) +
theme(plot.title = element_text(size=rel(1.3), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=rel(.8)))
p2
ml <- marrangeGrob(list(p1, p2), nrow=2, ncol=1, top=NULL)
ml
# use one or the other of these
ggsave("./results/income_and_tax_byage_css_xus.png", ml, width=8, height=11, units="in")
ggsave("./results/income_and_tax_byage_css.png", ml, width=8, height=11, units="in")
#.. DON'T NEED graphs of income and tax by age indexed to age 50, individual case study states ####
# sts <- c(globals$case_study_states, "US")
# st <- sts[1]
# p1 <- st2 %>%
# filter(stabbr %in% sts, variable %in% c("rincome.est", "rtax.est")) %>%
# mutate(variable=factor(variable, levels=c("rincome.est", "rtax.est"), labels=c("Income", "State income tax"))) %>%
# group_by(stabbr, variable) %>%
# mutate(ivalue=value / value[age==50] * 100) %>%
# ungroup %>%
# ggplot(aes(x=age, y=ivalue, colour=variable)) +
# theme_bw() +
# geom_line(size=rel(1.3)) +
# geom_hline(yintercept = 100, linetype="dashed") +
# scale_x_continuous(name="Age", breaks=seq(0, 100, 5)) +
# scale_y_continuous(name="Indexed value (age 50=100)", breaks=seq(0, 100, 10), limits=c(0, NA)) +
# scale_colour_manual(values=c("blue", "darkgreen")) +
# guides(colour=guide_legend(title=NULL)) +
# ggtitle("Average income and state income tax by age", subtitle="Indexed to values at age 50") +
# labs(caption=capt) +
# theme(plot.title = element_text(size=rel(1.3), face="bold")) +
# theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
# theme(axis.title = element_text(face="bold", size=rel(1))) +
# theme(axis.text = element_text(face="bold", size=rel(1))) +
# theme(plot.caption = element_text(hjust=0, size=rel(.8)))
# p1 + facet_wrap(~stabbr)
# ggsave("./results/income_and_tax_byage_indexed.png", p1, width=8, height=11, units="in")
#.. DON'T NEED graph of pch in avg tax vs avg income, from 2nd-top to top income range ----
# abrks <- c(-100, 0, 18, 25, 35, 45, 65, 1e9)
# pchdf <- perdf %>% filter(age>=18, !stabbr %in% globals$nonpit_states) %>%
# mutate(agegrp=cut(age, breaks=abrks)) %>%
# group_by(stabbr, agegrp) %>%
# summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE),
# rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE)) %>%
# filter(agegrp %in% c("(45,65]", "(65,1e+09]")) %>%
# gather(variable, value, rtax, rincome) %>%
# spread(agegrp, value) %>%
# mutate(pch=`(65,1e+09]` / `(45,65]` - 1) %>%
# select(stabbr, variable, pch) %>%
# spread(variable, pch) %>%
# mutate(diff=rtax - rincome)
#
# uspch <- pchdf %>% ungroup %>%
# summarise(rincome=median(rincome), rtax=median(rtax), diff=median(diff))
#
#
# p <- pchdf %>% mutate(stype=ifelse(stabbr %in% globals$case_study_states, "Case study\nstates", "Other states"),
# lsize=ifelse(stype=="Other states", 3, 4)) %>%
# ggplot(aes(x=rincome, y=rtax, label=stabbr)) +
# theme_bw() +
# # geom_text(colour="blue", size=3, position=position_jitter(width=.004, height=.004)) +
# # geom_text(aes(colour=stype, size=stype), fontface = "bold", position=position_nudge(x=.002, y=.002)) +
# geom_text_repel(aes(colour=stype, size=stype), fontface = "bold", point.padding = NA) +
# #geom_text(aes(colour=stype, size=stype), fontface = "bold") +
# scale_x_continuous(name="Percent change in income (greater declines are to left)", labels = scales::percent,
# breaks=seq(-1, 1, .05)) +
# scale_y_continuous(name="Percent change in state income tax (greater declines are lower)",
# labels = scales::percent,
# breaks=seq(-1, 1, .05)) +
# scale_colour_manual(values=c("darkred", "blue", "#636363")) +
# scale_size_manual(values=c(3.5, 3.5, 3)) +
# geom_hline(yintercept=uspch$rtax) +
# geom_vline(xintercept=uspch$rincome) +
# ggtitle(label="Decline in income and in state income tax, age 65+ cohort relative to age 45-64 cohort",
# subtitle="Horizontal and vertical lines show United States medians") +
# labs(caption="\nSource: Annual Social and Economic (ASEC) supplement to the Current Population Survey, pooled 2017 and 2018") +
# theme(plot.title = element_text(size=rel(1.3), face="bold")) +
# theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
# theme(axis.title = element_text(face="bold", size=rel(1))) +
# theme(axis.text = element_text(face="bold", size=rel(1))) +
# theme(plot.caption = element_text(hjust=0, size=8)) +
# guides(colour=guide_legend(title=NULL), size=guide_legend(title=NULL))
# p
# ggsave(p, file=paste0("./results/taxpch_vs_incpch_65vs45to64_scatter.png"), width=10, height=6.25, units="in")
#.. DON'T NEED graph of tax ratios ----
# get average tax by pop group
abrks <- c(-100, 0, 18, 25, 35, 45, 65, 1e9)
# Mean real tax by age group per state; diff/pch compare 65+ with 45-64.
taxratio <- perdf %>% filter(age>=18, !stabbr %in% globals$nonpit_states) %>%
mutate(agegrp=cut(age, breaks=abrks)) %>%
group_by(stabbr, agegrp) %>%
summarise(rtax=weighted.mean(rtax, marsupwt, na.rm=TRUE)) %>%
spread(agegrp, rtax) %>%
mutate(diff=`(65,1e+09]` - `(45,65]`,
pch=diff / `(45,65]`) %>%
arrange(pch)
# Cross-state medians drawn as reference lines on the scatter.
ustr <- taxratio %>%
ungroup %>%
summarise(diff=median(diff), pch=median(pch))
# Scatter of dollar difference vs percent change, states labeled by abbreviation.
p <- taxratio %>%
filter(!stabbr %in% globals$nonpit_states) %>%
mutate(stype=ifelse(stabbr %in% globals$case_study_states, "Case study\nstates", "Other states"),
lsize=ifelse(stype=="Other states", 3, 4)) %>%
ggplot(aes(x=pch, y=diff, label=stabbr)) +
theme_bw() +
# geom_text(colour="blue", size=3, position=position_jitter(width=.004, height=.004)) +
# geom_text(aes(colour=stype, size=stype), fontface = "bold", position=position_nudge(x=.002, y=.002)) +
geom_text_repel(aes(colour=stype, size=stype), fontface = "bold", point.padding = NA) +
#geom_text(aes(colour=stype, size=stype), fontface = "bold") +
scale_x_continuous(name="Percent change", labels = scales::percent,
breaks=seq(-1, 1, .05)) +
scale_y_continuous(name="Dollar difference, 65+ minus 45-64 age group",
labels = scales::dollar,
breaks=seq(-3e3, 1e3, 100)) +
scale_colour_manual(values=c("darkred", "blue", "#636363")) +
scale_size_manual(values=c(3.5, 3.5, 3)) +
geom_hline(yintercept=ustr$diff) +
geom_vline(xintercept=ustr$pch) +
# geom_hline(yintercept=0, linetype="dashed", colour="darkblue") +
ggtitle(label="Change in tax per-capita for 65+ age group relative to 45-64 age group",
subtitle="Horizontal and vertical lines show United States medians") +
labs(caption="\nSource: Annual Social and Economic (ASEC) supplement to the Current Population Survey, pooled 2017 and 2018") +
theme(plot.title = element_text(size=rel(1.5), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=8)) +
guides(colour=guide_legend(title=NULL), size=guide_legend(title=NULL))
p
ggsave(p, file=paste0("./results/age65ptax_vs45to64_scatter.png"), width=10, height=6.25, units="in")
#****************************************************************************************************
# Now do Watkins-like calculation of potential revenue loss ####
#****************************************************************************************************
wpop <- readRDS("./data/popproj/wc_popproj.rds") # Weldon Cooper pop projections
glimpse(wpop)
count(wpop, popgroup) # get age cuts needed
# Distinct age-group labels (dropping the total-population row).
popgroup <- count(wpop, popgroup) %>%
filter(popgroup!="poptot")
unique(wpop$popgroup)
# make a data frame to link popgroups on
# 5-year age brackets (0-4, 5-9, ..., 85+) matching the projection groups.
agebrks <- c(-1, seq(4, 84, 5), 1e9)
agegroups <- tibble(age=1:100, agebrk=cut(age, agebrks)) %>%
group_by(agebrk) %>%
summarise(n=n()) %>%
mutate(popgroup=popgroup$popgroup) %>%
select(-n)
agegroups # good, we can use this for linking
# get the tax data from CPS AND CUT THE WEIGHT IN HALF TO REFLECT 2 years !!!! ----
perdf <- readRDS("./data/perdf.rds") %>%
mutate(avgweight=marsupwt / 2)
glimpse(perdf)
# perdf has one rec per person in 2017 and one per 2018; rtax is real state tax (2017 dollars)
# it has ~180k obs per year; marsupwt is the weight
# prepare cps data for merging against pop projections data
# total population and mean rtax by state and age group, plus total pop [2017, 2018 pooled]
# thus, all the pops are 2-year sums; the taxes are average annual tax
spop.age <- perdf %>%
mutate(agebrk=cut(age, agebrks)) %>%
group_by(stabbr, agebrk) %>%
summarise(wtdn=sum(avgweight),
rtax=weighted.mean(rtax, avgweight)) %>%
left_join(agegroups) %>% # this allows us to link to Weldon Cooper projections
group_by(stabbr) %>%
mutate(totpop=sum(wtdn)) %>%
ungroup
spop.age
spop.age %>% filter(stabbr=="NY")
# prepare the Weldon Cooper popshares data - for each state and income group, get share of state pop different years
wpop2 <- wpop %>%
filter(sex=="Total", popgroup!="poptot") %>%
group_by(year, stabbr) %>%
mutate(totproj=sum(value),
popshare=value / totproj,
shareyear=paste0("share", year)) %>%
ungroup %>%
select(stabbr, popgroup, shareyear, popshare) %>%
spread(shareyear, popshare)
wpop2 %>% group_by(stabbr) %>% summarise_at(vars(starts_with("share")), sum) # make sure shares add to 1
# put the shares on the tax data
# taxNNNN = implied total tax if the CPS population were redistributed to
# that projection year's age shares (holding per-group mean tax fixed).
ptaxshares <- spop.age %>%
group_by(stabbr) %>%
mutate(sharecps=wtdn / sum(wtdn)) %>%
ungroup %>%
left_join(wpop2, by=c("stabbr", "popgroup")) %>%
mutate(taxcps=rtax * wtdn,
tax2010=rtax * totpop * share2010,
tax2020=rtax * totpop * share2020,
tax2030=rtax * totpop * share2030,
tax2040=rtax * totpop * share2040)
#.. quick check compare implied pit in the cps data vs cps per census years don't match quite correctly of course ----
# Compare CPS-implied income tax with Census government-finance data
# (slgfin; level 2 = state governments, level 1 = state + local).
sgcomp <- ptaxshares %>%
group_by(stabbr) %>%
summarise(taxcps=sum(taxcps) / 1e6) %>%
left_join(slgfin %>% filter(level==2, aggvar=="iit", year==2016) %>% select(stabbr, taxcen=value) %>% mutate(taxcen=taxcen / 1000)) %>%
mutate(pdiff=taxcps / taxcen * 100 - 100,
level="sg")
slgcomp <- ptaxshares %>%
group_by(stabbr) %>%
summarise(taxcps=sum(taxcps) / 1e6) %>%
left_join(slgfin %>% filter(level==1, aggvar=="iit", year==2016) %>% select(stabbr, taxcen=value) %>% mutate(taxcen=taxcen / 1000)) %>%
mutate(pdiff=taxcps / taxcen * 100 - 100,
level="slg")
taxcomp <- bind_rows(sgcomp, slgcomp)
taxcomp %>% arrange(-abs(pdiff))
taxcomp %>% filter(stabbr=="NY")
taxcomp %>% filter(stabbr %in% globals$case_study_states) %>% arrange(stabbr, level)
#.. end check ----
# Detailed NY table, tax in $ millions with a totals row.
ptaxshares %>%
filter(stabbr=="NY") %>%
select(-agebrk, -totpop) %>%
select(stabbr, popgroup, everything()) %>%
mutate_at(vars(starts_with("tax")), ~ . / 1e6) %>%
janitor::adorn_totals() %>%
mutate(pch=tax2040 / tax2020 * 100 - 100) %>%
kable(digits=c(rep(0, 4), rep(3, 5), rep(0, 5), 1), format.args=list(big.mark=","))
write_csv(ptaxshares, "./results/ptaxshares.csv") # so I can calc by hand
#.. compute total state income tax after credits in $ millions at different pop shares and change ----
totchange <- ptaxshares %>%
group_by(stabbr) %>%
summarise_at(vars(tax2010, tax2020, tax2030, tax2040), ~(sum(.) / 1e6)) %>%
mutate(diff.tot=tax2040 - tax2020,
pch.tot=tax2040 / tax2020 * 100 - 100)
# alternative approach - per capita tax, and change - should yield the same pch
# (weighted.mean with the year's shares as weights gives per-capita tax).
percapchange <- ptaxshares %>%
group_by(stabbr) %>%
summarise(rtax2010=weighted.mean(rtax, share2010, na.rm=TRUE),
rtax2020=weighted.mean(rtax, share2020, na.rm=TRUE),
rtax2030=weighted.mean(rtax, share2030, na.rm=TRUE),
rtax2040=weighted.mean(rtax, share2040, na.rm=TRUE)) %>%
mutate(diff.pc=rtax2040 - rtax2020,
pch.pc=rtax2040 / rtax2020 * 100 - 100)
# Combine total and per-capita views and persist.
taxdiff <- totchange %>%
left_join(percapchange) %>%
arrange(diff.pc, pch.pc)
taxdiff
write_csv(taxdiff, "./results/pitchanges.csv")
saveRDS(taxdiff, "./results/pitchanges.rds")
taxdiff <- readRDS("./results/pitchanges.rds")
# Scatter of per-capita tax change (percent vs dollars), income-tax states only.
pdata <- taxdiff %>% filter(!stabbr %in% c("DC", "US", globals$nonpit_states)) %>% mutate(pch.pc=pch.pc / 100)
usvals <- pdata %>% summarise_at(vars(pch.pc, diff.pc), ~median(., na.rm=TRUE))
l1 <- "Source: Author's analysis of data from Current Population Survey and from University of Virginia, Weldon Cooper Center for Public Service"
l2 <- "Notes: (1) States without broad-based income taxes excluded. (2) Analysis is similar to Felix & Watkins 2013, with updated information."
srcnote <- paste0("\n", l1, "\n", l2)
# NOTE(review): lsize tests stype=="Other" but stype values are "Other states"
# or "Case study\nstates" -- lsize is always 4 (it is also unused below).
p <- pdata %>%
mutate(stype=ifelse(stabbr %in% globals$case_study_states, "Case study\nstates", "Other states"),
lsize=ifelse(stype=="Other", 3, 4)) %>%
ggplot(aes(x=pch.pc, y=diff.pc, label=stabbr)) +
theme_bw() +
# geom_text_repel(aes(colour=stype, size=stype), fontface = "bold") +
geom_text(aes(colour=stype, size=stype), fontface = "bold") +
scale_x_continuous(name="Percent change in per-capita income tax",
labels = scales::percent,
breaks=seq(-.5, .5, .01)) +
scale_y_continuous(name="Dollar change in per-capita income tax",
labels = scales::dollar,
breaks=seq(-100, 100, 20)) +
scale_colour_manual(values=c("darkred", "blue", "#636363")) +
scale_size_manual(values=c(3.5, 3.5, 3)) +
geom_hline(yintercept=usvals$diff.pc) +
geom_vline(xintercept=usvals$pch.pc) +
geom_hline(yintercept=0, linetype="dashed", colour="darkblue") +
ggtitle(label="Impact on per-capita income tax of moving from projected 2020 to 2040 age distribution (2017 dollars)",
subtitle="Horizontal and vertical lines show medians for income-tax states") +
labs(caption=srcnote) +
theme(plot.title = element_text(size=rel(1.3), face="bold")) +
theme(plot.subtitle = element_text(size=rel(1), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=8)) +
guides(colour=guide_legend(title=NULL), size=guide_legend(title=NULL))
p
ggsave(p, file=paste0("./results/pit_impact_scatter.png"), width=10, height=6.25, units="in")
# Quick ranked listing of per-capita changes.
taxdiff %>% select(stabbr, diff.pc, pch.pc) %>%
arrange(pch.pc)
# Share of each state's population in the 65+ age groups under each
# projection year, plus the 2020->2040 change in that share.
# NOTE(review): `pop65p` (the labels of the 65+ popgroups) is not defined in
# this section of the file -- confirm it is created upstream.
ptaxshares %>% select(stabbr, popgroup, starts_with("share")) %>%
filter(popgroup %in% pop65p) %>%
group_by(stabbr) %>%
# funs() is deprecated (dplyr 0.8) and defunct (dplyr >= 1.0); pass the
# function directly instead.
summarise_at(vars(starts_with("share")), sum) %>%
mutate(diff=share2040 - share2020)
#.. get pit change as % of own-source revenue ----
totchange %>% arrange(desc(abs(pch.tot)))
# count(slgfin, aggvar)
# Express the projected income-tax change as a percent of 2016 state+local
# own-source revenue (slgfin level 1; values converted to $ millions).
osrpct <- slgfin %>%
filter(year==2016, level==1, aggvar %in% c("iit", "osr")) %>%
mutate(value=value / 1e3) %>% # put in $ millions
spread(aggvar, value) %>%
select(stabbr, iit, osr) %>%
left_join(totchange %>% select(stabbr, pch.iit=pch.tot)) %>%
mutate(iitchange=iit * pch.iit / 100,
osrpct=iitchange / osr * 100)
osrpct %>% arrange(desc(abs(osrpct)))
#****************************************************************************************************
# Consumer expenditure survey ####
#****************************************************************************************************
# prepare the popshares data
popproj <- readRDS("./data/popproj.rds")
glimpse(popproj)
count(popproj, popgroup) # get age cuts needed
popgroup <- count(popproj, popgroup) %>%
filter(popgroup!="pop")
# prepare the popshares data
pop2 <- popproj %>%
filter(sex=="Total", popgroup!="pop") %>%
group_by(year, stabbr) %>%
mutate(totproj=sum(value),
popshare=value / totproj,
shareyear=paste0("share", year)) %>%
ungroup %>%
select(stabbr, popgroup, shareyear, popshare) %>%
spread(shareyear, popshare)
# get shares for each state, group, and year (cols)
# NOTE(review): this recomputes pop2 identically to the block just above --
# one of the two computations is redundant and could be removed.
pop2 <- popproj %>%
filter(sex=="Total", popgroup!="pop") %>%
group_by(year, stabbr) %>%
mutate(totproj=sum(value), popshare=value / totproj,
shareyear=paste0("share", year)) %>%
ungroup %>%
select(stabbr, popgroup, shareyear, popshare) %>%
spread(shareyear, popshare)
# check
pop2 %>% group_by(stabbr) %>% summarise_at(vars(starts_with("share")), sum) # make sure they sum to 1
# calculate taxable consumption as % of income at each age group
age <- c(21.4, 29.6, 39.5, 49.6, 59.3, 68.9, 81.6) # avg age of reference person per CEX
txpct <- c(51.7, 41.1, 38.4, 34.7, 38.4, 46.5, 48.9) # txbl consumption as % of income per my analysis of CEX
agepct <- tibble(age, txpct)
# get coefs that allow us to estimate txpct at any age
# Cubic polynomial of age fitted to the 7 CEX group means.
mod <- agepct %>% mutate(age2=age*age, age3=age2*age) %>%
lm(txpct ~ age + age2 + age3, data=.)
summary(mod)
# estimate txpct of income at any age
agespend <- tibble(age=18:85,
txpct=coef(mod)["(Intercept)"] +
coef(mod)["age"] * age +
coef(mod)["age2"] * age^2 +
coef(mod)["age3"] * age^3) %>%
mutate(type="est") %>%
bind_rows(agepct %>% mutate(type="data"))
# check that it looks plausible
agespend %>% ggplot(aes(age, txpct, colour=type)) +
geom_line()
# make a data frame to link popgroups on proj file with pop on CPS
# Same 5-year bracket linking table as in the Watkins section above, rebuilt
# here against this file's popgroup labels.
agebrks <- c(-1, seq(4, 84, 5), 1e9)
agegroups <- tibble(age=1:100, agebrk=cut(age, agebrks)) %>%
group_by(agebrk) %>%
summarise(n=n()) %>%
mutate(popgroup=popgroup$popgroup) %>%
select(-n)
agegroups # good, we can use this for linking
# calc txbl consumption for each person in pooled CPS
# NOTE(review): perdf2 (MD-2016 records removed) is only created in
# commented-out code earlier -- confirm ./data/perdf2.rds exists.
perdf2 <- readRDS("./data/perdf2.rds")
glimpse(perdf2)
# Estimated taxable sales per person = income x age-specific taxable-consumption
# percent, then averaged by state and age group.
txc <- perdf2 %>%
left_join(agespend %>% filter(type=="est") %>%
select(-type)) %>%
mutate(txblsales=rincome * txpct / 100,
agebrk=cut(age, agebrks)) %>%
left_join(agegroups) %>%
group_by(stabbr, popgroup) %>%
summarise(wtdn=sum(marsupwt),
rincome=weighted.mean(rincome, marsupwt, na.rm=TRUE),
txblsales=weighted.mean(txblsales, marsupwt, na.rm=TRUE)) %>%
ungroup
txc %>% filter(stabbr=="NY") %>%
select(popgroup, rincome, txblsales) %>%
gather(variable, value, -popgroup) %>%
# must use group=variable because popgroup is a factor!
ggplot(aes(popgroup, value, colour=variable, group=variable)) +
geom_point() +
geom_line() +
theme(axis.text.x=element_text(angle=45,hjust=.9,vjust=0.9))
# now get state averages under different shares
# NOTE(review): txc is ungrouped here, so totpop is the NATIONAL total in
# every row; the analogous spop.age code above computed totpop per state.
# Confirm whether this mutate should be grouped by stabbr.
txcshares <- txc %>% left_join(pop2) %>%
mutate(totpop=sum(wtdn)) %>%
mutate(txc2010=txblsales * totpop * share2010,
txc2020=txblsales * totpop * share2020,
txc2030=txblsales * totpop * share2030,
txc2040=txblsales * totpop * share2040)
write_csv(txcshares, "./results/txcshares.csv") # so I can calc by hand
# compute total tax, and change
# Total implied taxable consumption ($ millions) under each projection
# year's age shares, and the 2010->2030 change.
# NOTE(review): this compares 2030 vs 2010 while the PIT section compares
# 2040 vs 2020 -- confirm the intended comparison years.
txcchange <- txcshares %>% group_by(stabbr) %>%
# funs() is deprecated (dplyr 0.8) and defunct (dplyr >= 1.0); use a
# lambda formula instead.
summarise_at(vars(txc2010, txc2020, txc2030, txc2040), ~ sum(., na.rm=TRUE) / 1e6) %>%
mutate(diff.tot=txc2030 - txc2010,
pch.tot=txc2030 / txc2010 * 100 - 100)
txcchange
# put the shares on the tax data
# NOTE(review): `p4` is not defined anywhere in this section -- this block
# appears to be leftover from an earlier draft; confirm what p4 should be
# (it must carry stabbr, popgroup, rtax, and totpop columns).
ptaxshares <- p4 %>% left_join(pop2) %>%
mutate(tax2010=rtax * totpop * share2010,
tax2020=rtax * totpop * share2020,
tax2030=rtax * totpop * share2030,
tax2040=rtax * totpop * share2040)
write_csv(ptaxshares, "./results/ptaxshares.csv") # so I can calc by hand
# compute total tax, and change
totchange <- ptaxshares %>% group_by(stabbr) %>%
# funs() is deprecated (dplyr 0.8) and defunct (dplyr >= 1.0); use a
# lambda formula instead.
summarise_at(vars(tax2010, tax2020, tax2030, tax2040), ~ sum(.) / 1e6) %>%
mutate(diff.tot=tax2030 - tax2010,
pch.tot=tax2030 / tax2010 * 100 - 100)
#****************************************************************************************************
# Consumer expenditure survey - PLAN B ####
#****************************************************************************************************
popproj <- readRDS("./data/popproj.rds")
# CEX 2015 age groupings
# Inline CEX Table 1300 summary: income, total expenditures, consumption
# excluding finance, and commonly-taxable expenditures by householder age group.
txc.age <- read_csv("agegrp.cex, ageref.cex, incpretax, expend, consxfin, txblexp
00to24, 21.4, 31606, 32797, 29836, 16330
25to34, 29.6, 64472, 52062, 45182, 26480
35to44, 39.5, 84938, 65334, 55954, 32641
45to54, 49.6, 95248, 69753, 57578, 33047
55to64, 59.3, 75262, 58781, 49051, 28889
65to74, 68.9, 54067, 49477, 43518, 25167
75+, 81.6, 36408, 38123, 34392, 17816")
txc.age
# Human-readable age-group labels for the x axis.
txc.age <- txc.age %>% mutate(agelabs=agegrp.cex %>% str_replace_all("to", " to "),
agelabs=agelabs %>% str_replace("00 to", "Up to"))
# Long format for plotting the three dollar series.
pdata <- txc.age %>% select(-consxfin) %>%
gather(variable, value, -agegrp.cex, -ageref.cex, -agelabs) %>%
mutate(varf=factor(variable,
levels=c("incpretax", "expend", "txblexp"),
labels=c("Income before tax", "Total expenditures", "Commonly taxable\nexpenditures")))
# Base plot shared by the p1/p2 variants below.
p <- pdata %>%
ggplot(aes(ageref.cex, value, colour=varf)) +
geom_line(size=rel(1.3)) +
geom_point(size=rel(1.2)) +
scale_y_continuous(name=NULL, breaks=seq(0, 100e3, 10e3), limits=c(0, NA), labels=scales::dollar) +
scale_x_continuous(name="Age of householder", breaks=txc.age$ageref.cex, labels=txc.age$agelabs) +
theme_bw() +
scale_colour_manual(values=c("red", "blue", "darkgreen")) +
theme(plot.title = element_text(size=rel(2), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1)))
p1 <- p +
ggtitle("Income and expenditures by householder age") +
guides(color = guide_legend(title=NULL, nrow = 3)) +
labs(caption="\nSource: U.S. Bureau of the Census, Consumer Expenditure Survey 2015, Table 1300 plus author's calculations") +
theme(plot.caption = element_text(hjust=0, size=rel(.8))) +
theme(legend.direction = 'horizontal',
legend.position = 'right',
legend.key = element_rect(size = 5),
legend.key.size = unit(1.5, 'lines')) # put space between legend lines of text
p1
ggsave(p1, file=paste0("./results/expend_hhage.png"), width=10, height=6.25, units="in")
p2 <- p +
ggtitle("Household income and expenditures\nby householder age") +
guides(color = guide_legend(title=NULL, nrow = 1)) +
labs(caption="\nSource: U.S. Bureau of the Census, Consumer Expenditure Survey 2015\nTable 1300 plus author's calculations") +
theme(plot.caption = element_text(hjust=0, size=rel(.8))) +
theme(legend.direction = 'horizontal',
legend.position = 'bottom',
legend.key = element_rect(size = 5),
legend.key.size = unit(1, 'lines')) # put space between legend lines of text
p2
ggsave(p2, file=paste0("./results/expend_hhage_alt.png"), width=7, height=8, units="in")
# NOTE(review): this bare `popgroup` prints an object that is only created
# further below (`popgroup <- count(popproj, popgroup) ...`); on a fresh
# top-to-bottom run this line errors. Presumably left over from interactive
# work -- confirm and remove.
popgroup
# CEX 2: create a linking file ####
# Each popgroup's share of its state's projected population, by year,
# spread wide into one share<year> column per projection year
# (share2010, share2020, ... as used by the txc/ptax calculations above).
pop2 <- popproj %>%
  filter(sex=="Total", popgroup!="pop") %>%
  group_by(year, stabbr) %>%
  mutate(totproj=sum(value), popshare=value / totproj,
         shareyear=paste0("share", year)) %>%
  ungroup %>%
  select(stabbr, popgroup, shareyear, popshare) %>%
  spread(shareyear, popshare)
popgroup <- count(popproj, popgroup) %>%
filter(popgroup!="pop")
popgroup.cex <- popgroup %>% mutate(c2=str_sub(popgroup, 1, 2),
agegrp.cex=ifelse(c2 < 25, "00to24", NA),
agegrp.cex=ifelse(c2 %in% 25:34, "25to34", agegrp.cex),
agegrp.cex=ifelse(c2 %in% 35:44, "35to44", agegrp.cex),
agegrp.cex=ifelse(c2 %in% 45:54, "45to54", agegrp.cex),
agegrp.cex=ifelse(c2 %in% 55:64, "55to64", agegrp.cex),
agegrp.cex=ifelse(c2 %in% 65:74, "65to74", agegrp.cex),
agegrp.cex=ifelse(c2 >= 75, "75+", agegrp.cex)) %>%
select(-c2)
popgroup.cex
# get total population of each state in base year
perdf2 <- readRDS("./data/perdf2.rds")
totpop2015 <- perdf2 %>% filter(year==2015) %>%
group_by(stabbr) %>%
summarise(pop=sum(marsupwt, na.rm=TRUE))
# collapse pop proj shares by CEX age groups
cex_change <- pop2 %>%
filter(stabbr!="US") %>%
left_join(popgroup.cex) %>%
group_by(stabbr, agegrp.cex) %>%
summarise_at(vars(starts_with("share")), funs(sum)) %>%
left_join(totpop2015) %>%
left_join(txc.age) %>%
mutate(txc2010=share2010 * pop * txblexp / 1e6,
txc2030=share2030 * pop * txblexp / 1e6) %>%
group_by(stabbr) %>%
summarise_at(vars(txc2010, txc2030), funs(sum)) %>%
mutate(diff=txc2030 - txc2010,
pch=diff / txc2010 * 100) %>%
left_join(totpop2015) %>%
mutate_at(vars(txc2010, txc2030, diff), funs(pc=. * 1e6 / pop))
cex_change
precis(cex_change)
precis(cex_change %>% filter(stabbr != "DC"))
cex_change %>% arrange(pch)
write_csv(cex_change, "./results/cex_change.csv")
quantile(cex_change$pch, probs=c(0, .1, .25, .5, .75, .9, 1))
xgst <- c("DC")
p <- cex_change %>% filter(!stabbr %in% xgst) %>%
arrange(desc(pch)) %>%
mutate(stname=factor(stabbr, levels=stcodes$stabbr, labels=stcodes$stname),
stname2=factor(stname, levels=stname, labels=stname, ordered=TRUE),
pchround=round(pch, 1)) %>%
ggplot(aes(x=stname2, y=pch)) +
geom_bar(stat="identity", fill="blue") +
scale_y_continuous(name=NULL, breaks=seq(-3, 3, .2)) +
scale_x_discrete(name=NULL) +
geom_text(aes(label=pchround), nudge_y=0.1, size=3) +
theme_bw() +
ggtitle(label="Taxable sales") +
coord_flip() +
geom_vline(xintercept=0) +
theme(axis.text.y=element_text(hjust=0.5))
# apparently must adjust the state labels AFTER the flip, as y axis (I think)
p
#****************************************************************************************************
# Median pit changes ####
#****************************************************************************************************
sgtax.a %>% filter(year %in% 2007:2010, vname=="iit", stabbr!="US", value>0) %>%
select(stabbr, year, value) %>%
spread(year, value) %>%
mutate(pch1=`2009` / `2008` * 100 - 100,
pch2=`2010` / `2009` * 100 - 100) %>%
summarise_at(vars(pch1, pch2), funs(median))
#****************************************************************************************************
# Consumer expenditure survey - exp breakdown ####
#****************************************************************************************************
fn <- "D:/Dropbox/Open Projects/NCSL Boston Feb 2017/Presentation/BoydNCSLBoston2017(10).xlsx"
df <- read_excel(fn, sheet="CEX_export", skip=1)
names(df)
df <- df[, which(!is.na(names(df)))] # fix bad data
names(df)
# get df with agerp and agegrp
agevals <- tibble(agegrp=names(df)[-c(1:2)], agerp=df[1, -c(1:2)] %>% t %>% as.vector)
vars <- c("exp_total", "exp_txblcons", "food_home", "food_away", "alcohol", "housing", "housing_shelter",
"apparel", "transport", "healthcare", "entertainment", "perscare", "reading", "education", "tobacco")
ages <- c("agerp_all", "agerp_00to24", "agerp_25to34", "agerp_35to44", "agerp_45to54",
"agerp_55to64", "agerp_65to74", "agerp_75+", "agerp_65+")
df2 <- df %>%
select(-description) %>%
filter(vname %in% vars) %>%
gather(agegrp, value, -vname) %>%
spread(vname, value) %>%
mutate(alctob=alcohol + tobacco,
housing_xshelter=housing - housing_shelter) %>%
gather(vname, value, -agegrp) %>%
left_join(agevals)
voi <- c("exp_total", "exp_txblcons", "food_home", "food_away", "alctob", "housing", "apparel", "transport",
"healthcare", "entertainment", "perscare", "reading", "education")
voilabs <- c("Total", "Taxable", "Food at home", "Food away", "Alcohol & tobacco", "Housing", "Apparel", "Transportation",
"Health care", "Entertainment", "Personal care", "Reading", "Education")
cbind(voi, voilabs)
srcnote <- "\nSource: U.S. Bureau of the Census, Consumer Expenditure Survey 2015 Table 1300, plus author's calculations"
note <- "Inspired by Oregon Office of Economic Analysis, https://oregoneconomicanalysis.com/2014/03/19/aging-revenues/"
capt <- paste0(srcnote, "\n", note)
capt <- srcnote
p <- df2 %>% filter(agegrp %in% c("agerp_55to64", "agerp_65+"),
vname %in% voi) %>%
select(-agerp) %>%
spread(agegrp, value) %>%
mutate(pch=`agerp_65+` / agerp_55to64 - 1,
vlab=factor(vname, levels=voi, labels=voilabs)) %>%
arrange(pch) %>%
mutate(vlab2=factor(vlab, levels=vlab, labels=vlab, ordered=TRUE)) %>%
ggplot(aes(vlab2, pch)) +
geom_bar(stat="identity", fill="blue") +
scale_y_continuous(name=NULL,
breaks=seq(-2, 2, .1),
labels = scales::percent) +
scale_x_discrete(name=NULL) +
geom_hline(aes(yintercept = pch[vname=="exp_total"]), linetype="dashed", size=1) +
geom_hline(yintercept = 0, linetype="solid", size=1) +
theme_bw() +
ggtitle(label="Expenditures per household, 65+ households relative to 55-64 year-old households") +
labs(caption=capt) +
coord_flip() +
theme(axis.text.y=element_text(hjust=1)) +
theme(plot.title = element_text(size=rel(1.3), face="bold")) +
theme(axis.title = element_text(face="bold", size=rel(1))) +
theme(axis.text = element_text(face="bold", size=rel(1))) +
theme(plot.caption = element_text(hjust=0, size=11))
p
ggsave(p, file=paste0("./results/CEX_by_item_hbar.png"), width=10, height=6.25, units="in")
|
library(dplyr)
library(rayshader)
library(png)
library(raster)
library(sf)
library(viridis)
library(tuneR)
library(signal)
library(rgl)
library(cartography)
#shp file with geometries
neufTroisSHP <- st_read("~/art_withR/boucingMap/neuftrois.shp")
neufTroisSHP <- st_simplify(neufTroisSHP, preserveTopology = T,dTolerance = 50)
#st_write(neufTroisSHP,"~/art_withR/boucingMap/neufTrois_simple.shp")
#version rapide du plot
# par(mar=c(0,0,0,0))
plot(st_geometry(neufTroisSHP), border="white", bgc="black")
#elevation avec l'intensitรฉ d'un sample
# nbIris <- length(neufTroisSHP$OBJECTID)
#614 iris
sampleLength <- 68
soundfile <- file.path("~/art_withR/boucingMap/introlongue_niceCut.wav")
intro <- readWave(soundfile, from = 0, to = sampleLength , units="seconds")
# writeWave(intro, "introlongue.wav")
#on veut une visu toutes les 1/10 secondes et que 1 s c'est 44100 samples,
#รงa fait des fenรจtres de 4410 samples
frames_per_s <-12
sampRate <- intro@samp.rate
windowsSize <- sampRate / frames_per_s
#spectre de l'intro
fftIntro <- specgram(intro@left, n=windowsSize, Fs = intro@samp.rate)
#retire la partie complexe / phase
fftIntro$S <- abs(fftIntro$S)
#nb frequences doit รชtre >= au nb IRIS pour que tout le monde ait qqch
#length(fftIntro$f)
#get rid of useless info in shp
neufTroisSHP$M2_IP <- NULL
neufTroisSHP$M2_EMP <- NULL
neufTroisSHP$M2_POP <- NULL
neufTroisSHP$C_CAINSEE <- NULL
neufTroisSHP$N_QU <- NULL
neufTroisSHP$N_SQ_IR <- NULL
neufTroisSHP$C_IR <- NULL
neufTroisSHP$C_TYPEIR <- NULL
neufTroisSHP$L_IR <- NULL
# sort geometries (IRIS) by size
neuftroisTri <- neufTroisSHP %>% arrange(desc(SHAPE_Area))
# number of bands of frequencies
nb_bandes <- 31
#each geometry recieve a band number
neuftroisTri$bande <- (neuftroisTri$SHAPE_Area %>% ntile(nb_bandes) )
#seems to be traditional values of EQ freq : double each band
limites_de_bandesEQ<-c(0, 20,25,31.5,40,50,63,80,100,125,160,200,250,315,400,500,630,800,1000,1250,1600,2000,2500,3150,4000,5000,6300,8000,10000,12500,16000)
#
pEQ <- limites_de_bandesEQ / max(fftIntro$f)
qq <- quantile(fftIntro$f, probs = pEQ)
#split the frequencies of the FFT by the desired number of frequency bands
bandeslabels <- (nb_bandes-1):1
fftIntro$bande <- cut(fftIntro$f, qq, include.lowest=TRUE, labels = bandeslabels )
library(reshape2)
# Long-format spectrogram: one row per (frequency bin, time frame) pair,
# with the magnitude in column "intensite".
longfft <- melt(fftIntro$S, varnames = c("f_index", "t_index"), value.name = "intensite")
# Bin the frequency index into nb_bandes equal-width bands, labelled from
# nb_bandes (lowest frequencies) down to 1.
# Bug fix: the original used `longfft$f`, which only resolved through
# data.frame `$` partial name matching against the `f_index` column --
# spell the column name out explicitly (identical behavior, no footgun).
longfft$bande <- cut(longfft$f_index, nb_bandes, include.lowest = TRUE,
                     labels = nb_bandes:1)
# modify type of factor cf ?factor: cut() returns a factor, so go through
# as.character before as.numeric (otherwise we would get the level codes)
longfft$bande <- as.numeric(as.character(longfft$bande))
#normalize by 4 parts quart so the bass isnt the only high values
longfft$quart <- cut(longfft$intensite,4, labels=FALSE)
# 4 maximum intensities
maximumIntensityByQuarts <- longfft %>% group_by(quart) %>% summarise(quartMax=max(intensite))
#each intensity gets its quart maximal value
longfft <- inner_join(longfft, maximumIntensityByQuarts, by= "quart")
#normalize each intensity by it quart maximum value
longfft <- longfft %>% mutate(intensite=intensite/quartMax )
#compute intensity mean value in each freq band, to affect to each geometry
mean_ByBand_By_t <- longfft %>% group_by(bande, t_index) %>% summarise(valAgg = mean(intensite))
cube93 <- left_join(neuftroisTri, mean_ByBand_By_t)
sampleID <- 1400
display_by_Sample(sampleID)
# Draw one video frame: a choropleth of the per-geometry aggregated intensity
# ("valAgg") for time frame `sampleID`, with no margins, border, legend or
# title (frames are meant to be stitched into a video afterwards).
# NOTE(review): reads the global sf object `cube93` and the `viridis` palette
# from the enclosing script rather than taking them as arguments.
display_by_Sample <- function(sampleID){
  # zero plot margins so the map fills the whole device
  par(mar=c(0,0,0,0))
  # rows of cube93 for this time frame, keeping the intensity column
  # (sf subsetting keeps the geometry column alongside "valAgg")
  tutu <-cube93[cube93$t_index==sampleID,"valAgg"]
  plot(tutu["valAgg"],border=NA, bgc="black", pal=viridis, key.pos=NULL, main = NULL )
}
library(tiff)
nb_frames <- sampleLength * frames_per_s
# display_by_Sample(400, neuftroisTri,fftIntro)
# display_by_Sample(600, neuftroisTri,fftIntro)
# display_by_Sample(800, neuftroisTri,fftIntro)
imagefifi <- readPNG("art_withR/boucingMap/neufTroisOSM.png")
#remove borders
imagefifi <- imagefifi[21:(nrow(imagefifi)-20),21:(ncol(imagefifi)-20),]
neufTroisOSMraster <- imagefifi
for (sampleID in 2 : nb_frames){
cat(sampleID,"\n")
if (sampleID %% 5 == 0 ){
gc()
}
fifi <- tempfile()
png(fifi, width = 800, height = 800)
display_by_Sample(sampleID)
dev.off()
#layer elevation
localtif <- raster::raster(fifi)
# #layer couleur
# fifi <- tempfile()
# png(fifi)
# display_by_Sample_Othecolor(sampleID)
# dev.off()
# imagefifi <- readPNG(fifi)
# dim(imagefifi)
# imagefifi <- readPNG("art_withR/boucingMap/neufTroisOSM.png")
# #imagefifiOSM <- readTIFF("art_withR/boucingMap/fondCarteOSM/fondCarteOSM.tif")
#
#
# #remove borders
# imagefifi <- imagefifi[21:(nrow(imagefifi)-20),21:(ncol(imagefifi)-20),]
#
# #add alpha value
# imagefifi.alpah <- array(NA, dim=c(nrow(imagefifi), ncol(imagefifi), 4))
# imagefifi.alpah[,,-4] <- imagefifi
# #imagefifi.alpah[,,4] <- 0.9
# imagefifi[,,4] <- 0.9
# grid::grid.raster(imagefifi)
#
#create elevation matrix from the localtif created from fifi
elmat <- matrix(raster::extract(localtif,raster::extent(localtif),buffer=1000),
nrow=ncol(localtif),ncol=nrow(localtif))
#remove borders from the elevation matrix because they are white so they are interpreted as high elevation values
elmat <- elmat[21:(nrow(elmat)-20),21:(ncol(elmat)-20)]
elmat %>%
sphere_shade(progbar = FALSE,texture = "bw") %>%
add_overlay(neufTroisOSMraster) %>%
add_shadow(ray_shade(elmat,zscale=3000,maxsearch = 300,progbar = FALSE),0.7) %>%
plot_3d(elmat, zscale = 1, fov=70, theta=70+sampleID*0.5, phi=35, windowsize=c(100,100), zoom=0.65)
# Render Viewport to File
rgl.snapshot(paste0("~/art_withR/boucingMap/frames/frame",sprintf("%05d",sampleID),".png"), fmt = "png", top = F)
rgl.clear()
}
rgl.close()
# create the video with ffmpeg; -r sets the frame rate
#ffmpeg -r 78 -i frame%05d.png -c:v libx264 out.mp4
#ajouter le son sur le flux video
#-r rate
# -i input files and sound
#ffmpeg -r 60 -i out.mp4 -i intro.wav -c:v copy -c:a aac out_et_son.mp4
|
/boucingMap/bouncingMap.R
|
no_license
|
chapinux/art_withR
|
R
| false
| false
| 6,048
|
r
|
library(dplyr)
library(rayshader)
library(png)
library(raster)
library(sf)
library(viridis)
library(tuneR)
library(signal)
library(rgl)
library(cartography)
#shp file with geometries
neufTroisSHP <- st_read("~/art_withR/boucingMap/neuftrois.shp")
neufTroisSHP <- st_simplify(neufTroisSHP, preserveTopology = T,dTolerance = 50)
#st_write(neufTroisSHP,"~/art_withR/boucingMap/neufTrois_simple.shp")
#version rapide du plot
# par(mar=c(0,0,0,0))
plot(st_geometry(neufTroisSHP), border="white", bgc="black")
#elevation avec l'intensitรฉ d'un sample
# nbIris <- length(neufTroisSHP$OBJECTID)
#614 iris
sampleLength <- 68
soundfile <- file.path("~/art_withR/boucingMap/introlongue_niceCut.wav")
intro <- readWave(soundfile, from = 0, to = sampleLength , units="seconds")
# writeWave(intro, "introlongue.wav")
#on veut une visu toutes les 1/10 secondes et que 1 s c'est 44100 samples,
#รงa fait des fenรจtres de 4410 samples
frames_per_s <-12
sampRate <- intro@samp.rate
windowsSize <- sampRate / frames_per_s
#spectre de l'intro
fftIntro <- specgram(intro@left, n=windowsSize, Fs = intro@samp.rate)
#retire la partie complexe / phase
fftIntro$S <- abs(fftIntro$S)
#nb frequences doit รชtre >= au nb IRIS pour que tout le monde ait qqch
#length(fftIntro$f)
#get rid of useless info in shp
neufTroisSHP$M2_IP <- NULL
neufTroisSHP$M2_EMP <- NULL
neufTroisSHP$M2_POP <- NULL
neufTroisSHP$C_CAINSEE <- NULL
neufTroisSHP$N_QU <- NULL
neufTroisSHP$N_SQ_IR <- NULL
neufTroisSHP$C_IR <- NULL
neufTroisSHP$C_TYPEIR <- NULL
neufTroisSHP$L_IR <- NULL
# sort geometries (IRIS) by size
neuftroisTri <- neufTroisSHP %>% arrange(desc(SHAPE_Area))
# number of bands of frequencies
nb_bandes <- 31
#each geometry recieve a band number
neuftroisTri$bande <- (neuftroisTri$SHAPE_Area %>% ntile(nb_bandes) )
#seems to be traditional values of EQ freq : double each band
limites_de_bandesEQ<-c(0, 20,25,31.5,40,50,63,80,100,125,160,200,250,315,400,500,630,800,1000,1250,1600,2000,2500,3150,4000,5000,6300,8000,10000,12500,16000)
#
pEQ <- limites_de_bandesEQ / max(fftIntro$f)
qq <- quantile(fftIntro$f, probs = pEQ)
#split the frequencies of the FFT by the desired number of frequency bands
bandeslabels <- (nb_bandes-1):1
fftIntro$bande <- cut(fftIntro$f, qq, include.lowest=TRUE, labels = bandeslabels )
library(reshape2)
# Long-format spectrogram: one row per (frequency bin, time frame) pair,
# with the magnitude in column "intensite".
longfft <- melt(fftIntro$S, varnames = c("f_index", "t_index"), value.name = "intensite")
# Bin the frequency index into nb_bandes equal-width bands, labelled from
# nb_bandes (lowest frequencies) down to 1.
# Bug fix: the original used `longfft$f`, which only resolved through
# data.frame `$` partial name matching against the `f_index` column --
# spell the column name out explicitly (identical behavior, no footgun).
longfft$bande <- cut(longfft$f_index, nb_bandes, include.lowest = TRUE,
                     labels = nb_bandes:1)
# modify type of factor cf ?factor: cut() returns a factor, so go through
# as.character before as.numeric (otherwise we would get the level codes)
longfft$bande <- as.numeric(as.character(longfft$bande))
#normalize by 4 parts quart so the bass isnt the only high values
longfft$quart <- cut(longfft$intensite,4, labels=FALSE)
# 4 maximum intensities
maximumIntensityByQuarts <- longfft %>% group_by(quart) %>% summarise(quartMax=max(intensite))
#each intensity gets its quart maximal value
longfft <- inner_join(longfft, maximumIntensityByQuarts, by= "quart")
#normalize each intensity by it quart maximum value
longfft <- longfft %>% mutate(intensite=intensite/quartMax )
#compute intensity mean value in each freq band, to affect to each geometry
mean_ByBand_By_t <- longfft %>% group_by(bande, t_index) %>% summarise(valAgg = mean(intensite))
cube93 <- left_join(neuftroisTri, mean_ByBand_By_t)
sampleID <- 1400
display_by_Sample(sampleID)
# Draw one video frame: a choropleth of the per-geometry aggregated intensity
# ("valAgg") for time frame `sampleID`, with no margins, border, legend or
# title (frames are meant to be stitched into a video afterwards).
# NOTE(review): reads the global sf object `cube93` and the `viridis` palette
# from the enclosing script rather than taking them as arguments.
display_by_Sample <- function(sampleID){
  # zero plot margins so the map fills the whole device
  par(mar=c(0,0,0,0))
  # rows of cube93 for this time frame, keeping the intensity column
  # (sf subsetting keeps the geometry column alongside "valAgg")
  tutu <-cube93[cube93$t_index==sampleID,"valAgg"]
  plot(tutu["valAgg"],border=NA, bgc="black", pal=viridis, key.pos=NULL, main = NULL )
}
library(tiff)
nb_frames <- sampleLength * frames_per_s
# display_by_Sample(400, neuftroisTri,fftIntro)
# display_by_Sample(600, neuftroisTri,fftIntro)
# display_by_Sample(800, neuftroisTri,fftIntro)
imagefifi <- readPNG("art_withR/boucingMap/neufTroisOSM.png")
#remove borders
imagefifi <- imagefifi[21:(nrow(imagefifi)-20),21:(ncol(imagefifi)-20),]
neufTroisOSMraster <- imagefifi
for (sampleID in 2 : nb_frames){
cat(sampleID,"\n")
if (sampleID %% 5 == 0 ){
gc()
}
fifi <- tempfile()
png(fifi, width = 800, height = 800)
display_by_Sample(sampleID)
dev.off()
#layer elevation
localtif <- raster::raster(fifi)
# #layer couleur
# fifi <- tempfile()
# png(fifi)
# display_by_Sample_Othecolor(sampleID)
# dev.off()
# imagefifi <- readPNG(fifi)
# dim(imagefifi)
# imagefifi <- readPNG("art_withR/boucingMap/neufTroisOSM.png")
# #imagefifiOSM <- readTIFF("art_withR/boucingMap/fondCarteOSM/fondCarteOSM.tif")
#
#
# #remove borders
# imagefifi <- imagefifi[21:(nrow(imagefifi)-20),21:(ncol(imagefifi)-20),]
#
# #add alpha value
# imagefifi.alpah <- array(NA, dim=c(nrow(imagefifi), ncol(imagefifi), 4))
# imagefifi.alpah[,,-4] <- imagefifi
# #imagefifi.alpah[,,4] <- 0.9
# imagefifi[,,4] <- 0.9
# grid::grid.raster(imagefifi)
#
#create elevation matrix from the localtif created from fifi
elmat <- matrix(raster::extract(localtif,raster::extent(localtif),buffer=1000),
nrow=ncol(localtif),ncol=nrow(localtif))
#remove borders from the elevation matrix because they are white so they are interpreted as high elevation values
elmat <- elmat[21:(nrow(elmat)-20),21:(ncol(elmat)-20)]
elmat %>%
sphere_shade(progbar = FALSE,texture = "bw") %>%
add_overlay(neufTroisOSMraster) %>%
add_shadow(ray_shade(elmat,zscale=3000,maxsearch = 300,progbar = FALSE),0.7) %>%
plot_3d(elmat, zscale = 1, fov=70, theta=70+sampleID*0.5, phi=35, windowsize=c(100,100), zoom=0.65)
# Render Viewport to File
rgl.snapshot(paste0("~/art_withR/boucingMap/frames/frame",sprintf("%05d",sampleID),".png"), fmt = "png", top = F)
rgl.clear()
}
rgl.close()
#creer la vidรฉo r le rate
#ffmpeg -r 78 -i frame%05d.png -c:v libx264 out.mp4
#ajouter le son sur le flux video
#-r rate
# -i input files and sound
#ffmpeg -r 60 -i out.mp4 -i intro.wav -c:v copy -c:a aac out_et_son.mp4
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-internals.R
\name{.gg_resize_legend}
\alias{.gg_resize_legend}
\title{Make the legend smaller}
\usage{
.gg_resize_legend(pointSize = 0.75, textSize = 6, spaceLegend = 0.75)
}
\arguments{
\item{pointSize}{- the ggplot size of lines or points}
\item{textSize}{- the size in pts of the text}
\item{spaceLegend}{- degree of spacing between items in the scale (defines overall size)}
}
\value{
a theme
}
\description{
Make the legend smaller
}
|
/man/dot-gg_resize_legend.Rd
|
permissive
|
terminological/ggrrr
|
R
| false
| true
| 526
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-internals.R
\name{.gg_resize_legend}
\alias{.gg_resize_legend}
\title{Make the legend smaller}
\usage{
.gg_resize_legend(pointSize = 0.75, textSize = 6, spaceLegend = 0.75)
}
\arguments{
\item{pointSize}{- the ggplot size of lines or points}
\item{textSize}{- the size in pts of the text}
\item{spaceLegend}{- degree of spacing between items in the scale (defines overall size)}
}
\value{
a theme
}
\description{
Make the legend smaller
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{getXCIstate}
\alias{getXCIstate}
\title{Classify X-genes}
\usage{
getXCIstate(xciObj)
}
\arguments{
\item{xciObj}{A \code{data.table}. The table returned by \code{betaBinomXI}}
}
\value{
A \code{data.table} with genes and their XCI-state.
}
\description{
Classify X-linked genes between Escape (E), Variable Escape (VE) and Silenced (S)
}
|
/man/getXCIstate.Rd
|
no_license
|
HaiminLi0/XCIR
|
R
| false
| true
| 431
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{getXCIstate}
\alias{getXCIstate}
\title{Classify X-genes}
\usage{
getXCIstate(xciObj)
}
\arguments{
\item{xciObj}{A \code{data.table}. The table returned by \code{betaBinomXI}}
}
\value{
A \code{data.table} with genes and their XCI-state.
}
\description{
Classify X-linked genes between Escape (E), Variable Escape (VE) and Silenced (S)
}
|
## Caching the Inverse of a Matrix:
## The function makeCacheMatrix() create a matrix and cache(store)its inverse
## The function cacheSolve() checks whether inverse already exists
## if not calculate it's inverse
## This function Function makeCacheMatrix gets a matrix as an input,set the value of the matrix,
## get the value of the matrix, set the inverse Matrix and get the inverse Matrix. The matrix object
## can cache its own object.
## <<- operator is used to assign a value to an object in an environment that is different
## from the current environment
#' Build a cache-aware "matrix" object.
#'
#' Returns a list of four closures sharing one enclosing environment that
#' holds the matrix `x` and its (lazily supplied) inverse. Setting a new
#' matrix invalidates any previously cached inverse.
#'
#' @param x A matrix (defaults to an empty 1x1 NA matrix).
#' @return A list with elements `set`, `get`, `setinverse`, `getinverse`.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL

  # Replace the stored matrix and drop the now-stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Store an externally computed inverse in the cache.
  setinverse <- function(value) {
    cached_inverse <<- value
  }

  # Return the cached inverse, or NULL when nothing is cached.
  getinverse <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The function cacheSolve takes the output of makeCacheMatrix(matrix) as an
## input , checks whether the inverse of matrix already stored
## if yes it displays a message "getting cached inverse of given matrix"
## and the cached object
## if not gets the original matrix data and calculate it's inverse using solve()
#' Inverse of a cache-aware "matrix", computed at most once.
#'
#' `x` is an object created by `makeCacheMatrix()`. The first call inverts
#' the stored matrix with `solve()` (forwarding `...`) and caches the result;
#' subsequent calls announce and return the cached inverse.
#'
#' @param x A makeCacheMatrix() object.
#' @param ... Extra arguments forwarded to `solve()`.
#' @return The inverse of the matrix held by `x`.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    # Cache hit: reuse the stored inverse.
    message("getting cached inverse of given matrix")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
Nidhichaubey/ProgrammingAssignment2
|
R
| false
| false
| 1,627
|
r
|
## Caching the Inverse of a Matrix:
## The function makeCacheMatrix() create a matrix and cache(store)its inverse
## The function cacheSolve() checks whether inverse already exists
## if not calculate it's inverse
## This function Function makeCacheMatrix gets a matrix as an input,set the value of the matrix,
## get the value of the matrix, set the inverse Matrix and get the inverse Matrix. The matrix object
## can cache its own object.
## <<- operator is used to assign a value to an object in an environment that is different
## from the current environment
# Create a cache-aware "matrix": a list of four closures that share an
# enclosing environment holding the matrix `x` and its cached inverse `inV`
# (assigned with `<<-`). Setting a new matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  # cached inverse; NULL means "not computed yet"
  inV <- NULL
  # replace the stored matrix and invalidate the cached inverse
  set <- function(y) {
    x <<- y
    inV <<- NULL
  }
  # return the stored matrix
  get <- function() x
  # store an externally computed inverse in the cache
  setinverse <- function(matInv) inV <<- matInv
  # return the cached inverse (or NULL when nothing is cached)
  getinverse <- function() inV
  # expose the four accessors by name
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The function cacheSolve takes the output of makeCacheMatrix(matrix) as an
## input , checks whether the inverse of matrix already stored
## if yes it displays a message "getting cached inverse of given matrix"
## and the cached object
## if not gets the original matrix data and calculate it's inverse using solve()
# Return the inverse of the cache-aware "matrix" `x` built by
# makeCacheMatrix(). Reuses the cached inverse when present; otherwise
# computes it with solve() (forwarding `...`) and stores it back.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inV <- x$getinverse()
  # cache hit: announce and return the stored inverse
  if(!is.null(inV)) {
    message("getting cached inverse of given matrix")
    return(inV)
  }
  # cache miss: fetch the matrix, invert it, and cache the result
  data <- x$get()
  inV <- solve(data, ...)
  x$setinverse(inV)
  inV
}
|
# Library
suppressPackageStartupMessages(library(caret))
suppressPackageStartupMessages(library(tidyverse))
# Import train set
dat <- readRDS("data/pred_data.RDS")
train <- dat %>% filter(train==1)
test <- dat %>% filter(train==0)
# Create eval data.frames: one row per score threshold 1..100. For each model
# score -- nn (neural network), bt (boosted tree), max (labels per the plot
# legends below) -- record the fraction of all fraud captured among cases
# whose bin_* value is at or above the threshold (a capture/FDR-style curve).
# NOTE(review): assumes bin_nn/bin_bt/bin_max are integer score bins on a
# 1-100 scale and fraud is 0/1 -- confirm against the script that builds
# pred_data.RDS.
eval.train <- data.frame("threshold" = 1:100, "nn"=NA, "bt"=NA, "max"=NA)
eval.test <- data.frame("threshold" = 1:100, "nn"=NA, "bt"=NA, "max"=NA)
for(i in 1:100){
  # Training set: share of total fraud at or above threshold i
  eval.train$nn[i] <- sum(train$fraud[train$bin_nn >= i])/sum(train$fraud)
  eval.train$bt[i] <- sum(train$fraud[train$bin_bt >= i])/sum(train$fraud)
  eval.train$max[i] <- sum(train$fraud[train$bin_max >= i])/sum(train$fraud)
  # Test set: same capture rates on the held-out data
  eval.test$nn[i] <- sum(test$fraud[test$bin_nn >= i])/sum(test$fraud)
  eval.test$bt[i] <- sum(test$fraud[test$bin_bt >= i])/sum(test$fraud)
  eval.test$max[i] <- sum(test$fraud[test$bin_max >= i])/sum(test$fraud)
}
# order strictest threshold first for the tables/plots below
eval.train <- eval.train %>% arrange(desc(threshold))
eval.test <- eval.test %>% arrange(desc(threshold))
# Save tables
write.csv(eval.train, "data/evaltab_train.csv")
write.csv(eval.test, "data/evaltab_test.csv")
# Plot tables
eval.train %>%
gather("metric","score",2:4) %>%
ggplot(aes(x=threshold,y=score,color=metric))+geom_line(size=1)+
xlab("Bin Threshold")+ylab("% of Total Fraud Caught")+ggtitle("FDR Curve: Training Set")+
scale_color_discrete(label = c("Boosted Tree","Max", "Neural Network"))+
theme(legend.title = element_blank())+
scale_y_continuous(breaks = seq(0,1,.1))+
geom_abline(slope=-0.01, intercept=1, linetype="dashed")
eval.test %>%
gather("metric","score",2:4) %>%
ggplot(aes(x=threshold,y=score,color=metric))+geom_line(size=1)+
xlab("Bin Threshold")+ylab("% of Total Fraud Caught")+ggtitle("FDR Curve: Test Set")+
scale_color_discrete(label = c("Boosted Tree","Max", "Neural Network"))+
theme(legend.title = element_blank())+
scale_y_continuous(breaks = seq(0,1,.1))+
geom_abline(slope=-0.01, intercept=1, linetype="dashed")
|
/scripts/train_test_eval.R
|
no_license
|
chrisarm/fraud_pro_2
|
R
| false
| false
| 1,978
|
r
|
# Library
suppressPackageStartupMessages(library(caret))
suppressPackageStartupMessages(library(tidyverse))
# Import train set
dat <- readRDS("data/pred_data.RDS")
train <- dat %>% filter(train==1)
test <- dat %>% filter(train==0)
# Create eval data.frame
eval.train <- data.frame("threshold" = 1:100, "nn"=NA, "bt"=NA, "max"=NA)
eval.test <- data.frame("threshold" = 1:100, "nn"=NA, "bt"=NA, "max"=NA)
for(i in 1:100){
# Training set
eval.train$nn[i] <- sum(train$fraud[train$bin_nn >= i])/sum(train$fraud)
eval.train$bt[i] <- sum(train$fraud[train$bin_bt >= i])/sum(train$fraud)
eval.train$max[i] <- sum(train$fraud[train$bin_max >= i])/sum(train$fraud)
# Test set
eval.test$nn[i] <- sum(test$fraud[test$bin_nn >= i])/sum(test$fraud)
eval.test$bt[i] <- sum(test$fraud[test$bin_bt >= i])/sum(test$fraud)
eval.test$max[i] <- sum(test$fraud[test$bin_max >= i])/sum(test$fraud)
}
eval.train <- eval.train %>% arrange(desc(threshold))
eval.test <- eval.test %>% arrange(desc(threshold))
# Save tables
write.csv(eval.train, "data/evaltab_train.csv")
write.csv(eval.test, "data/evaltab_test.csv")
# Plot tables
eval.train %>%
gather("metric","score",2:4) %>%
ggplot(aes(x=threshold,y=score,color=metric))+geom_line(size=1)+
xlab("Bin Threshold")+ylab("% of Total Fraud Caught")+ggtitle("FDR Curve: Training Set")+
scale_color_discrete(label = c("Boosted Tree","Max", "Neural Network"))+
theme(legend.title = element_blank())+
scale_y_continuous(breaks = seq(0,1,.1))+
geom_abline(slope=-0.01, intercept=1, linetype="dashed")
eval.test %>%
gather("metric","score",2:4) %>%
ggplot(aes(x=threshold,y=score,color=metric))+geom_line(size=1)+
xlab("Bin Threshold")+ylab("% of Total Fraud Caught")+ggtitle("FDR Curve: Test Set")+
scale_color_discrete(label = c("Boosted Tree","Max", "Neural Network"))+
theme(legend.title = element_blank())+
scale_y_continuous(breaks = seq(0,1,.1))+
geom_abline(slope=-0.01, intercept=1, linetype="dashed")
|
\name{GIMLET-package}
\alias{GIMLET-package}
\alias{GIMLET}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
/man/GIMLET-package.Rd
|
no_license
|
tshimam/GIMLET
|
R
| false
| false
| 818
|
rd
|
\name{GIMLET-package}
\alias{GIMLET-package}
\alias{GIMLET}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
# Read in the electric power consumption dataset (semicolon-separated).
# NOTE(review): no na.strings is given, so missing-value markers in the raw
# file (presumably "?") survive as text and only become NA during the numeric
# coercion below -- confirm against the data file.
rawdata <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
# Keep only observations from the dates 1 Feb and 2 Feb 2007
rawdata$Date <- as.Date(rawdata$Date, "%d/%m/%Y")
powerdata <- subset(rawdata, as.Date(Date)=='2007-02-01' | as.Date(Date)=='2007-02-02')
# Convert the factor variables into numeric variables
# (through as.character first: as.numeric on a factor would yield level codes)
powerdata$Global_active_power <- as.numeric(as.character(powerdata$Global_active_power))
powerdata$Sub_metering_1 <- as.numeric(as.character(powerdata$Sub_metering_1))
powerdata$Sub_metering_2 <- as.numeric(as.character(powerdata$Sub_metering_2))
powerdata$Sub_metering_3 <- as.numeric(as.character(powerdata$Sub_metering_3))
powerdata$Voltage <- as.numeric(as.character(powerdata$Voltage))
powerdata$Global_reactive_power <- as.numeric(as.character(powerdata$Global_reactive_power))
# Concatenating date and time to convert it to a time format
powerdata$concat_datetime <- paste(powerdata$Date,powerdata$Time,sep=' ')
# to time format (POSIXlt) but date is also included.
powerdata$datetime <- strptime(powerdata$concat_datetime,"%Y-%m-%d %H:%M:%S")
#####
# Launch a png file device: 480x480, then lay out four panels in a 2x2 grid
png(filename = "plot4.png",
    width = 480, height = 480,
    bg = "transparent")
par(mfrow = c(2, 2))
with(powerdata, {
  ## Plot 1: global active power over the two days
  plot(powerdata$datetime, powerdata$Global_active_power,
       type = "l",
       ylab = "Global Active Power",
       xlab = "")
  ## Plot 2: voltage over time
  # x axis to labelled "datetime"
  plot(powerdata$datetime, powerdata$Voltage,
       type = "l",
       ylab = "Voltage",
       xlab = "datetime")
  ## Plot 3: the three sub-metering series overlaid on shared axes
  plot(powerdata$datetime, powerdata$Sub_metering_1,
       type = "l",
       ylab = "Energy sub metering",
       xlab = "",
       ylim=c(0,38))
  #Add the chart for Sub_metering_2
  lines(powerdata$datetime, powerdata$Sub_metering_2, col= "red")
  #Add the chart for Sub_metering_3
  lines(powerdata$datetime, powerdata$Sub_metering_3, col= "blue")
  #Add the legend (remove the box around the legend (bty="n"))
  legend("topright", lwd=1, col =c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), bty="n")
  ## Plot 4: global reactive power over time
  # label for y axis to be the actual variable name "Global_reactive_power"
  # x axis to labelled "datetime"
  plot(powerdata$datetime, powerdata$Global_reactive_power,
       type = "l",
       ylab = "Global_reactive_power",
       xlab = "datetime")
})
# Close the png file device
dev.off()
|
/04 - Exploratory Data Analysis/Assignment 1/plot4.R
|
no_license
|
twgoh/Practical-Machine-Learning
|
R
| false
| false
| 2,557
|
r
|
# Read in the electric power consumption dataset
rawdata <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)

# Keep only observations from the dates 1 Feb and 2 Feb 2007.
# Date is parsed once; comparing Date objects with %in% avoids the
# redundant per-row as.Date() re-conversion of an already-Date column.
rawdata$Date <- as.Date(rawdata$Date, "%d/%m/%Y")
powerdata <- subset(rawdata, Date %in% as.Date(c("2007-02-01", "2007-02-02")))

# Convert the factor/character measurement columns into numeric variables.
# as.character() first so factor LEVELS are parsed, not their integer codes.
measure_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
powerdata[measure_cols] <- lapply(powerdata[measure_cols],
                                  function(col) as.numeric(as.character(col)))

# Concatenate date and time, then parse to a date-time for the x axis
powerdata$concat_datetime <- paste(powerdata$Date, powerdata$Time, sep = " ")
powerdata$datetime <- strptime(powerdata$concat_datetime, "%Y-%m-%d %H:%M:%S")

# Launch a png file device (480x480 pixels, transparent background)
png(filename = "plot4.png",
    width = 480, height = 480,
    bg = "transparent")
# 2x2 panel layout, filled row by row
par(mfrow = c(2, 2))
with(powerdata, {
  ## Plot 1: global active power over time (no x label)
  plot(datetime, Global_active_power,
       type = "l",
       ylab = "Global Active Power",
       xlab = "")
  ## Plot 2: voltage over time, x axis labelled "datetime"
  plot(datetime, Voltage,
       type = "l",
       ylab = "Voltage",
       xlab = "datetime")
  ## Plot 3: the three sub-metering series overlaid on one panel
  plot(datetime, Sub_metering_1,
       type = "l",
       ylab = "Energy sub metering",
       xlab = "",
       ylim = c(0, 38))
  lines(datetime, Sub_metering_2, col = "red")
  lines(datetime, Sub_metering_3, col = "blue")
  ## Legend without a surrounding box (bty = "n")
  legend("topright", lwd = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         bty = "n")
  ## Plot 4: global reactive power over time
  plot(datetime, Global_reactive_power,
       type = "l",
       ylab = "Global_reactive_power",
       xlab = "datetime")
})
# Close the png file device
dev.off()
|
## the following must match the 'unique' method in ULI, as explained there
## Return the rows of 'data' with distinct coordinate values, keeping the
## rownames of the first occurrence of each distinct row.
## Rows are compared through a collapsed character key ("x y ...") so the
## result matches character-based 'unique' semantics elsewhere (ULI).
calcUniqueGeo <- function(data) {
  ## one character key per row; duplicated rows share the same key
  rowKeys <- apply(data, 1, paste, collapse = " ")
  keyed <- cbind(data, rowKeys)
  ## unique() on the key column alone keeps rownames of first instances
  firstHits <- unique(keyed[, ncol(keyed), drop = FALSE])
  ## re-extract from 'data' so original (numeric) values are returned
  data[rownames(firstHits), , drop = FALSE]
}
## Extract the coordinate names from the RHS of a spatial term
## (e.g. Matern(1|x+y) -> c("x","y")) and check that they all occur in
## 'datanames'. Returns the names ordered as in the formula RHS
## (this ordering matters for predict).
`extract.check.coords` <- function(spatial.model,datanames) {
  if (is.null(spatial.model)) {
    stop("Call to 'HLCor' without a spatial term in the formula is suspect.")
    ## legacy fallback for the very old (1|pos) syntax (unreachable)
    coordinates <- c("x","y") ## back compat
  } else {
    ## deparse the RHS of ( . | . ), e.g. "x + y", then split on spaces
    ## and drop formula operators; "" guards against hidden linebreaks
    rhs <- DEPARSE(spatial.model[[2]][[3]])
    coordinates <- setdiff(strsplit(rhs," ")[[1]],
                           c("+","%in%",":","/",""))
  }
  found <- which(datanames %in% coordinates)
  if (length(found) != length(coordinates)) {
    stop("variables 'coordinates' not all found in the 'data'")
  }
  coordinates
}
## better for development to avoid name conflicts with OKsmooth :toCanonical and :canonize
## Map 'ranPars' from the transformed scale used during optimization
## (trNu, trRho, trPhi, trLambda) back to the canonical scale
## (nu, rho, phi, lambda), keeping the parallel "type" attribute in sync,
## and collect the canonical correlation parameters for the given model.
##
## Args:
##   ranPars: list of (possibly transformed) parameters; expected to carry
##     RHOMAX / NUMAX attributes when trRho / trNu are present.
##   corr.model: correlation model name ("Matern", "AR1", "adjacency",
##     "corrMatrix", ...) selecting which parameters are required.
##   checkComplete: if TRUE, stop() when a required parameter is missing.
##
## Returns: list(trueCorrpars = <canonical correlation parameters>,
##               ranPars = <updated parameter list>).
canonizeRanPars <- function(ranPars, ## should have a RHOMAX attribute when trRho in input
                       corr.model,checkComplete=TRUE) {
  trueCorrpars <- list()
  if (corr.model %in% c("Matern")) {
    if (!is.null(ranPars$trNu)) { ## either we have nu,rho or trNu,trRho
      ## back-transform nu first: nuInv needs trRho before it is removed below
      ranPars$nu <- nuInv(ranPars$trNu,ranPars$trRho,NUMAX=attr(ranPars,"NUMAX")) ## before trRho is removed...
      ranPars$trNu <- NULL
      ## keep the "type" bookkeeping consistent with the renaming trNu -> nu
      attr(ranPars,"type")$nu <- attr(ranPars,"type")$trNu
      attr(ranPars,"type")$trNu <- NULL
    }
    nu <- ranPars$nu
    if (is.null(nu) && checkComplete) {
      mess <- pastefrom("nu missing from ranPars (or correlation model mis-identified).",prefix="(!) From ")
      stop(mess)
    }
    trueCorrpars$nu <- nu
  }
  if (corr.model=="AR1") {
    ARphi <- ranPars$ARphi
    if (is.null(ARphi) && checkComplete) {
      mess <- pastefrom("ARphi missing from ranPars.",prefix="(!) From ")
      stop(mess)
    }
    trueCorrpars$ARphi <- ARphi
  } else if (corr.model != "corrMatrix") { ## all models with a 'rho' parameter
    if (!is.null(ranPars$trRho)) {
      ranPars$rho <- rhoInv(ranPars$trRho,RHOMAX=attr(ranPars,"RHOMAX"))
      ranPars$trRho <- NULL
      attr(ranPars,"type")$rho <- attr(ranPars,"type")$trRho
      attr(ranPars,"type")$trRho <- NULL
    } ## else there may simply be rho rather than trRho (including for adjacency model through optim procedure !)
    trueCorrpars$rho <- rho <- ranPars$rho
    if (is.null(rho)) {
      if(corr.model=="adjacency") { ## then allow a direct call through HLCor
        ## rho will be estimated within HLfit: mark it as a variable
        ranPars$rho <- 0
        attr(ranPars,"type")$rho <- "var"
      } else if (checkComplete) {
        mess <- pastefrom("rho missing from ranPars.",prefix="(!) From ")
        stop(mess)
      }
    }
  }
  ## optional nugget is passed through unchanged (already canonical)
  Nugget <- ranPars$Nugget
  if (! is.null(Nugget)) trueCorrpars$Nugget <- Nugget
  ## dispersion parameters: back-transform phi and lambda if transformed
  if (!is.null(ranPars$trPhi)) {
    ranPars$phi <- dispInv(ranPars$trPhi)
    ranPars$trPhi <- NULL
    attr(ranPars,"type")$phi <- attr(ranPars,"type")$trPhi
    attr(ranPars,"type")$trPhi <- NULL
  } else if (!is.null(ranPars$logphi)) { ## debug code
    ## HL.info$ranFix$phi <- exp(ranPars$logphi)
    stop("logphi in HLCor...")
  } ##################### else HL.info$ranFix$phi <- ranPars$phi ## already there !?
  if (!is.null(ranPars$trLambda)) {##
    ranPars$lambda <- dispInv(ranPars$trLambda)
    ranPars$trLambda <- NULL
    attr(ranPars,"type")$lambda <- attr(ranPars,"type")$trLambda
    attr(ranPars,"type")$trLambda <- NULL
  } else if (!is.null(ranPars$loglambda)) { ## debug code
    stop("loglambda in HLCor...")
  } ##################### else HL.info$ranFix$lambda <- ranPars$lambda
  return(list(trueCorrpars=trueCorrpars,ranPars=ranPars))
}
## User-level wrapper fitting a model with a given spatial correlation
## structure. Checks arguments, handles the "multi" response family and
## lists of data sets (one recursive fit per element), preprocesses the
## arguments when needed, then delegates the single fit to HLCor_body().
##
## Args:
##   formula: model formula, expected to contain a spatial term
##     (e.g. Matern(1|x+y), adjacency(1|pos), corrMatrix(1|grp)).
##   data: a data.frame, or a list of data.frames (one fit per element).
##   family: response family; checkRespFam() handles the "multi" family.
##   ranPars: all dispersion and correlation parameters.
##   distMatrix/uniqueGeo/adjMatrix/corrMatrix: optional precomputed
##     spatial structures, forwarded to HLCor_body().
##   verbose: named logical vector controlling warnings/trace/summary.
##   control.dist: distance-computation controls (dist.method, rho.mapping).
##   ...: further arguments forwarded to HLfit via HLCor_body().
##
## Returns: an HLfit-type object (with the original call kept in the
##   "HLCorcall" attribute), or an "HLfitlist" when fitting a list.
HLCor <- function(formula,
                  data,family=gaussian(),
                  ranPars=NULL, ## all dispersion and correlation params ideally provided through ranPars
                  distMatrix,uniqueGeo=NULL,adjMatrix,corrMatrix,
                  verbose=c(warn=TRUE,trace=FALSE,summary=FALSE),control.dist=list(),
                  ...) {
  oricall <- mc <- match.call(expand.dots = TRUE)
  if ( ! is.null(mc$ranFix)) { ## avoiding user's confusion
    stop("!From HLCor: ranFix found in '...'. Make sure to use ranPars only")
  }
  if (!is.null(mc$LamFix)) {
    stop("argument LamFix of HLCor is obsolete")
  }
  if (!is.null(mc$PhiFix)) {
    stop("argument PhiFix of HLCor is obsolete")
  }
  # first steps as in HLFit: (no need to test missing(data) in several functions)
  if (is.null(processed <- mc$processed)) { ## no 'processed'
    ## FR->FR suggests we should add processed as argument of HLCor...
    family <- checkRespFam(family)
    if ( identical(family$family,"multi")) {
      if ( ! inherits(data,"list")) {
        if(family$binfamily$family=="binomial") {
          ## split a multinomial response into a list of binomial data sets
          familyargs <- family
          familyargs$family <- NULL
          familyargs$binfamily <- NULL
          data <- do.call(binomialize,c(list(data=data),familyargs)) ## if data not already binomialized
        }
      }
    }
    if ( inherits(data,"list")) {
      ## RUN THIS LOOP over the list of data sets and return
      fitlist <- lapply(seq_len(length(data)),function(it){
        locmc <- mc
        if (identical(family$family,"multi")) locmc$family <- family$binfamily
        locmc$data <- data[[it]]
        locmc$distMatrix <- mc$distMatrix[[it]]
        locmc$uniqueGeo <- mc$uniqueGeo[[it]]
        eval(locmc) ## recursive HLCor call on the it-th data set
      }) ## a pure list of HLCor objects
      ## sum the likelihood components over the fits
      liks <- sapply(fitlist,function(v) {unlist(v$APHLs)})
      liks <- apply(liks,1,sum)
      attr(fitlist,"APHLs") <- as.list(liks)
      class(fitlist) <- c("HLfitlist",class(fitlist))
      return(fitlist) ## list of HLfit object + one attribute
    } else {## there is a single data set, still without processed
      ## build the preprocess() arguments from HLfit's formals, overridden
      ## by any matching arguments of the present call
      FHF <- formals(HLfit) ## makes sure about default values
      names_FHF <- names(FHF)
      if ( ! is.null(mc$resid.formula)) mc$resid.model <- mc$resid.formula
      names_nondefault <- intersect(names(mc),names_FHF) ## mc including dotlist
      FHF[names_nondefault] <- mc[names_nondefault] ## full HLfit args
      preprocess.formal.args <- FHF[which(names_FHF %in% names(formals(preprocess)))]
      preprocess.formal.args$family <- family ## already checked
      preprocess.formal.args$rand.families <- FHF$rand.family ## because preprocess expects $rand.families
      preprocess.formal.args$predictor <- FHF$formula ## because preprocess still expects $predictor
      preprocess.formal.args$ranFix <- ranPars ## because preprocess expects ranFix
      mc$processed <- do.call(preprocess,preprocess.formal.args,envir=environment(formula))
      # HLCor_body() called below
    }
  } else { ## 'processed' is available
    multiple <- attr(processed,"multiple")
    if ( ( ! is.null(multiple)) && multiple) { ## "multiple" processed list
      ## RUN THIS LOOP over the preprocessed data sets and return
      fitlist <- lapply(seq_len(length(processed)),function(it){
        locmc <- mc
        locmc$processed <- processed[[it]] ## The data are in processed !
        locmc$distMatrix <- distMatrix[[it]] ## but the matrices are not HLfit args hence not in processed !
        locmc$uniqueGeo <- uniqueGeo[[it]]
        eval(locmc) ## recursive HLCor call on the it-th processed object
      }) ## a pure list of HLCor objects
      liks <- sapply(fitlist,function(v) {unlist(v$APHLs)})
      liks <- apply(liks,1,sum)
      attr(fitlist,"APHLs") <- as.list(liks)
      class(fitlist) <- c("HLfitlist",class(fitlist))
      return(fitlist) ## list of HLfit object + one attribute
    } else { ## there is one processed for a single data set
      # HLCor_body() called below
    }
  }
  ################# single processed, single data analysis:
  mc$verbose <- reformat_verbose(mc$verbose,For="HLCor")
  ## strip arguments already absorbed into 'processed' before forwarding
  mc$data <- NULL
  mc$family <- NULL
  mc$formula <- NULL
  mc$prior.weights <- NULL
  mc$HLmethod <- NULL ## processed$HL
  mc$rand.family <- NULL ## processed$rand.families
  mc$resid.formula <- NULL ## mc$resid.model
  mc$REMLformula <- NULL
  ## rewrite the call head so evaluating 'mc' runs HLCor_body()
  mc[[1L]] <- quote(spaMM::HLCor_body)
  hlcor <- eval(mc,parent.frame())
  attr(hlcor,"HLCorcall") <- oricall ## potentially used by getCall(object) in update.HL
  if (mc$verbose["HLCorSummary"]) { ## useful in final call from corrHLfit
    summary(hlcor) ## input corr pars have been printed at the beginning...
  }
  return(hlcor)
}
## Workhorse for HLCor: builds the spatial correlation structure implied by
## the spatial term of the formula (adjacency / SAR_WWt / AR1 / Matern /
## corrMatrix), factors it into an 'LMatrix' attached to the predictor,
## then calls HLfit with canonical parameters.
##
## Args:
##   processed: preprocessed model description (holds data and predictor).
##   ranPars: all dispersion and correlation params, possibly transformed;
##     converted back to canonical scale via canonizeRanPars().
##   distMatrix/uniqueGeo/adjMatrix/corrMatrix: optional precomputed
##     spatial structures (which ones are used depends on the model).
##   verbose: named logical vector (warn/trace/summary).
##   control.dist: distance controls (dist.method, rho.mapping).
##   ...: further arguments; those matching HLfit formals are forwarded.
##
## Returns: the HLfit object, augmented with control.dist, an
##   "info.uniqueGeo" attribute and (Matern) an "msd.arglist" attribute.
HLCor_body <- function(processed,
                       ranPars=NULL, ## all dispersion and correlation params ideally provided through ranPars
                       distMatrix,uniqueGeo=NULL,adjMatrix,corrMatrix,
                       verbose=c(warn=TRUE,trace=FALSE,summary=FALSE),control.dist=list(),
                       ...) {
  dotlist <- list(...)
  #################
  data <- processed$data
  predictor <- processed$predictor
  ## identify the spatial term and hence the correlation model
  spatial.terms <- findSpatial(predictor)
  spatial.model <- spatial.terms[[1L]]
  if ( ! is.null(spatial.model)) {
    corr.model <- as.character(spatial.model[[1L]])
  } else {
    if ( ! missing(corrMatrix)) {
      mess <- pastefrom("corrMatrix argument despite no corrMatrix term in formula:",prefix="(!) From ")
      message(mess)
      stop("This syntax is obsolete; add a corrMatrix(...) term in the formula.")
    } ## ELSE more generic message:
    stop("Call to 'HLCor' without a spatial term in the formula is suspect.")
  }
  ## convert back ranPars to canonical scale:
  rpblob <- canonizeRanPars(ranPars=ranPars,corr.model=corr.model)
  ranPars <- rpblob$ranPars
  trueCorrpars <- rpblob$trueCorrpars
  rho <- ranPars$rho
  #
  coordinates <- NULL
  test.in <- FALSE
  ### ensure LMatrix in predictor:
  ## if it is currently absent, first provide corr matrix or its symSVD, from which Lunique will be computed using designL.from.Corr
  if (is.null(Lunique <- attr(predictor,"LMatrix"))) {
    symSVD <- NULL
    if (corr.model %in% c("adjacency","ar1")) { ## "ar1" != "AR1" is a tempo name for a future generic model
      if ( missing(adjMatrix) ) stop("missing 'adjMatrix' for adjacency model")
      ## no nugget in the adjacency model... ## (use additional ranef instead)
      symSVD <- attr(adjMatrix,"symSVD")
      if (is.null(symSVD) && identical(attr(ranPars,"type")$rho,"var")) { ## can occur in direct call of HLCor ## identical() handles NULL args
        ## eigendecompose once so rho can be varied cheaply within HLfit
        if (isSymmetric(adjMatrix)) {
          symSVD <- selfAdjointWrapper(adjMatrix)
          attr(adjMatrix,"symSVD") <- symSVD
        }
      }
      if (is.null(symSVD)) {
        ## direct dense inverse: (I - rho W)^{-1}
        corrm <- solve(diag(nrow(adjMatrix))-rho*(adjMatrix))
      } else {
        ## same correlation via the eigenvalues: d -> 1/(1 - rho d)
        symSVD$adjd <- symSVD$d
        symSVD$d <- 1/(1-rho*symSVD$d) ## from adjMatrix to correlation matrix
      }
    } else if (corr.model %in% c("SAR_WWt")) { ## "ar1" != "AR1" is a tempo name for a future generic model
      if ( missing(adjMatrix) ) stop("missing 'adjMatrix' for adjacency model")
      UDU. <- attr(adjMatrix,"UDU.")
      if (is.null(UDU.)) {
        corrm <- solve(diag(nrow(adjMatrix))-rho*(adjMatrix))
      } else {
        ## reconstruct (I - rho W)^{-1} from the stored U D U' decomposition
        corrm <- UDU.$u %*% sweep(UDU.$u.,MARGIN=1,1/(1-rho*UDU.$d),`*`)
      }
      ## SAR covariance is of the WW' form
      corrm <- tcrossprodCpp(corrm)
    } else if (corr.model=="AR1") {
      coordinates <- extract.check.coords(spatial.model=spatial.model,datanames=names(data))
      uniqueGeo <- calcUniqueGeo(data=data[,coordinates,drop=FALSE])
      txt <- paste(spatial.model[[2]][[3]]) ## the RHS of the ( . | . )
      if (length(grep("%in%",txt))>0) {
        stop("HLCor code should be allowed again to handle blockDiag objects")
        #scaled.dist <- as.blockDiag.bar(spatial.model[[2]],formula,data=uniqueGeo)
        #test.in <- TRUE
      } else scaled.dist <- proxy::dist(uniqueGeo)
      ## AR1 correlation decays geometrically with distance
      corrm <- trueCorrpars$ARphi^scaled.dist
    } else if (corr.model %in% c("Matern")) {
      txt <- paste(spatial.model[[2]][[3]]) ## the RHS of the ( . | . )
      if (length(grep("%in%",txt))>0) {
        stop("(!) Matern( . | <coord> %in% <grp>) is not yet handled.")
        test.in <- TRUE ## should be useful when this case will be handled
      }
      ## in a typical call from corrHLfit the following test should be FALSE because uniqueGeo and maybe distMatrix should have been precomputed
      if ((length(rho)>1 || missing(distMatrix)) && is.null(uniqueGeo)) { ## all cases where we need uniqueGeo
        coordinates <- extract.check.coords(spatial.model=spatial.model,datanames=names(data))
        uniqueGeo <- calcUniqueGeo(data=data[,coordinates,drop=FALSE]) ## keeps the names of first instances of the coordinates in data
      }
      ## then compute scaled distances from unscaled info, for HLfit call
      msd.arglist <- list(rho = rho)
      msd.arglist$`dist.method` <- control.dist$`dist.method` ## may be NULL
      if (length(rho)>1L) {
        ## anisotropic scaling: one rho per (mapped) coordinate
        msd.arglist <- c(msd.arglist,list(uniqueGeo=uniqueGeo))
        msd.arglist$`rho.mapping` <- control.dist$`rho.mapping` ## may be NULL
      } else {
        if ( missing(distMatrix)) {
          dist.arglist <- list(x=uniqueGeo)
          dist.arglist$method <- control.dist$dist.method ## may be NULL
          distMatrix <- do.call(proxy::dist,dist.arglist)
        }
        msd.arglist <- c(msd.arglist,list(distMatrix=distMatrix))
      }
      corrm <- do.call("make_scaled_dist",msd.arglist)
      ## if at this point there is a single location, corrm should be dist(0) and make_scaled_dist was modified to that effect
      if ( nrow(corrm)>1 ) { ## >1 locations
        norho <- trueCorrpars; norho$rho <- NULL ## because the MaternCorr input will be an already scaled distance 'corrm'
        corrm <- do.call(MaternCorr,args=c(norho,list(corrm)))
      }
    } else if (corr.model== "corrMatrix") {
      if (missing(corrMatrix)) {
        mess <- pastefrom("missing(corrMatrix) argument despite corrMatrix term in formula.",prefix="(!) From ")
        stop(mess)
      } ## ELSE:
      corrm <- corrMatrix
      Lunique <- attr(corrMatrix,"LMatrix") ## will typically be NULL, but super-users ;-) may have provided it
    }
    if (verbose["trace"] && length(trueCorrpars)>0) print(unlist(trueCorrpars))
    ## call designL.from.Corr if Lunique not available
    if (is.null(Lunique)) { ## test FR 11/2013 ## modif 2015/04. Note a computation of Lunique above
      if ( ! is.null(symSVD)) {
        Lunique <- try(designL.from.Corr(symSVD=symSVD))
      } else { ## corrm must exist
        argsfordesignL <- dotlist[intersect(names(dotlist),names(formals(designL.from.Corr)))]
        if (processed$HL[1L]=="SEM") argsfordesignL$try.chol <- FALSE
        if (inherits(corrm,"dist")) {
          ## 'dist' objects have no diagonal: convert and set unit diagonal
          corrm <- as.matrix(corrm)
          diag(corrm) <- 1L ## always a correlation matrix
        }
        Lunique <- try(do.call(designL.from.Corr,c(list(m=corrm),argsfordesignL)))
      }
      if (inherits(Lunique,"try-error")) {
        print("correlation parameters were:") ## makes sense if designL.from.Corr already issued some warning
        print(unlist(trueCorrpars))
        stop()
      }
    }
    attr(predictor,"%in%") <- test.in
    attr(Lunique,"corr.model") <- corr.model
    attr(Lunique,"ranefs") <- unlist(lapply(spatial.terms,DEPARSE)) ## essential for the construction of ZAL!
    if ( corr.model=="adjacency"
         && ! is.null(attr(ranPars,"type")) ## ie through corrHLfit call
         && "var" %in% attr(ranPars,"type")$rho ## then not a call for fixed rho => estim of rho within HLfit through SEM or augm GLM
    ) { ## then define ZA.L as ZA. U(adjacency matrix)
      Lunique[] <- attr(Lunique,"symsvd")$u ## "[]" keeps attributes
    }
    attr(predictor,"LMatrix") <- Lunique
  }
  processed$predictor <- predictor
  ###
  ## forward only the '...' arguments that HLfit actually accepts
  HLFormals <- names(formals(HLfit))
  good_dotnames <- intersect(names(dotlist),HLFormals)
  if (length(good_dotnames)>0L) {
    HL.info <- dotlist[good_dotnames]
  } else HL.info <- list()
  ## all printing in HLfit is suppressed by default
  HL.info$verbose <- verbose #[intersect(names(verbose),c("warn","trace","summary","SEM"))]
  HL.info$processed <- processed
  ## convert ranPars to ranFix + init.HLfit
  ## allows log and not log:
  varNames <- names(which(attr(ranPars,"type")=="var"))
  HL.info$init.HLfit[varNames] <- ranPars[varNames] ## inherits values from corrHLfit(...,init.HLfit(...))
  fixNames <- setdiff(names(ranPars),varNames)
  if (!is.null(fixNames)) { ## could be NULL for corrMatrix case
    ranFix <- ranPars[fixNames] ## 11/2014 as there is no other source for ranFix
    typelist <- list()
    typelist[fixNames] <- "fix"
    if (!is.null(rPtype <- attr(ranPars,"type"))) { ## it may not exist, or elements may be "fix" or "outer"
      typelist[names(rPtype)] <- rPtype
    }
    attr(ranFix,"type") <- typelist
    HL.info$ranFix <- ranFix
  }
  hlfit <- do.call("HLfit",HL.info) ## with a _list_ of arguments -> do.call ## perhaps should create a list of unevaluated arguments ????
  if ( ! is.null(hlfit$error)) {
    ## save the failing arguments to a file to allow offline replication
    errfile <- generateFileName("HLfitCall")
    errfile <- paste(errfile,".RData",sep="")
    save(HL.info,file=errfile)
    mess <- pastefrom("'do.call(HLfit,HL.info)' failed:",prefix="(!) From ")
    message(mess)
    message(hlfit$error)
    message("'HL.info' is saved in the ",errfile," file",sep="")
    stop("I exit.")
  } ## ELSE:
  hlfit$control.dist <- control.dist
  attr(hlfit,"info.uniqueGeo") <- uniqueGeo ## more spatial info is in the hlfit$predictor's attributes (Lunique = corrmat^1/2, and ZALMatrix)
  if (corr.model %in% c("Matern")) attr(hlfit,"msd.arglist") <- msd.arglist ## more organized, easier to reuse.
  # particularly for $rho.mapping:
  # should be NULL if length rho = 1 or if original control.dist$rho.mapping was NULL
  ## FR->FR but info.uniqueGeo more general (eg AR1) -> to be revisited
  hlfit$call <- "$call removed by HLCor. Consider the 'HLCorcall' attribute instead." ## instead of the $call with evaluated arguments
  return(hlfit) ##
}
## wrapper for HLCor, suitable input and output for optimization
## Objective-function wrapper around HLCor for numerical optimizers:
## takes a bare numeric vector of parameters, relists it onto 'skeleton',
## runs HLCor, and returns the requested likelihood component.
##
## Args:
##   ranefParsVec: numeric vector of optimized parameter values
##     (its name MUST differ from any argument name captured in '...').
##   skeleton: template list used by relist() to reshape ranefParsVec;
##     carries RHOMAX/NUMAX/"type" attributes describing the parameters.
##   HLCor.obj.value: which element of the fit's APHLs to return ("p_bv"...).
##   traceFileName: if a character string, parameter/likelihood traces are
##     appended to that file (unlink'ed by corrHLfit).
##   ...: forwarded to HLCor; must include 'processed'.
##
## Returns: a scalar likelihood value (summed over fits for "multiple"
##   processed lists).
`HLCor.obj` <- function(ranefParsVec,skeleton,HLCor.obj.value="p_bv",traceFileName=NULL,...) { ## name of first arg MUST differ from names in dotlist...
  mc <- match.call(expand.dots=TRUE) ## (1) expand.dots added 11/04/2014 for the multinomial... eval
  if (is.null(processed <- mc$processed)) {
    stop("Call to HLCor.obj() without a 'processed' argument is invalid")
  } else { ## 'processed' is available
    multiple <- attr(processed,"multiple")
    if ( ( ! is.null(multiple)) && multiple) { ## "multiple" processed list
      ## RUN THIS LOOP over the preprocessed data sets and return
      fitlist <- lapply(seq_len(length(processed)),function(it){
        locmc <- mc
        locmc[[1L]] <- as.name("HLCor.obj") ## replaces "f" !
        locmc$ranefParsVec <- ranefParsVec ## replaces "arg" !
        locmc$processed <- processed[[it]] ## The data are in processed !
        locmc$distMatrix <- mc$distMatrix[[it]] ## but the matrices are not HLfit args hence not in processed !
        locmc$uniqueGeo <- mc$uniqueGeo[[it]]
        eval(locmc) ## recursive HLCor.obj call on the it-th data set
      }) ## a pure list of HLCor objects
      resu <- sum(unlist(fitlist))
      if (is.character(traceFileName)) {
        verif <- paste("#global:",ranefParsVec,resu)
        write(verif,file=traceFileName,append=T) ## the file is unlink'ed in corrHLfit()
      }
      return(resu)
    } else { ## there is one processed for a single data set
      family <- processed$family
      data <- processed$data
    }
  }
  ## keep only the call arguments that some downstream function understands
  HLCor.formals <- names(formals(HLCor))
  names_formals_HLfit <- names(formals(HLfit))
  designL.formals <- names(formals(designL.from.Corr))
  makescaled.formals <- names(formals(make_scaled_dist))
  HLnames <- (c(HLCor.formals,names_formals_HLfit,designL.formals,makescaled.formals)) ## cf parallel code in corrHLfit
  HLCor.call <- mc[c(1,which(names(mc) %in% HLnames))] ## keep the call structure
  HLCor.call[[1L]] <- quote(spaMM::HLCor)
  ## reshape the optimizer's flat vector into the structured parameter list
  forGiven <- relist(ranefParsVec,skeleton) ## given values of the optimized variables
  ## ... relist keeps the RHOMAX... attributes from the skeleton, but the partial copy into ranPars does not.
  HLCor.call$ranPars[names(forGiven)] <- forGiven ## do not wipe out other fixed, non optimized variables
  attr(HLCor.call$ranPars,"RHOMAX") <- attr(skeleton,"RHOMAX")
  attr(HLCor.call$ranPars,"NUMAX") <- attr(skeleton,"NUMAX")
  types <- attr(skeleton,"type")
  attr(HLCor.call$ranPars,"type")[names(types)] <- types
  if (is.character(traceFileName)) {
    ## save the exact call, for replicating a problematic evaluation
    if(.spaMM.data$options$TRACE.UNLINK) unlink("HLCor.call*.RData")
    zut <- paste(ranefParsVec,collapse="")
    save(HLCor.call,file=paste("HLCor.call",zut,".RData",sep="")) ## for replicating the problem
  }
  hlfit <- eval(HLCor.call)
  aphls <- hlfit$APHLs
  resu <- aphls[[HLCor.obj.value]]
  if (is.character(traceFileName)) {
    ## log canonical-scale parameters alongside the likelihood components
    readable <- unlist(canonizeRanPars(ranPars=forGiven,corr.model=mc$`corr.model`,checkComplete=FALSE)$ranPars)
    verif <- c(unlist(aphls),hlfit$lambda,hlfit$phi,readable,ranefParsVec) ## hlfit$phi may be NULL
    write(verif,file=traceFileName,ncolumns=length(verif),append=TRUE) ## the file is unlink'ed in corrHLfit()
  }
  return(resu) #
}
|
/spaMM/R/HLCor.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 21,910
|
r
|
## the following must match the 'unique' method in ULI, as explained there
## Deduplicate the rows of 'data', returning the first occurrence of each
## distinct row with its original rowname and original (numeric) values.
## Distinctness is judged on a collapsed character representation so the
## result agrees with character-based 'unique' semantics (ULI).
calcUniqueGeo <- function(data) {
  key <- apply(data, 1, paste, collapse = " ")  ## "x y ..." per row
  withKey <- cbind(data, key)
  ## unique() on the key column keeps the rownames of first occurrences
  first <- unique(withKey[, ncol(withKey), drop = FALSE])
  ## index back into 'data' to recover the untouched numeric columns
  return(data[rownames(first), , drop = FALSE])
}
## Parse the coordinate names out of a spatial term's RHS (e.g. "x + y")
## and verify that every one of them is a column of the data.
## The return order follows the formula RHS, which predict() relies on.
`extract.check.coords` <- function(spatial.model,datanames) {
  if (is.null(spatial.model)) {
    stop("Call to 'HLCor' without a spatial term in the formula is suspect.")
    ## dead code kept for reference: old (1|pos) syntax defaults
    coordinates <- c("x","y") ## back compat
  }
  ## deparse the RHS of ( . | . ) and split into tokens; remove formula
  ## operators and any empty strings left by hidden linebreaks
  tokens <- strsplit(DEPARSE(spatial.model[[2]][[3]])," ")[[1]]
  coordinates <- setdiff(tokens,c("+","%in%",":","/",""))
  matched <- which(datanames %in% coordinates)
  if (length(matched) != length(coordinates)) {
    stop("variables 'coordinates' not all found in the 'data'")
  }
  return(coordinates)
}
## better for development to avoid name conflicts with OKsmooth :toCanonical and :canonize
## Map 'ranPars' from the transformed scale used during optimization
## (trNu, trRho, trPhi, trLambda) back to the canonical scale
## (nu, rho, phi, lambda), keeping the parallel "type" attribute in sync,
## and collect the canonical correlation parameters for the given model.
##
## Args:
##   ranPars: list of (possibly transformed) parameters; expected to carry
##     RHOMAX / NUMAX attributes when trRho / trNu are present.
##   corr.model: correlation model name ("Matern", "AR1", "adjacency",
##     "corrMatrix", ...) selecting which parameters are required.
##   checkComplete: if TRUE, stop() when a required parameter is missing.
##
## Returns: list(trueCorrpars = <canonical correlation parameters>,
##               ranPars = <updated parameter list>).
canonizeRanPars <- function(ranPars, ## should have a RHOMAX attribute when trRho in input
                       corr.model,checkComplete=TRUE) {
  trueCorrpars <- list()
  if (corr.model %in% c("Matern")) {
    if (!is.null(ranPars$trNu)) { ## either we have nu,rho or trNu,trRho
      ## back-transform nu first: nuInv needs trRho before it is removed below
      ranPars$nu <- nuInv(ranPars$trNu,ranPars$trRho,NUMAX=attr(ranPars,"NUMAX")) ## before trRho is removed...
      ranPars$trNu <- NULL
      ## keep the "type" bookkeeping consistent with the renaming trNu -> nu
      attr(ranPars,"type")$nu <- attr(ranPars,"type")$trNu
      attr(ranPars,"type")$trNu <- NULL
    }
    nu <- ranPars$nu
    if (is.null(nu) && checkComplete) {
      mess <- pastefrom("nu missing from ranPars (or correlation model mis-identified).",prefix="(!) From ")
      stop(mess)
    }
    trueCorrpars$nu <- nu
  }
  if (corr.model=="AR1") {
    ARphi <- ranPars$ARphi
    if (is.null(ARphi) && checkComplete) {
      mess <- pastefrom("ARphi missing from ranPars.",prefix="(!) From ")
      stop(mess)
    }
    trueCorrpars$ARphi <- ARphi
  } else if (corr.model != "corrMatrix") { ## all models with a 'rho' parameter
    if (!is.null(ranPars$trRho)) {
      ranPars$rho <- rhoInv(ranPars$trRho,RHOMAX=attr(ranPars,"RHOMAX"))
      ranPars$trRho <- NULL
      attr(ranPars,"type")$rho <- attr(ranPars,"type")$trRho
      attr(ranPars,"type")$trRho <- NULL
    } ## else there may simply be rho rather than trRho (including for adjacency model through optim procedure !)
    trueCorrpars$rho <- rho <- ranPars$rho
    if (is.null(rho)) {
      if(corr.model=="adjacency") { ## then allow a direct call through HLCor
        ## rho will be estimated within HLfit: mark it as a variable
        ranPars$rho <- 0
        attr(ranPars,"type")$rho <- "var"
      } else if (checkComplete) {
        mess <- pastefrom("rho missing from ranPars.",prefix="(!) From ")
        stop(mess)
      }
    }
  }
  ## optional nugget is passed through unchanged (already canonical)
  Nugget <- ranPars$Nugget
  if (! is.null(Nugget)) trueCorrpars$Nugget <- Nugget
  ## dispersion parameters: back-transform phi and lambda if transformed
  if (!is.null(ranPars$trPhi)) {
    ranPars$phi <- dispInv(ranPars$trPhi)
    ranPars$trPhi <- NULL
    attr(ranPars,"type")$phi <- attr(ranPars,"type")$trPhi
    attr(ranPars,"type")$trPhi <- NULL
  } else if (!is.null(ranPars$logphi)) { ## debug code
    ## HL.info$ranFix$phi <- exp(ranPars$logphi)
    stop("logphi in HLCor...")
  } ##################### else HL.info$ranFix$phi <- ranPars$phi ## already there !?
  if (!is.null(ranPars$trLambda)) {##
    ranPars$lambda <- dispInv(ranPars$trLambda)
    ranPars$trLambda <- NULL
    attr(ranPars,"type")$lambda <- attr(ranPars,"type")$trLambda
    attr(ranPars,"type")$trLambda <- NULL
  } else if (!is.null(ranPars$loglambda)) { ## debug code
    stop("loglambda in HLCor...")
  } ##################### else HL.info$ranFix$lambda <- ranPars$lambda
  return(list(trueCorrpars=trueCorrpars,ranPars=ranPars))
}
## User-level wrapper fitting a model with a given spatial correlation
## structure. Checks arguments, handles the "multi" response family and
## lists of data sets (one recursive fit per element), preprocesses the
## arguments when needed, then delegates the single fit to HLCor_body().
##
## Args:
##   formula: model formula, expected to contain a spatial term
##     (e.g. Matern(1|x+y), adjacency(1|pos), corrMatrix(1|grp)).
##   data: a data.frame, or a list of data.frames (one fit per element).
##   family: response family; checkRespFam() handles the "multi" family.
##   ranPars: all dispersion and correlation parameters.
##   distMatrix/uniqueGeo/adjMatrix/corrMatrix: optional precomputed
##     spatial structures, forwarded to HLCor_body().
##   verbose: named logical vector controlling warnings/trace/summary.
##   control.dist: distance-computation controls (dist.method, rho.mapping).
##   ...: further arguments forwarded to HLfit via HLCor_body().
##
## Returns: an HLfit-type object (with the original call kept in the
##   "HLCorcall" attribute), or an "HLfitlist" when fitting a list.
HLCor <- function(formula,
                  data,family=gaussian(),
                  ranPars=NULL, ## all dispersion and correlation params ideally provided through ranPars
                  distMatrix,uniqueGeo=NULL,adjMatrix,corrMatrix,
                  verbose=c(warn=TRUE,trace=FALSE,summary=FALSE),control.dist=list(),
                  ...) {
  oricall <- mc <- match.call(expand.dots = TRUE)
  if ( ! is.null(mc$ranFix)) { ## avoiding user's confusion
    stop("!From HLCor: ranFix found in '...'. Make sure to use ranPars only")
  }
  if (!is.null(mc$LamFix)) {
    stop("argument LamFix of HLCor is obsolete")
  }
  if (!is.null(mc$PhiFix)) {
    stop("argument PhiFix of HLCor is obsolete")
  }
  # first steps as in HLFit: (no need to test missing(data) in several functions)
  if (is.null(processed <- mc$processed)) { ## no 'processed'
    ## FR->FR suggests we should add processed as argument of HLCor...
    family <- checkRespFam(family)
    if ( identical(family$family,"multi")) {
      if ( ! inherits(data,"list")) {
        if(family$binfamily$family=="binomial") {
          ## split a multinomial response into a list of binomial data sets
          familyargs <- family
          familyargs$family <- NULL
          familyargs$binfamily <- NULL
          data <- do.call(binomialize,c(list(data=data),familyargs)) ## if data not already binomialized
        }
      }
    }
    if ( inherits(data,"list")) {
      ## RUN THIS LOOP over the list of data sets and return
      fitlist <- lapply(seq_len(length(data)),function(it){
        locmc <- mc
        if (identical(family$family,"multi")) locmc$family <- family$binfamily
        locmc$data <- data[[it]]
        locmc$distMatrix <- mc$distMatrix[[it]]
        locmc$uniqueGeo <- mc$uniqueGeo[[it]]
        eval(locmc) ## recursive HLCor call on the it-th data set
      }) ## a pure list of HLCor objects
      ## sum the likelihood components over the fits
      liks <- sapply(fitlist,function(v) {unlist(v$APHLs)})
      liks <- apply(liks,1,sum)
      attr(fitlist,"APHLs") <- as.list(liks)
      class(fitlist) <- c("HLfitlist",class(fitlist))
      return(fitlist) ## list of HLfit object + one attribute
    } else {## there is a single data set, still without processed
      ## build the preprocess() arguments from HLfit's formals, overridden
      ## by any matching arguments of the present call
      FHF <- formals(HLfit) ## makes sure about default values
      names_FHF <- names(FHF)
      if ( ! is.null(mc$resid.formula)) mc$resid.model <- mc$resid.formula
      names_nondefault <- intersect(names(mc),names_FHF) ## mc including dotlist
      FHF[names_nondefault] <- mc[names_nondefault] ## full HLfit args
      preprocess.formal.args <- FHF[which(names_FHF %in% names(formals(preprocess)))]
      preprocess.formal.args$family <- family ## already checked
      preprocess.formal.args$rand.families <- FHF$rand.family ## because preprocess expects $rand.families
      preprocess.formal.args$predictor <- FHF$formula ## because preprocess still expects $predictor
      preprocess.formal.args$ranFix <- ranPars ## because preprocess expects ranFix
      mc$processed <- do.call(preprocess,preprocess.formal.args,envir=environment(formula))
      # HLCor_body() called below
    }
  } else { ## 'processed' is available
    multiple <- attr(processed,"multiple")
    if ( ( ! is.null(multiple)) && multiple) { ## "multiple" processed list
      ## RUN THIS LOOP over the preprocessed data sets and return
      fitlist <- lapply(seq_len(length(processed)),function(it){
        locmc <- mc
        locmc$processed <- processed[[it]] ## The data are in processed !
        locmc$distMatrix <- distMatrix[[it]] ## but the matrices are not HLfit args hence not in processed !
        locmc$uniqueGeo <- uniqueGeo[[it]]
        eval(locmc) ## recursive HLCor call on the it-th processed object
      }) ## a pure list of HLCor objects
      liks <- sapply(fitlist,function(v) {unlist(v$APHLs)})
      liks <- apply(liks,1,sum)
      attr(fitlist,"APHLs") <- as.list(liks)
      class(fitlist) <- c("HLfitlist",class(fitlist))
      return(fitlist) ## list of HLfit object + one attribute
    } else { ## there is one processed for a single data set
      # HLCor_body() called below
    }
  }
  ################# single processed, single data analysis:
  mc$verbose <- reformat_verbose(mc$verbose,For="HLCor")
  ## strip arguments already absorbed into 'processed' before forwarding
  mc$data <- NULL
  mc$family <- NULL
  mc$formula <- NULL
  mc$prior.weights <- NULL
  mc$HLmethod <- NULL ## processed$HL
  mc$rand.family <- NULL ## processed$rand.families
  mc$resid.formula <- NULL ## mc$resid.model
  mc$REMLformula <- NULL
  ## rewrite the call head so evaluating 'mc' runs HLCor_body()
  mc[[1L]] <- quote(spaMM::HLCor_body)
  hlcor <- eval(mc,parent.frame())
  attr(hlcor,"HLCorcall") <- oricall ## potentially used by getCall(object) in update.HL
  if (mc$verbose["HLCorSummary"]) { ## useful in final call from corrHLfit
    summary(hlcor) ## input corr pars have been printed at the beginning...
  }
  return(hlcor)
}
HLCor_body <- function(processed,
ranPars=NULL, ## all dispersion and correlation params ideally provided through ranPars
distMatrix,uniqueGeo=NULL,adjMatrix,corrMatrix,
verbose=c(warn=TRUE,trace=FALSE,summary=FALSE),control.dist=list(),
...) {
dotlist <- list(...)
#################
data <- processed$data
predictor <- processed$predictor
spatial.terms <- findSpatial(predictor)
spatial.model <- spatial.terms[[1L]]
if ( ! is.null(spatial.model)) {
corr.model <- as.character(spatial.model[[1L]])
} else {
if ( ! missing(corrMatrix)) {
mess <- pastefrom("corrMatrix argument despite no corrMatrix term in formula:",prefix="(!) From ")
message(mess)
stop("This syntax is obsolete; add a corrMatrix(...) term in the formula.")
} ## ELSE more generic message:
stop("Call to 'HLCor' without a spatial term in the formula is suspect.")
}
## convert back ranPars to canonical scale:
rpblob <- canonizeRanPars(ranPars=ranPars,corr.model=corr.model)
ranPars <- rpblob$ranPars
trueCorrpars <- rpblob$trueCorrpars
rho <- ranPars$rho
#
coordinates <- NULL
test.in <- FALSE
### ensure LMatrix in predictor:
## if it is currently absent, first provide corr matrix or its symSVD, from which Lunique will be computed using designL.from.Corr
if (is.null(Lunique <- attr(predictor,"LMatrix"))) {
symSVD <- NULL
if (corr.model %in% c("adjacency","ar1")) { ## "ar1" != "AR1" is a tempo name for a futur generic model
if ( missing(adjMatrix) ) stop("missing 'adjMatrix' for adjacency model")
## no nugget in the adjacency model... ## (use additional ranef instead)
symSVD <- attr(adjMatrix,"symSVD")
if (is.null(symSVD) && identical(attr(ranPars,"type")$rho,"var")) { ## can occur in direct call of HLCor ## identical() handles NULL args
if (isSymmetric(adjMatrix)) {
symSVD <- selfAdjointWrapper(adjMatrix)
attr(adjMatrix,"symSVD") <- symSVD
}
}
if (is.null(symSVD)) {
corrm <- solve(diag(nrow(adjMatrix))-rho*(adjMatrix))
} else {
symSVD$adjd <- symSVD$d
symSVD$d <- 1/(1-rho*symSVD$d) ## from adjMatrix to correlation matrix
}
} else if (corr.model %in% c("SAR_WWt")) { ## "ar1" != "AR1" is a tempo name for a futur generic model
if ( missing(adjMatrix) ) stop("missing 'adjMatrix' for adjacency model")
UDU. <- attr(adjMatrix,"UDU.")
if (is.null(UDU.)) {
corrm <- solve(diag(nrow(adjMatrix))-rho*(adjMatrix))
} else {
corrm <- UDU.$u %*% sweep(UDU.$u.,MARGIN=1,1/(1-rho*UDU.$d),`*`)
}
corrm <- tcrossprodCpp(corrm)
} else if (corr.model=="AR1") {
coordinates <- extract.check.coords(spatial.model=spatial.model,datanames=names(data))
uniqueGeo <- calcUniqueGeo(data=data[,coordinates,drop=FALSE])
txt <- paste(spatial.model[[2]][[3]]) ## the RHS of the ( . | . )
if (length(grep("%in%",txt))>0) {
stop("HLCor code should be allowed again to handle blockDiag objects")
#scaled.dist <- as.blockDiag.bar(spatial.model[[2]],formula,data=uniqueGeo)
#test.in <- TRUE
} else scaled.dist <- proxy::dist(uniqueGeo)
corrm <- trueCorrpars$ARphi^scaled.dist
} else if (corr.model %in% c("Matern")) {
txt <- paste(spatial.model[[2]][[3]]) ## the RHS of the ( . | . )
if (length(grep("%in%",txt))>0) {
stop("(!) Matern( . | <coord> %in% <grp>) is not yet handled.")
test.in <- TRUE ## should be useful when this case will be handled
}
## in a typical call from corrHLfit the following test should be FALSE because uniqueGeo and maybe distMatrix should have been precomputed
if ((length(rho)>1 || missing(distMatrix)) && is.null(uniqueGeo)) { ## all cases where we need uniqueGeo
coordinates <- extract.check.coords(spatial.model=spatial.model,datanames=names(data))
uniqueGeo <- calcUniqueGeo(data=data[,coordinates,drop=FALSE]) ## keeps the names of first instances of the coordinates in data
}
## then compute scaled distances from unscaled info, for HLfit call
msd.arglist <- list(rho = rho)
msd.arglist$`dist.method` <- control.dist$`dist.method` ## may be NULL
if (length(rho)>1L) {
msd.arglist <- c(msd.arglist,list(uniqueGeo=uniqueGeo))
msd.arglist$`rho.mapping` <- control.dist$`rho.mapping` ## may be NULL
} else {
if ( missing(distMatrix)) {
dist.arglist <- list(x=uniqueGeo)
dist.arglist$method <- control.dist$dist.method ## may be NULL
distMatrix <- do.call(proxy::dist,dist.arglist)
}
msd.arglist <- c(msd.arglist,list(distMatrix=distMatrix))
}
corrm <- do.call("make_scaled_dist",msd.arglist)
## at this point is a single location, corrm should be dist(0) and make_scaled_dist was modified to that effect
if ( nrow(corrm)>1 ) { ## >1 locations
norho <- trueCorrpars; norho$rho <- NULL ## because the MaternCorr input will be an already scaled distance 'corrm'
corrm <- do.call(MaternCorr,args=c(norho,list(corrm)))
}
} else if (corr.model== "corrMatrix") {
if (missing(corrMatrix)) {
mess <- pastefrom("missing(corrMatrix) argument despite corrMatrix term in formula.",prefix="(!) From ")
stop(mess)
} ## ELSE:
corrm <- corrMatrix
Lunique <- attr(corrMatrix,"LMatrix") ## will typically be NULL, but super-users ;-) may have provided it
}
if (verbose["trace"] && length(trueCorrpars)>0) print(unlist(trueCorrpars))
## call designL.from.Corr if Lunique not available
if (is.null(Lunique)) { ## test FR 11/2013 ## modif 2015/04. Noter un calcul de Lunique ci dessus
if ( ! is.null(symSVD)) {
Lunique <- try(designL.from.Corr(symSVD=symSVD))
} else { ## corrm must exist
argsfordesignL <- dotlist[intersect(names(dotlist),names(formals(designL.from.Corr)))]
if (processed$HL[1L]=="SEM") argsfordesignL$try.chol <- FALSE
if (inherits(corrm,"dist")) {
corrm <- as.matrix(corrm)
diag(corrm) <- 1L ## always a correlation matrix
}
Lunique <- try(do.call(designL.from.Corr,c(list(m=corrm),argsfordesignL)))
}
if (inherits(Lunique,"try-error")) {
print("correlation parameters were:") ## makes sense if designL.from.Corr already issued some warning
print(unlist(trueCorrpars))
stop()
}
}
attr(predictor,"%in%") <- test.in
attr(Lunique,"corr.model") <- corr.model
attr(Lunique,"ranefs") <- unlist(lapply(spatial.terms,DEPARSE)) ## essentiel pour la construction de ZAL!
if ( corr.model=="adjacency"
&& ! is.null(attr(ranPars,"type")) ## ie through corrHLfit call
&& "var" %in% attr(ranPars,"type")$rho ## then not a call for fixed rho => estim of rho within HLfit through SEM or augm GLM
) { ## then define ZA.L as ZA. U(adjacency matrix)
Lunique[] <- attr(Lunique,"symsvd")$u ## "[]" keeps attributes
}
attr(predictor,"LMatrix") <- Lunique
}
processed$predictor <- predictor
###
HLFormals <- names(formals(HLfit))
good_dotnames <- intersect(names(dotlist),HLFormals)
if (length(good_dotnames)>0L) {
HL.info <- dotlist[good_dotnames]
} else HL.info <- list()
## all printing in HLfit is suppressed by default
HL.info$verbose <- verbose #[intersect(names(verbose),c("warn","trace","summary","SEM"))]
HL.info$processed <- processed
## convert ranPars to ranFix + init.HLfit
## allows log and not log:
varNames <- names(which(attr(ranPars,"type")=="var"))
HL.info$init.HLfit[varNames] <- ranPars[varNames] ## inherits values from corrHLfit(...,init.HLfit(...))
fixNames <- setdiff(names(ranPars),varNames)
if (!is.null(fixNames)) { ## could be NULL for corrMatrix case
ranFix <- ranPars[fixNames] ## 11/2014 as there is no other source for ranFix
typelist <- list()
typelist[fixNames] <- "fix"
if (!is.null(rPtype <- attr(ranPars,"type"))) { ## it may not exist, or elements may be "fix" or "outer"
typelist[names(rPtype)] <- rPtype
}
attr(ranFix,"type") <- typelist
HL.info$ranFix <- ranFix
}
hlfit <- do.call("HLfit",HL.info) ## with a _list_ of arguments -> do.call ## perhaps should create a list of unevaluated arguments ????
if ( ! is.null(hlfit$error)) {
errfile <- generateFileName("HLfitCall")
errfile <- paste(errfile,".RData",sep="")
save(HL.info,file=errfile)
mess <- pastefrom("'do.call(HLfit,HL.info)' failed:",prefix="(!) From ")
message(mess)
message(hlfit$error)
message("'HL.info' is saved in the ",errfile," file",sep="")
stop("I exit.")
} ## ELSE:
hlfit$control.dist <- control.dist
attr(hlfit,"info.uniqueGeo") <- uniqueGeo ## more spatial info is in the hlfit$predictor's attributes (Lunique = corrmat^1/2, and ZALMatrix)
if (corr.model %in% c("Matern")) attr(hlfit,"msd.arglist") <- msd.arglist ## more organized, easier to reuse.
# particulary for $rho.mapping:
# should be NULL if length rho = 1 or if original control.dist$rho.mapping was NULL
## FR->FR but info.uniqueGeo more general (eg AR1) -> a revoir
hlfit$call <- "$call removed by HLCor. Consider the 'HLCorcall' attribute instead." ## instead of the $call with evaluated arguments
return(hlfit) ##
}
## wrapper for HLCor, suitable input and output for optimization
## HLCor.obj() is the objective function handed to numerical optimizers: it
## receives the currently-optimized parameters as a bare numeric vector
## ('ranefParsVec'), rebuilds the structured parameter list from 'skeleton',
## evaluates HLCor(), and returns the requested APHLs component
## (HLCor.obj.value, "p_bv" by default). If 'traceFileName' is a character
## string, diagnostics are appended to that file.
## NB: the name of the first arg MUST differ from names in dotlist...
`HLCor.obj` <- function(ranefParsVec, skeleton, HLCor.obj.value="p_bv", traceFileName=NULL, ...) {
  mc <- match.call(expand.dots=TRUE) ## (1) expand.dots added 11/04/2014 for the multinomial... eval
  if (is.null(processed <- mc$processed)) {
    stop("Call to HLCor.obj() without a 'processed' argument is invalid")
  } else { ## 'processed' is available
    multiple <- attr(processed,"multiple")
    if ( ( ! is.null(multiple)) && multiple) { ## "multiple" processed list
      ## RUN THIS LOOP and return: re-evaluate this same objective on each
      ## element of the 'processed' list and return the summed objective.
      fitlist <- lapply(seq_along(processed), function(it){
        locmc <- mc
        locmc[[1L]] <- as.name("HLCor.obj") ## replaces "f" !
        locmc$ranefParsVec <- ranefParsVec ## replaces "arg" !
        locmc$processed <- processed[[it]] ## The data are in processed !
        locmc$distMatrix <- mc$distMatrix[[it]] ## but the matrices are not HLfit args hence not in processed !
        locmc$uniqueGeo <- mc$uniqueGeo[[it]]
        eval(locmc)
      }) ## a pure list of HLCor objects
      resu <- sum(unlist(fitlist))
      if (is.character(traceFileName)) {
        verif <- paste("#global:", ranefParsVec, resu)
        write(verif, file=traceFileName, append=TRUE) ## the file is unlink'ed in corrHLfit()
      }
      return(resu)
    } else { ## there is one processed for a single data set
      family <- processed$family
      data <- processed$data
    }
  }
  ## Keep from the current call only the arguments understood by one of the
  ## downstream functions, then redirect the call to HLCor().
  HLCor.formals <- names(formals(HLCor))
  names_formals_HLfit <- names(formals(HLfit))
  designL.formals <- names(formals(designL.from.Corr))
  makescaled.formals <- names(formals(make_scaled_dist))
  HLnames <- (c(HLCor.formals, names_formals_HLfit, designL.formals, makescaled.formals)) ## cf parallel code in corrHLfit
  HLCor.call <- mc[c(1, which(names(mc) %in% HLnames))] ## keep the call structure
  HLCor.call[[1L]] <- quote(spaMM::HLCor)
  forGiven <- relist(ranefParsVec, skeleton) ## given values of the optimized variables
  ## ... relist keeps the RHOMAX... attributes from the skeleton, but the partial copy into ranPars does not.
  HLCor.call$ranPars[names(forGiven)] <- forGiven ## do not wipe out other fixed, non optimized variables
  attr(HLCor.call$ranPars, "RHOMAX") <- attr(skeleton, "RHOMAX")
  attr(HLCor.call$ranPars, "NUMAX") <- attr(skeleton, "NUMAX")
  types <- attr(skeleton, "type")
  attr(HLCor.call$ranPars, "type")[names(types)] <- types
  if (is.character(traceFileName)) {
    ## Debugging aid: save the rebuilt call so a failing evaluation can be replayed.
    if (.spaMM.data$options$TRACE.UNLINK) unlink("HLCor.call*.RData")
    zut <- paste(ranefParsVec, collapse="")
    save(HLCor.call, file=paste0("HLCor.call", zut, ".RData")) ## for replicating the problem
  }
  hlfit <- eval(HLCor.call)
  aphls <- hlfit$APHLs
  resu <- aphls[[HLCor.obj.value]]
  if (is.character(traceFileName)) {
    ## Trace line: likelihood components + dispersion estimates + parameters,
    ## both on the canonical ("readable") and the optimizer scales.
    readable <- unlist(canonizeRanPars(ranPars=forGiven, corr.model=mc$`corr.model`, checkComplete=FALSE)$ranPars)
    verif <- c(unlist(aphls), hlfit$lambda, hlfit$phi, readable, ranefParsVec) ## hlfit$phi may be NULL
    write(verif, file=traceFileName, ncolumns=length(verif), append=TRUE) ## the file is unlink'ed in corrHLfit()
  }
  return(resu) #
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interaction_graph.R
\name{BuildPriorInteraction}
\alias{BuildPriorInteraction}
\title{Build unweighted summarized interaction graph}
\usage{
BuildPriorInteraction(
object,
assay = "SCT",
slot = "data",
species = "human",
database = "OmniPath",
ligands = NULL,
recepts = NULL,
specific = F,
ranked_genes = NULL,
correct.depth = T,
graph_name = "prior_interaction"
)
}
\arguments{
\item{object}{A Seurat object}
\item{assay}{Assay in Seurat object from which to pull expression values}
\item{slot}{Slot within assay from which to pull expression values}
\item{species}{Name of species for which to pull ligand-receptor interactions. One of "human", "mouse", or "rat"}
\item{database}{Name of ligand-receptor database to use. Default: "OmniPath"
When species is "human", one of: OmniPath, CellChatDB, CellPhoneDB, Ramilowski2015, Baccin2019, LRdb, Kirouac2010, ICELLNET, iTALK, EMBRACE, HPMR, Guide2Pharma, connectomeDB2020, talklr, CellTalkDB
When species is "mouse" or "rat", only "OmniPath" is supported.
To pass a custom ligand-receptor database to this function, set database = "custom"}
\item{ligands}{Character vector of custom ligands to use for interaction graph generation. Ignored unless database = "custom"
When ligands is supplied, recepts must also be supplied and equidimensional.}
\item{recepts}{Character vector of custom receptors to use for interaction graph generation. Ignored unless database = "custom"
When recepts is supplied, ligands must also be supplied and equidimensional.}
\item{specific}{logical. When TRUE, consider only the genes in each cell's predefined gene signature (see crGeneSig) as expressed. Default FALSE}
\item{ranked_genes}{Cell-resolved gene signatures, used only when specific = T}
\item{correct.depth}{Correct summarized interaction graph for sequencing depth by linear regression. The sequencing depth of a cell-cell pair is the sum of UMI counts for each cell}
\item{graph_name}{Name of summarized interaction graph to place into output. Default "prior_interaction"}
}
\value{
Returns a Seurat object with an unweighted summarized interaction graph in the Graphs slot
}
\description{
Build unweighted summarized interaction graph
}
|
/man/BuildPriorInteraction.Rd
|
permissive
|
BlishLab/scriabin
|
R
| false
| true
| 2,289
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interaction_graph.R
\name{BuildPriorInteraction}
\alias{BuildPriorInteraction}
\title{Build unweighted summarized interaction graph}
\usage{
BuildPriorInteraction(
object,
assay = "SCT",
slot = "data",
species = "human",
database = "OmniPath",
ligands = NULL,
recepts = NULL,
specific = F,
ranked_genes = NULL,
correct.depth = T,
graph_name = "prior_interaction"
)
}
\arguments{
\item{object}{A Seurat object}
\item{assay}{Assay in Seurat object from which to pull expression values}
\item{slot}{Slot within assay from which to pull expression values}
\item{species}{Name of species for which to pull ligand-receptor interactions. One of "human", "mouse", or "rat"}
\item{database}{Name of ligand-receptor database to use. Default: "OmniPath"
When species is "human", one of: OmniPath, CellChatDB, CellPhoneDB, Ramilowski2015, Baccin2019, LRdb, Kirouac2010, ICELLNET, iTALK, EMBRACE, HPMR, Guide2Pharma, connectomeDB2020, talklr, CellTalkDB
When species is "mouse" or "rat", only "OmniPath" is supported.
To pass a custom ligand-receptor database to this function, set database = "custom"}
\item{ligands}{Character vector of custom ligands to use for interaction graph generation. Ignored unless database = "custom"
When ligands is supplied, recepts must also be supplied and equidimensional.}
\item{recepts}{Character vector of custom receptors to use for interaction graph generation. Ignored unless database = "custom"
When recepts is supplied, ligands must also be supplied and equidimensional.}
\item{specific}{logical. When TRUE, consider only the genes in each cell's predefined gene signature (see crGeneSig) as expressed. Default FALSE}
\item{ranked_genes}{Cell-resolved gene signatures, used only when specific = T}
\item{correct.depth}{Correct summarized interaction graph for sequencing depth by linear regression. The sequencing depth of a cell-cell pair is the sum of UMI counts for each cell}
\item{graph_name}{Name of summarized interaction graph to place into output. Default "prior_interaction"}
}
\value{
Returns a Seurat object with an unweighted summarized interaction graph in the Graphs slot
}
\description{
Build unweighted summarized interaction graph
}
|
# appMetadata -------------------------------------------------------------
# Tests for appMetadata(): inference of deployment metadata from an app dir.
# NOTE(review): expect_snapshot() compares against stored snapshot files, so
# the code text inside those calls must not be reformatted.

# An empty app directory should produce a clear (snapshotted) error.
test_that("clear error if no files", {
  dir <- local_temp_app()
  expect_snapshot(appMetadata(dir), error = TRUE)
})
# quarto_version metadata flips an Rmd app from rmd-static to quarto-static.
test_that("quarto affects mode inference", {
  dir <- local_temp_app(list("foo.Rmd" = ""))
  metadata <- appMetadata(dir)
  expect_equal(metadata$appMode, "rmd-static")
  metadata <- appMetadata(dir, metadata = list(quarto_version = 1))
  expect_equal(metadata$appMode, "quarto-static")
})
# Supplying a quarto path should emit a (snapshotted) deprecation warning.
test_that("quarto path is deprecated", {
  skip_if_no_quarto()
  dir <- local_temp_app(list("foo.Rmd" = ""))
  expect_snapshot(. <- appMetadata(dir, quarto = "abc"))
})
# Non-character 'quarto' values are rejected with a (snapshotted) error.
test_that("validates quarto argument", {
  dir <- local_temp_app(list("foo.Rmd" = ""))
  expect_snapshot(appMetadata(dir, quarto = 1), error = TRUE)
})
# An explicit .R appPrimaryDoc forces shiny mode.
test_that("handles special case of appPrimaryDoc as R file", {
  dir <- local_temp_app(list("foo.R" = ""))
  metadata <- appMetadata(dir, appPrimaryDoc = "foo.R")
  expect_equal(metadata$appMode, "shiny")
})
# checkLayout -------------------------------------------------------------
# Tests for checkAppLayout(): validation of recognizable app directory layouts.

# Conflicting entry points (primary doc plus app.R) must be rejected.
test_that("checkLayout() errors if primary doc & app.R", {
  dir <- local_temp_app(list(
    "app.R" = "",
    "myscript.R" = ""
  ))
  expect_snapshot(checkAppLayout(dir, "myscript.R"), error = TRUE)
})
# A directory with only data files matches no known app structure.
test_that("checkLayout fails if no known structure", {
  dir <- local_temp_app(list(
    "data.txt" = "",
    "cats.csv" = ""
  ))
  expect_snapshot(checkAppLayout(dir), error = TRUE)
})
# Each common layout (Rmd doc, single-file app, ui/server pair, static HTML)
# must pass checkAppLayout(). Fix: the original checked `rmd` four times and
# never exercised the shiny1/shiny2/static fixtures it created.
test_that("checkLayout succeeds with some common app structures", {
  rmd <- local_temp_app(list("foo.Rmd" = ""))
  expect_no_error(checkAppLayout(rmd))
  shiny1 <- local_temp_app(list("app.R" = ""))
  expect_no_error(checkAppLayout(shiny1))
  shiny2 <- local_temp_app(list("server.R" = "", "ui.R" = ""))
  expect_no_error(checkAppLayout(shiny2))
  static <- local_temp_app(list("foo.html" = ""))
  expect_no_error(checkAppLayout(static))
})
# inferAppMode ------------------------------------------------------------
# Tests for inferAppMode(): mapping file layout (and runtime YAML headers)
# to a content deployment mode string.

# Plumber API entry points imply "api" mode.
test_that("can infer mode for APIs", {
  expect_equal(inferAppMode("plumber.R"), "api")
  expect_equal(inferAppMode("entrypoint.R"), "api")
})
# Shiny entry points imply "shiny" mode.
test_that("can infer mode for shiny apps", {
  expect_equal(inferAppMode("app.R"), "shiny")
  expect_equal(inferAppMode("server.R"), "shiny")
})
test_that("can infer mode for static quarto and rmd docs", {
  dir <- local_temp_app(list("foo.Rmd" = ""))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-static")
  expect_equal(inferAppMode(paths, usesQuarto = TRUE), "quarto-static")
  # Static R Markdown treated as rmd-shiny for shinyapps targets
  expect_equal(inferAppMode(paths, coerceStaticRmd = TRUE), "rmd-shiny")
})
test_that("can infer mode for shiny rmd docs", {
  # Helper: minimal YAML front matter declaring the given runtime.
  yaml_runtime <- function(runtime) {
    c("---", paste0("runtime: ", runtime), "---")
  }
  # All three shiny runtime spellings are recognized.
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shinyrmd")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny_prerendered")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  # can pair server.R with shiny runtime
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny"), "server.R" = ""))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  # Beats static rmarkdowns
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny"), "foo.Rmd" = ""))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
})
test_that("can infer mode for shiny qmd docs", {
  # Helper: minimal YAML front matter declaring the given runtime.
  yaml_runtime <- function(runtime) {
    c("---", paste0("runtime: ", runtime), "---")
  }
  dir <- local_temp_app(list("index.Qmd" = yaml_runtime("shiny")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "quarto-shiny")
  # Can force Rmd to use quarto
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths, usesQuarto = TRUE), "quarto-shiny")
  # Prefers quarto if both present
  dir <- local_temp_app(list(
    "index.Qmd" = yaml_runtime("shiny"),
    "index.Rmd" = yaml_runtime("shiny")
  ))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "quarto-shiny")
})
# isShinyRmd() inspects fixture files shipped under tests/testthat/shiny-rmds/.
test_that("Shiny R Markdown files are detected correctly", {
  expect_true(isShinyRmd(test_path("shiny-rmds/shiny-rmd-dashes.Rmd")))
  expect_true(isShinyRmd(test_path("shiny-rmds/shiny-rmd-dots.Rmd")))
  expect_false(isShinyRmd(test_path("shiny-rmds/non-shiny-rmd.Rmd")))
})
# is_shiny_prerendered(runtime, server): either argument can flag shiny.
test_that("shiny metadata process correctly", {
  expect_false(is_shiny_prerendered(NULL, NULL))
  expect_true(is_shiny_prerendered("shiny_prerendered", NULL))
  expect_true(is_shiny_prerendered("shinyrmd", NULL))
  expect_true(is_shiny_prerendered(NULL, "shiny"))
  expect_true(is_shiny_prerendered(NULL, list(type = "shiny")))
})
# No recognized entry point at all -> plain static deployment.
test_that("otherwise, fallsback to static deploy", {
  expect_equal(inferAppMode(c("a.html", "b.html")), "static")
})
# inferAppPrimaryDoc ------------------------------------------------------
# Tests for inferAppPrimaryDoc(): choosing the primary document of an app.

test_that("leaves addPrimaryDoc unchanged or not a document", {
  expect_equal(inferAppPrimaryDoc("foo.Rmd"), "foo.Rmd")
  expect_equal(inferAppPrimaryDoc(NULL, appMode = "shiny"), NULL)
  expect_equal(inferAppPrimaryDoc(NULL, appMode = "api"), NULL)
})
# index.* wins over alphabetically-earlier candidates.
test_that("uses index file if present", {
  files <- c("index.html", "index.Rmd", "a.html", "b.html", "a.Rmd", "b.Rmd")
  expect_equal(inferAppPrimaryDoc(NULL, files, "static"), "index.html")
  expect_equal(inferAppPrimaryDoc(NULL, files, "rmd-shiny"), "index.Rmd")
})
test_that("otherwise fails back to first file with matching extensions", {
  files <- c("a.html", "b.html", "a.Rmd", "b.Rmd")
  expect_equal(inferAppPrimaryDoc(NULL, files, "static"), "a.html")
  expect_equal(inferAppPrimaryDoc(NULL, files, "rmd-shiny"), "a.Rmd")
})
test_that("errors if no files with needed extension", {
  expect_snapshot(error = TRUE, {
    inferAppPrimaryDoc(NULL, "a.R", "static")
    inferAppPrimaryDoc(NULL, "a.R", "rmd-shiny")
  })
})
# appHasParameters --------------------------------------------------------
# Tests for appHasParameters(): detection of parameterized R Markdown docs.

test_that("non-documents don't have parameters", {
  dir <- local_temp_app(list("foo.R" = ""))
  expect_false(appHasParameters(dir, "foo.R", "static"))
  expect_false(appHasParameters(dir, "foo.R", "shiny"))
})
# Site content ignores per-document params.
test_that("documents don't have parameters if part of a site", {
  dir <- local_temp_app(list("index.Rmd" = c("---", "params: [1, 2]", "---")))
  expect_false(appHasParameters(dir, "index.Rmd", "rmd-static", "site"))
  expect_false(appHasParameters(dir, "index.Rmd", "qmd-shiny", "site"))
})
test_that("non-Rmd files don't have parameters", {
  dir <- local_temp_app(list("app.r" = c("")))
  expect_false(appHasParameters(dir, "app.R", "rmd-shiny"))
})
# 'params: ~' (YAML null) counts as no parameters.
test_that("otherwise look at yaml metadata", {
  dir <- local_temp_app(list("index.Rmd" = c("---", "params: [1, 2]", "---")))
  expect_true(appHasParameters(dir, "index.Rmd", "rmd-shiny"))
  dir <- local_temp_app(list("index.Rmd" = c("---", "params: ~", "---")))
  expect_false(appHasParameters(dir, "index.Rmd", "rmd-shiny"))
})
# detectPythonInDocuments -------------------------------------------------
# Tests for detectPythonInDocuments(): scanning Rmd/qmd chunks for python.

test_that("dir without Rmds doesn't have have python", {
  dir <- local_temp_app()
  expect_false(detectPythonInDocuments(dir))
  dir <- local_temp_app(list("foo.R" = ""))
  expect_false(detectPythonInDocuments(dir))
})
# Only {python} chunks count; {r} chunks do not.
test_that("Rmd or qmd with python chunk has python", {
  dir <- local_temp_app(list("foo.qmd" = c("```{r}", "1+1", "````")))
  expect_false(detectPythonInDocuments(dir))
  dir <- local_temp_app(list("foo.Rmd" = c("```{python}", "1+1", "````")))
  expect_true(detectPythonInDocuments(dir))
  dir <- local_temp_app(list("foo.qmd" = c("```{python}", "1+1", "````")))
  expect_true(detectPythonInDocuments(dir))
})
|
/tests/testthat/test-appMetadata.R
|
no_license
|
cran/rsconnect
|
R
| false
| false
| 8,154
|
r
|
# appMetadata -------------------------------------------------------------
# Tests for appMetadata(): inference of deployment metadata from an app dir.
# NOTE(review): expect_snapshot() compares against stored snapshot files, so
# the code text inside those calls must not be reformatted.

# An empty app directory should produce a clear (snapshotted) error.
test_that("clear error if no files", {
  dir <- local_temp_app()
  expect_snapshot(appMetadata(dir), error = TRUE)
})
# quarto_version metadata flips an Rmd app from rmd-static to quarto-static.
test_that("quarto affects mode inference", {
  dir <- local_temp_app(list("foo.Rmd" = ""))
  metadata <- appMetadata(dir)
  expect_equal(metadata$appMode, "rmd-static")
  metadata <- appMetadata(dir, metadata = list(quarto_version = 1))
  expect_equal(metadata$appMode, "quarto-static")
})
# Supplying a quarto path should emit a (snapshotted) deprecation warning.
test_that("quarto path is deprecated", {
  skip_if_no_quarto()
  dir <- local_temp_app(list("foo.Rmd" = ""))
  expect_snapshot(. <- appMetadata(dir, quarto = "abc"))
})
# Non-character 'quarto' values are rejected with a (snapshotted) error.
test_that("validates quarto argument", {
  dir <- local_temp_app(list("foo.Rmd" = ""))
  expect_snapshot(appMetadata(dir, quarto = 1), error = TRUE)
})
# An explicit .R appPrimaryDoc forces shiny mode.
test_that("handles special case of appPrimaryDoc as R file", {
  dir <- local_temp_app(list("foo.R" = ""))
  metadata <- appMetadata(dir, appPrimaryDoc = "foo.R")
  expect_equal(metadata$appMode, "shiny")
})
# checkLayout -------------------------------------------------------------
# Tests for checkAppLayout(): validation of recognizable app directory layouts.

# Conflicting entry points (primary doc plus app.R) must be rejected.
test_that("checkLayout() errors if primary doc & app.R", {
  dir <- local_temp_app(list(
    "app.R" = "",
    "myscript.R" = ""
  ))
  expect_snapshot(checkAppLayout(dir, "myscript.R"), error = TRUE)
})
# A directory with only data files matches no known app structure.
test_that("checkLayout fails if no known structure", {
  dir <- local_temp_app(list(
    "data.txt" = "",
    "cats.csv" = ""
  ))
  expect_snapshot(checkAppLayout(dir), error = TRUE)
})
# Each common layout (Rmd doc, single-file app, ui/server pair, static HTML)
# must pass checkAppLayout(). Fix: the original checked `rmd` four times and
# never exercised the shiny1/shiny2/static fixtures it created.
test_that("checkLayout succeeds with some common app structures", {
  rmd <- local_temp_app(list("foo.Rmd" = ""))
  expect_no_error(checkAppLayout(rmd))
  shiny1 <- local_temp_app(list("app.R" = ""))
  expect_no_error(checkAppLayout(shiny1))
  shiny2 <- local_temp_app(list("server.R" = "", "ui.R" = ""))
  expect_no_error(checkAppLayout(shiny2))
  static <- local_temp_app(list("foo.html" = ""))
  expect_no_error(checkAppLayout(static))
})
# inferAppMode ------------------------------------------------------------
# Tests for inferAppMode(): mapping file layout (and runtime YAML headers)
# to a content deployment mode string.

# Plumber API entry points imply "api" mode.
test_that("can infer mode for APIs", {
  expect_equal(inferAppMode("plumber.R"), "api")
  expect_equal(inferAppMode("entrypoint.R"), "api")
})
# Shiny entry points imply "shiny" mode.
test_that("can infer mode for shiny apps", {
  expect_equal(inferAppMode("app.R"), "shiny")
  expect_equal(inferAppMode("server.R"), "shiny")
})
test_that("can infer mode for static quarto and rmd docs", {
  dir <- local_temp_app(list("foo.Rmd" = ""))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-static")
  expect_equal(inferAppMode(paths, usesQuarto = TRUE), "quarto-static")
  # Static R Markdown treated as rmd-shiny for shinyapps targets
  expect_equal(inferAppMode(paths, coerceStaticRmd = TRUE), "rmd-shiny")
})
test_that("can infer mode for shiny rmd docs", {
  # Helper: minimal YAML front matter declaring the given runtime.
  yaml_runtime <- function(runtime) {
    c("---", paste0("runtime: ", runtime), "---")
  }
  # All three shiny runtime spellings are recognized.
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shinyrmd")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny_prerendered")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  # can pair server.R with shiny runtime
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny"), "server.R" = ""))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
  # Beats static rmarkdowns
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny"), "foo.Rmd" = ""))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "rmd-shiny")
})
test_that("can infer mode for shiny qmd docs", {
  # Helper: minimal YAML front matter declaring the given runtime.
  yaml_runtime <- function(runtime) {
    c("---", paste0("runtime: ", runtime), "---")
  }
  dir <- local_temp_app(list("index.Qmd" = yaml_runtime("shiny")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "quarto-shiny")
  # Can force Rmd to use quarto
  dir <- local_temp_app(list("index.Rmd" = yaml_runtime("shiny")))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths, usesQuarto = TRUE), "quarto-shiny")
  # Prefers quarto if both present
  dir <- local_temp_app(list(
    "index.Qmd" = yaml_runtime("shiny"),
    "index.Rmd" = yaml_runtime("shiny")
  ))
  paths <- list.files(dir, full.names = TRUE)
  expect_equal(inferAppMode(paths), "quarto-shiny")
})
# isShinyRmd() inspects fixture files shipped under tests/testthat/shiny-rmds/.
test_that("Shiny R Markdown files are detected correctly", {
  expect_true(isShinyRmd(test_path("shiny-rmds/shiny-rmd-dashes.Rmd")))
  expect_true(isShinyRmd(test_path("shiny-rmds/shiny-rmd-dots.Rmd")))
  expect_false(isShinyRmd(test_path("shiny-rmds/non-shiny-rmd.Rmd")))
})
# is_shiny_prerendered(runtime, server): either argument can flag shiny.
test_that("shiny metadata process correctly", {
  expect_false(is_shiny_prerendered(NULL, NULL))
  expect_true(is_shiny_prerendered("shiny_prerendered", NULL))
  expect_true(is_shiny_prerendered("shinyrmd", NULL))
  expect_true(is_shiny_prerendered(NULL, "shiny"))
  expect_true(is_shiny_prerendered(NULL, list(type = "shiny")))
})
# No recognized entry point at all -> plain static deployment.
test_that("otherwise, fallsback to static deploy", {
  expect_equal(inferAppMode(c("a.html", "b.html")), "static")
})
# inferAppPrimaryDoc ------------------------------------------------------
# Tests for inferAppPrimaryDoc(): choosing the primary document of an app.

test_that("leaves addPrimaryDoc unchanged or not a document", {
  expect_equal(inferAppPrimaryDoc("foo.Rmd"), "foo.Rmd")
  expect_equal(inferAppPrimaryDoc(NULL, appMode = "shiny"), NULL)
  expect_equal(inferAppPrimaryDoc(NULL, appMode = "api"), NULL)
})
# index.* wins over alphabetically-earlier candidates.
test_that("uses index file if present", {
  files <- c("index.html", "index.Rmd", "a.html", "b.html", "a.Rmd", "b.Rmd")
  expect_equal(inferAppPrimaryDoc(NULL, files, "static"), "index.html")
  expect_equal(inferAppPrimaryDoc(NULL, files, "rmd-shiny"), "index.Rmd")
})
test_that("otherwise fails back to first file with matching extensions", {
  files <- c("a.html", "b.html", "a.Rmd", "b.Rmd")
  expect_equal(inferAppPrimaryDoc(NULL, files, "static"), "a.html")
  expect_equal(inferAppPrimaryDoc(NULL, files, "rmd-shiny"), "a.Rmd")
})
test_that("errors if no files with needed extension", {
  expect_snapshot(error = TRUE, {
    inferAppPrimaryDoc(NULL, "a.R", "static")
    inferAppPrimaryDoc(NULL, "a.R", "rmd-shiny")
  })
})
# appHasParameters --------------------------------------------------------
# Tests for appHasParameters(): detection of parameterized R Markdown docs.

test_that("non-documents don't have parameters", {
  dir <- local_temp_app(list("foo.R" = ""))
  expect_false(appHasParameters(dir, "foo.R", "static"))
  expect_false(appHasParameters(dir, "foo.R", "shiny"))
})
# Site content ignores per-document params.
test_that("documents don't have parameters if part of a site", {
  dir <- local_temp_app(list("index.Rmd" = c("---", "params: [1, 2]", "---")))
  expect_false(appHasParameters(dir, "index.Rmd", "rmd-static", "site"))
  expect_false(appHasParameters(dir, "index.Rmd", "qmd-shiny", "site"))
})
test_that("non-Rmd files don't have parameters", {
  dir <- local_temp_app(list("app.r" = c("")))
  expect_false(appHasParameters(dir, "app.R", "rmd-shiny"))
})
# 'params: ~' (YAML null) counts as no parameters.
test_that("otherwise look at yaml metadata", {
  dir <- local_temp_app(list("index.Rmd" = c("---", "params: [1, 2]", "---")))
  expect_true(appHasParameters(dir, "index.Rmd", "rmd-shiny"))
  dir <- local_temp_app(list("index.Rmd" = c("---", "params: ~", "---")))
  expect_false(appHasParameters(dir, "index.Rmd", "rmd-shiny"))
})
# detectPythonInDocuments -------------------------------------------------
# Tests for detectPythonInDocuments(): scanning Rmd/qmd chunks for python.

test_that("dir without Rmds doesn't have have python", {
  dir <- local_temp_app()
  expect_false(detectPythonInDocuments(dir))
  dir <- local_temp_app(list("foo.R" = ""))
  expect_false(detectPythonInDocuments(dir))
})
# Only {python} chunks count; {r} chunks do not.
test_that("Rmd or qmd with python chunk has python", {
  dir <- local_temp_app(list("foo.qmd" = c("```{r}", "1+1", "````")))
  expect_false(detectPythonInDocuments(dir))
  dir <- local_temp_app(list("foo.Rmd" = c("```{python}", "1+1", "````")))
  expect_true(detectPythonInDocuments(dir))
  dir <- local_temp_app(list("foo.qmd" = c("```{python}", "1+1", "````")))
  expect_true(detectPythonInDocuments(dir))
})
|
#' Calculate significance of EOFs compared to a null model (prcomp version)
#'
#' The \code{prcompNull} function uses a randomization approach to
#' calculate a null model for use in Empirical Orthogonal Function analysis (EOF)
#' with the \code{\link[stats]{prcomp}} function. EOF mode significance is assessed against the
#' distribution of EOF singular values ("Lambda") calculated by the null models
#'
#' @param x,retx,center,scale.,tol See \code{\link[stats]{prcomp}}
#' for argument definitions.
#' @param nperm Numeric. The number of null model permutations to calculate.
#' @param probs Numeric. Quantile of the null Lambda distribution against which
#' the observed Lambda values are compared (default 0.95).
#'
#' @return A list with elements:
#' \item{Lambda}{Matrix (\code{nperm} rows) of null-model variances per mode.}
#' \item{Lambda.orig}{Vector of observed EOF variances (\code{sdev^2}).}
#' \item{n.sig}{Number of leading EOF modes whose observed variance exceeds
#' the \code{probs} quantile of the null distribution.}
#'
#' @examples
#' # Generate data
#' m=50
#' n=100
#' frac.gaps <- 0.5 # the fraction of data with NaNs
#' N.S.ratio <- 0.1 # the Noise to Signal ratio for adding noise to data
#' x <- (seq(m)*2*pi)/m
#' t <- (seq(n)*2*pi)/n
#'
#' # True field
#' Xt <-
#' outer(sin(x), sin(t)) +
#' outer(sin(2.1*x), sin(2.1*t)) +
#' outer(sin(3.1*x), sin(3.1*t)) +
#' outer(tanh(x), cos(t)) +
#' outer(tanh(2*x), cos(2.1*t)) +
#' outer(tanh(4*x), cos(0.1*t)) +
#' outer(tanh(2.4*x), cos(1.1*t)) +
#' tanh(outer(x, t, FUN="+")) +
#' tanh(outer(x, 2*t, FUN="+"))
#'
#' Xt <- t(Xt)
#'
#' # Noise field
#' set.seed(1)
#' RAND <- matrix(runif(length(Xt), min=-1, max=1), nrow=nrow(Xt), ncol=ncol(Xt))
#' R <- RAND * N.S.ratio * Xt
#'
#' # True field + Noise field
#' Xp <- Xt + R
#'
#' res <- prcompNull(Xp, center=FALSE, scale=FALSE, nperm=499)
#' ylim <- range(res$Lambda.orig, res$Lambda)
#' boxplot(res$Lambda, log="y", col=8, border=2, outpch="", ylim=ylim)
#' points(res$Lambda.orig)
#' abline(v=res$n.sig+0.5, lty=2, col=4)
#' mtext(paste("Significant PCs =", res$n.sig), side=3, line=0.5, col=4)
#'
#' @importFrom stats prcomp quantile
#'
#' @export
#'
prcompNull <- function(x, retx = TRUE, center = TRUE, scale. = FALSE,
                       tol = NULL, nperm = 99, probs = 0.95
){
  # EOF of the observed field
  E <- prcomp(x, retx = retx, center = center, scale. = scale.,
              tol = tol)
  # One row of null variances ("Lambda") per permutation
  Lambda <- matrix(NaN, nrow = nperm, ncol = length(E$sdev))
  # For each permutation
  for (p in seq_len(nperm)) {
    # Destroy the field's structure by independently permuting each column
    x.tmp <- x
    for (i in seq_len(ncol(x.tmp))) {
      x.tmp[, i] <- x.tmp[, i][sample(nrow(x.tmp))]
    }
    # Conduct EOF on the permuted field
    E.tmp <- prcomp(x.tmp, retx = retx, center = center, scale. = scale.,
                    tol = tol)
    # Record Lambda (variances = squared singular values / sdev^2)
    Lambda[p, ] <- E.tmp$sdev^2
    # Progress report via message() (stderr; suppressible), not print()
    message("permutation ", p, " of ", nperm, " is completed")
  }
  result <- list(Lambda = Lambda, Lambda.orig = E$sdev^2)
  # A mode is "above null" when its observed variance exceeds the null quantile
  aboveNull <- as.numeric(E$sdev^2 > apply(Lambda, 2, quantile, probs = probs))
  # Count only the leading run of significant modes
  RLE <- rle(aboveNull)
  if (RLE$values[1] == 1) {
    # 'lengths' is the documented component name; the original 'RLE$length'
    # only worked through fragile $ partial matching
    result$n.sig <- RLE$lengths[1]
  } else {
    result$n.sig <- 0
  }
  result
}
|
/R/prcompNull.R
|
no_license
|
ValentinLouis/sinkr
|
R
| false
| false
| 2,742
|
r
|
#' Calculate significance of EOFs compared to a null model (prcomp version)
#'
#' The \code{prcompNull} function uses a randomization approach to
#' calculate a null model for use in Empirical Orthogonal Function analysis (EOF)
#' with the \code{\link[stats]{prcomp}} function. EOF mode significance is assessed against the
#' distribution of EOF singular values ("Lambda") calculated by the null models
#'
#' @param x,retx,center,scale.,tol See \code{\link[stats]{prcomp}}
#' for argument definitions.
#' @param nperm Numeric. The number of null model permutations to calculate.
#'
#' @examples
#' # Generate data
#' m=50
#' n=100
#' frac.gaps <- 0.5 # the fraction of data with NaNs
#' N.S.ratio <- 0.1 # the Noise to Signal ratio for adding noise to data
#' x <- (seq(m)*2*pi)/m
#' t <- (seq(n)*2*pi)/n
#'
#' # True field
#' Xt <-
#' outer(sin(x), sin(t)) +
#' outer(sin(2.1*x), sin(2.1*t)) +
#' outer(sin(3.1*x), sin(3.1*t)) +
#' outer(tanh(x), cos(t)) +
#' outer(tanh(2*x), cos(2.1*t)) +
#' outer(tanh(4*x), cos(0.1*t)) +
#' outer(tanh(2.4*x), cos(1.1*t)) +
#' tanh(outer(x, t, FUN="+")) +
#' tanh(outer(x, 2*t, FUN="+"))
#'
#' Xt <- t(Xt)
#'
#' # Noise field
#' set.seed(1)
#' RAND <- matrix(runif(length(Xt), min=-1, max=1), nrow=nrow(Xt), ncol=ncol(Xt))
#' R <- RAND * N.S.ratio * Xt
#'
#' # True field + Noise field
#' Xp <- Xt + R
#'
#' res <- prcompNull(Xp, center=FALSE, scale=FALSE, nperm=499)
#' ylim <- range(res$Lambda.orig, res$Lambda)
#' boxplot(res$Lambda, log="y", col=8, border=2, outpch="", ylim=ylim)
#' points(res$Lambda.orig)
#' abline(v=res$n.sig+0.5, lty=2, col=4)
#' mtext(paste("Significant PCs =", res$n.sig), side=3, line=0.5, col=4)
#'
#' @importFrom stats prcomp quantile
#'
#' @export
#'
prcompNull <- function(x, retx = TRUE, center = TRUE, scale. = FALSE,
  tol = NULL, nperm = 99
){
  # EOF/PCA of the original (unshuffled) field
  E <- prcomp(x, retx = retx, center = center, scale. = scale.,
    tol = tol)
  # One row of null singular values (Lambda = sdev^2) per permutation
  Lambda <- matrix(NaN, nrow = nperm, ncol = length(E$sdev))
  for (p in seq_len(nperm)) {
    # Destroy covariance structure by shuffling each column independently
    x.tmp <- x
    for (i in seq_len(ncol(x.tmp))) {
      x.tmp[, i] <- x.tmp[, i][sample(nrow(x.tmp))]
    }
    # EOF of the permuted field
    E.tmp <- prcomp(x.tmp, retx = retx, center = center, scale. = scale.,
      tol = tol)
    # Record null-model Lambda for this permutation
    Lambda[p, ] <- E.tmp$sdev^2
    message("permutation ", p, " of ", nperm, " is completed")
  }
  result <- list(Lambda = Lambda, Lambda.orig = E$sdev^2)
  # A mode is "significant" when its Lambda exceeds the null 95th percentile;
  # only the leading unbroken run of significant modes is counted.
  aboveNull <- as.numeric(E$sdev^2 > apply(Lambda, 2, quantile, probs = 0.95))
  RLE <- rle(aboveNull)
  if (RLE$values[1] == 1) {
    # 'lengths' is the actual rle() component name; the original wrote
    # RLE$length and worked only via $ partial matching.
    result$n.sig <- RLE$lengths[1]
  } else {
    result$n.sig <- 0
  }
  result
}
|
.filler <- function(x, y, maxv=12, circular=FALSE) {
	# Expand per-cell start (x) and end (y) layer indices into a 1/NA
	# presence matrix with maxv columns, via overlay().  With
	# circular=TRUE a start > end wraps around through maxv back to 1
	# (e.g. month ranges spanning the new year).
	# should rewrite this using apply (or C)
	fill <- function(x, y) {
		r <- matrix(NA, nrow=length(x), ncol=maxv)
		if (circular) {
			# seq_len() is safe when length(x) == 0 (1:0 would iterate twice)
			for (i in seq_len(nrow(r))) {
				if (!is.na(y[i])) {
					if (x[i] < y[i]) {
						r[i, x[i]:y[i]] <- 1
					} else {
						# wrap: fill x..maxv, then 1..y
						r[i, c(x[i]:maxv, 1:y[i])] <- 1
					}
				}
			}
			r
		} else {
			for (i in seq_len(nrow(r))) {
				if (!is.na(y[i])) {
					r[i, x[i]:y[i]] <- 1
				}
			}
			r
		}
	}
	x <- overlay(x, y, fun=fill)
	names(x) <- paste0('v', seq_len(maxv))
	x
}
|
/R/filler.R
|
no_license
|
cran/raster
|
R
| false
| false
| 584
|
r
|
.filler <- function(x, y, maxv=12, circular=FALSE) {
	# Expand per-cell start (x) and end (y) layer indices into a 1/NA
	# presence matrix with maxv columns, via overlay().  With
	# circular=TRUE a start > end wraps around through maxv back to 1
	# (e.g. month ranges spanning the new year).
	# should rewrite this using apply (or C)
	fill <- function(x, y) {
		r <- matrix(NA, nrow=length(x), ncol=maxv)
		if (circular) {
			# seq_len() is safe when length(x) == 0 (1:0 would iterate twice)
			for (i in seq_len(nrow(r))) {
				if (!is.na(y[i])) {
					if (x[i] < y[i]) {
						r[i, x[i]:y[i]] <- 1
					} else {
						# wrap: fill x..maxv, then 1..y
						r[i, c(x[i]:maxv, 1:y[i])] <- 1
					}
				}
			}
			r
		} else {
			for (i in seq_len(nrow(r))) {
				if (!is.na(y[i])) {
					r[i, x[i]:y[i]] <- 1
				}
			}
			r
		}
	}
	x <- overlay(x, y, fun=fill)
	names(x) <- paste0('v', seq_len(maxv))
	x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_list_aws_service_access_for_organization}
\alias{organizations_list_aws_service_access_for_organization}
\title{Returns a list of the AWS services that you enabled to integrate with
your organization}
\usage{
organizations_list_aws_service_access_for_organization(NextToken,
MaxResults)
}
\arguments{
\item{NextToken}{Use this parameter if you receive a \code{NextToken} response in a previous
request that indicates that there is more output available. Set it to
the value of the previous call\'s \code{NextToken} response to indicate where
the output should continue from.}
\item{MaxResults}{(Optional) Use this to limit the number of results you want included per
page in the response. If you do not include this parameter, it defaults
to a value that is specific to the operation. If additional items exist
beyond the maximum you specify, the \code{NextToken} response element is
present and has a value (is not null). Include that value as the
\code{NextToken} request parameter in the next call to the operation to get
the next part of the results. Note that Organizations might return fewer
results than the maximum even when there are more results available. You
should check \code{NextToken} after every operation to ensure that you
receive all of the results.}
}
\description{
Returns a list of the AWS services that you enabled to integrate with
your organization. After a service on this list creates the resources
that it requires for the integration, it can perform operations on your
organization and its accounts.
}
\details{
For more information about integrating other services with AWS
Organizations, including the list of services that currently work with
Organizations, see \href{http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html}{Integrating AWS Organizations with Other AWS Services}
in the \emph{AWS Organizations User Guide.}
This operation can be called only from the organization\'s master
account.
}
\section{Request syntax}{
\preformatted{svc$list_aws_service_access_for_organization(
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
/paws/man/organizations_list_aws_service_access_for_organization.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 2,249
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_list_aws_service_access_for_organization}
\alias{organizations_list_aws_service_access_for_organization}
\title{Returns a list of the AWS services that you enabled to integrate with
your organization}
\usage{
organizations_list_aws_service_access_for_organization(NextToken,
MaxResults)
}
\arguments{
\item{NextToken}{Use this parameter if you receive a \code{NextToken} response in a previous
request that indicates that there is more output available. Set it to
the value of the previous call\'s \code{NextToken} response to indicate where
the output should continue from.}
\item{MaxResults}{(Optional) Use this to limit the number of results you want included per
page in the response. If you do not include this parameter, it defaults
to a value that is specific to the operation. If additional items exist
beyond the maximum you specify, the \code{NextToken} response element is
present and has a value (is not null). Include that value as the
\code{NextToken} request parameter in the next call to the operation to get
the next part of the results. Note that Organizations might return fewer
results than the maximum even when there are more results available. You
should check \code{NextToken} after every operation to ensure that you
receive all of the results.}
}
\description{
Returns a list of the AWS services that you enabled to integrate with
your organization. After a service on this list creates the resources
that it requires for the integration, it can perform operations on your
organization and its accounts.
}
\details{
For more information about integrating other services with AWS
Organizations, including the list of services that currently work with
Organizations, see \href{http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html}{Integrating AWS Organizations with Other AWS Services}
in the \emph{AWS Organizations User Guide.}
This operation can be called only from the organization\'s master
account.
}
\section{Request syntax}{
\preformatted{svc$list_aws_service_access_for_organization(
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
# caret custom-model specification for klaR's NaiveBayes classifier.
# Each named element follows caret's model-definition contract
# (see caret's "using your own model in train" vignette).
modelInfo <- list(label = "Naive Bayes",
# Package supplying NaiveBayes()
library = "klaR",
loop = NULL,
# Classification only; Naive Bayes has no regression form
type = c('Classification'),
# Tunable parameters: Laplace smoothing, kernel vs. parametric density,
# and kernel bandwidth adjustment
parameters = data.frame(parameter = c('fL', 'usekernel', "adjust"),
class = c('numeric', 'logical', "numeric"),
label = c('Laplace Correction', 'Distribution Type', "Bandwidth Adjustment")),
# Default tuning grid: both density types, no smoothing, unit bandwidth
grid = function(x, y, len = NULL, search = "grid")
expand.grid(usekernel = c(TRUE, FALSE), fL = 0, adjust = 1),
# Fit one NaiveBayes model for a given tuning-parameter combination
fit = function(x, y, wts, param, lev, last, classProbs, ...)
NaiveBayes(x, y, usekernel= param$usekernel, fL = param$fL, adjust = param$adjust, ...),
# Class predictions; klaR's predict method expects a data.frame
predict = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit , newdata)$class
},
# Class-probability predictions (posterior matrix)
prob = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit, newdata, type = "raw")$posterior
},
# Predictor names: from the terms object if a formula interface was used
predictors = function(x, ...) if(hasTerms(x)) predictors(x$terms) else x$varnames,
tags = c("Bayesian Model"),
levels = function(x) x$levels,
# Order candidate models by the first tuning parameter
sort = function(x) x[order(x[,1]),])
|
/models/files/nb.R
|
no_license
|
paulhendricks/caret
|
R
| false
| false
| 1,520
|
r
|
# caret custom-model specification for klaR's NaiveBayes classifier.
# Each named element follows caret's model-definition contract
# (see caret's "using your own model in train" vignette).
modelInfo <- list(label = "Naive Bayes",
# Package supplying NaiveBayes()
library = "klaR",
loop = NULL,
# Classification only; Naive Bayes has no regression form
type = c('Classification'),
# Tunable parameters: Laplace smoothing, kernel vs. parametric density,
# and kernel bandwidth adjustment
parameters = data.frame(parameter = c('fL', 'usekernel', "adjust"),
class = c('numeric', 'logical', "numeric"),
label = c('Laplace Correction', 'Distribution Type', "Bandwidth Adjustment")),
# Default tuning grid: both density types, no smoothing, unit bandwidth
grid = function(x, y, len = NULL, search = "grid")
expand.grid(usekernel = c(TRUE, FALSE), fL = 0, adjust = 1),
# Fit one NaiveBayes model for a given tuning-parameter combination
fit = function(x, y, wts, param, lev, last, classProbs, ...)
NaiveBayes(x, y, usekernel= param$usekernel, fL = param$fL, adjust = param$adjust, ...),
# Class predictions; klaR's predict method expects a data.frame
predict = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit , newdata)$class
},
# Class-probability predictions (posterior matrix)
prob = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit, newdata, type = "raw")$posterior
},
# Predictor names: from the terms object if a formula interface was used
predictors = function(x, ...) if(hasTerms(x)) predictors(x$terms) else x$varnames,
tags = c("Bayesian Model"),
levels = function(x) x$levels,
# Order candidate models by the first tuning parameter
sort = function(x) x[order(x[,1]),])
|
\name{xmp11.01}
\alias{xmp11.01}
\docType{data}
\title{R Data set: xmp11.01}
\description{
The \code{xmp11.01} data frame has 12 rows and 3 columns.
}
\usage{data(xmp11.01)}
\format{
A data frame with 12 observations on the following 3 variables.
\describe{
\item{\code{strength}}{a numeric vector}
\item{\code{brand}}{a factor with levels \code{1} \code{2} \code{3}}
\item{\code{treatment}}{a factor with levels \code{1} \code{2} \code{3} \code{4}}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(xmp11.01)
str(xmp11.01)
}
\keyword{datasets}
|
/man/xmp11.01.Rd
|
no_license
|
cran/Devore7
|
R
| false
| false
| 842
|
rd
|
\name{xmp11.01}
\alias{xmp11.01}
\docType{data}
\title{R Data set: xmp11.01}
\description{
The \code{xmp11.01} data frame has 12 rows and 3 columns.
}
\usage{data(xmp11.01)}
\format{
A data frame with 12 observations on the following 3 variables.
\describe{
\item{\code{strength}}{a numeric vector}
\item{\code{brand}}{a factor with levels \code{1} \code{2} \code{3}}
\item{\code{treatment}}{a factor with levels \code{1} \code{2} \code{3} \code{4}}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(xmp11.01)
str(xmp11.01)
}
\keyword{datasets}
|
out_degree <- function(frame){
  # Out-degree centrality for each source node of a weighted edge list.
  #
  # `frame` must have three columns: source, target, edge value (names are
  # ignored). Edges whose value is 0 (or NA) do not count.
  #
  # Returns a data.frame with columns "Facility" (source node) and
  # "degree_centrality" (count of its non-zero outgoing edges).
  colnames(frame) <- c("FROM", "TO", "VALUE")
  # which() drops NA-valued edges the same way subset() did
  edges <- frame[which(frame$VALUE != 0), , drop = FALSE]
  # table() tallies outgoing edges per source; its output is sorted by
  # level/name, so the original pre-sort step was unnecessary.
  CD_out <- as.data.frame(table(edges$FROM))
  colnames(CD_out) <- c("Facility", "degree_centrality")
  return(CD_out)
}
|
/R/out_degree.R
|
no_license
|
Saraghanadian/SNAinSCM
|
R
| false
| false
| 449
|
r
|
out_degree <- function(frame){
  # Out-degree centrality for each source node of a weighted edge list.
  #
  # `frame` must have three columns: source, target, edge value (names are
  # ignored). Edges whose value is 0 (or NA) do not count.
  #
  # Returns a data.frame with columns "Facility" (source node) and
  # "degree_centrality" (count of its non-zero outgoing edges).
  colnames(frame) <- c("FROM", "TO", "VALUE")
  # which() drops NA-valued edges the same way subset() did
  edges <- frame[which(frame$VALUE != 0), , drop = FALSE]
  # table() tallies outgoing edges per source; its output is sorted by
  # level/name, so the original pre-sort step was unnecessary.
  CD_out <- as.data.frame(table(edges$FROM))
  colnames(CD_out) <- c("Facility", "degree_centrality")
  return(CD_out)
}
|
\name{to_time}
\alias{to_time}
\title{To time}
\author{Hadley Wickham <h.wickham@gmail.com>}
\description{
Turn numeric vector into POSIXct vector
}
\usage{to_time(x)}
\arguments{
\item{x}{}
}
\keyword{internal}
|
/man/to-time-1x.rd
|
no_license
|
genome-vendor/r-cran-ggplot2
|
R
| false
| false
| 217
|
rd
|
\name{to_time}
\alias{to_time}
\title{To time}
\author{Hadley Wickham <h.wickham@gmail.com>}
\description{
Turn numeric vector into POSIXct vector
}
\usage{to_time(x)}
\arguments{
\item{x}{}
}
\keyword{internal}
|
\name{stableMerge}
\alias{stableMerge}
\title{Execute a Stable Left Join}
\description{
Merge Two Data Sets Without Altering Material from the Left Argument.
}
\usage{
stableMerge(x, y)
}
\arguments{
\item{x}{the main data.frame}
\item{y}{a second data.frame containing related information}
}
\details{
\code{merge} is unpredictable, because it can add, drop, and
reorder rows, as well as reorder columns and recreate row names.
\code{stableMerge} expects a primary data.frame and
a secondary data.frame, and performs a \sQuote{stable} left join (\code{all.x=TRUE},
\code{all.y=FALSE}). Absence of extra arguments is by design, for integrity.
Keys are defined by common columns, and are repeatable in the primary,
but must be unique in the secondary data.frame. New columns in the
secondary data.frame are appended to the primary, supplying secondary information
by key match. No rows are added, or dropped, row/column order is
unaffected, and row names are preserved.
}
\value{
A dataframe with as many rows as \code{nrow(x)} but possibly more columns.
}
\references{\url{http://metrumrg.googlecode.com}}
\author{Tim Bergsma}
\seealso{
\itemize{
\item \code{\link{merge}}
}
}
\examples{
#a nonsense example
stableMerge(Theoph,BOD)
}
\keyword{manip}
|
/man/stableMerge.Rd
|
no_license
|
metrumresearchgroup/metrumrg
|
R
| false
| false
| 1,279
|
rd
|
\name{stableMerge}
\alias{stableMerge}
\title{Execute a Stable Left Join}
\description{
Merge Two Data Sets Without Altering Material from the Left Argument.
}
\usage{
stableMerge(x, y)
}
\arguments{
\item{x}{the main data.frame}
\item{y}{a second data.frame containing related information}
}
\details{
\code{merge} is unpredictable, because it can add, drop, and
reorder rows, as well as reorder columns and recreate row names.
\code{stableMerge} expects a primary data.frame and
a secondary data.frame, and performs a \sQuote{stable} left join (\code{all.x=TRUE},
\code{all.y=FALSE}). Absence of extra arguments is by design, for integrity.
Keys are defined by common columns, and are repeatable in the primary,
but must be unique in the secondary data.frame. New columns in the
secondary data.frame are appended to the primary, supplying secondary information
by key match. No rows are added, or dropped, row/column order is
unaffected, and row names are preserved.
}
\value{
A dataframe with as many rows as \code{nrow(x)} but possibly more columns.
}
\references{\url{http://metrumrg.googlecode.com}}
\author{Tim Bergsma}
\seealso{
\itemize{
\item \code{\link{merge}}
}
}
\examples{
#a nonsense example
stableMerge(Theoph,BOD)
}
\keyword{manip}
|
# Restaurant review sentiment classification with text2vec + glmnet.
# Expects `nlpdata` to already be loaded in the session, with columns:
#   ...1    document id
#   Review  free-text review
#   Liked   0/1 sentiment label
library(data.table)
library(tidyverse)
library(text2vec)
library(caTools)
library(glmnet)
nlpdata
nlpdata %>% class()
# Document ids must be character for itoken()
nlpdata$...1 <- nlpdata$...1 %>% as.character()
# 80/20 train/test split, stratified on the label
set.seed(123)
split <- nlpdata$Liked %>% sample.split(SplitRatio = 0.8)
train <- nlpdata %>% subset(split == TRUE)
test <- nlpdata %>% subset(split == FALSE)
colnames(nlpdata, do.NULL = TRUE, prefix = "col")

# Tokenize training text ----
it_train <- train$Review %>%
  itoken(preprocessor = tolower,
         tokenizer = word_tokenizer,
         ids = train$...1,
         progressbar = FALSE)
vocab <- it_train %>% create_vocabulary()
vocab %>%
  arrange(desc(term_count)) %>%
  head(110) %>%
  tail(10)

# DTM + cross-validated logistic regression on the full vocabulary ----
vectorizer <- vocab %>% vocab_vectorizer()
dtm_train <- it_train %>% create_dtm(vectorizer)
dtm_train %>% dim()
identical(rownames(dtm_train), train$...1)
glmnet_classifier <- dtm_train %>%
  cv.glmnet(y = train[['Liked']],
            family = 'binomial',
            type.measure = "auc",
            nfolds = 10,
            thresh = 0.001,# high value is less accurate, but has faster training
            maxit = 1000)# again lower number of iterations for faster training
glmnet_classifier$cvm %>% max() %>% round(3) %>% paste("-> Max AUC")

# Evaluate on the held-out test set ----
# FIX: tokenize the review text (test$Review), not the label column
# (test$Liked) that the original mistakenly fed to the tokenizer.
it_test <- test$Review %>% tolower() %>% word_tokenizer()
it_test <- it_test %>%
  itoken(ids = test$...1,
         progressbar = FALSE)
dtm_test <- it_test %>% create_dtm(vectorizer)
dtm_test %>% head(1)
preds <- predict(glmnet_classifier, dtm_test, type = 'response')[,1]
glmnet:::auc(test$Liked, preds) %>% round(2)

# Pruned vocabulary: drop stopwords plus very rare / very common terms ----
stop_words <- c("i", "you", "he", "she", "it", "we", "they",
                "me", "him", "her", "them",
                "my", "your", "yours", "his", "our", "ours",
                "myself", "yourself", "himself", "herself", "ourselves",
                "the", "a", "an", "and", "or", "on", "by", "so",
                "from", "about", "to", "for", "of",
                "that", "this", "is", "are")
vocab <- it_train %>% create_vocabulary(stopwords = stop_words)
pruned_vocab <- vocab %>%
  prune_vocabulary(term_count_min = 10,
                   doc_proportion_max = 0.5,
                   doc_proportion_min = 0.001)
pruned_vocab %>%
  arrange(desc(term_count)) %>%
  head(10)
vectorizer <- pruned_vocab %>% vocab_vectorizer()
dtm_train <- it_train %>% create_dtm(vectorizer)
dtm_train %>% dim()
glmnet_classifier <- dtm_train %>%
  cv.glmnet(y = train[['Liked']],
            family = 'binomial',
            type.measure = "auc",
            nfolds = 4,
            thresh = 0.001,
            maxit = 1000)
glmnet_classifier$cvm %>% max() %>% round(3) %>% paste("-> Max AUC")
dtm_test <- it_test %>% create_dtm(vectorizer)
preds <- predict(glmnet_classifier, dtm_test, type = 'response')[,1]
glmnet:::auc(test$Liked, preds) %>% round(2)

# N Grams ----
# Unigram + bigram model
vocab <- it_train %>% create_vocabulary(ngram = c(1L, 2L))
vocab <- vocab %>%
  prune_vocabulary(term_count_min = 10,
                   doc_proportion_max = 0.5)
bigram_vectorizer <- vocab %>% vocab_vectorizer()
dtm_train <- it_train %>% create_dtm(bigram_vectorizer)
dtm_train %>% dim()
glmnet_classifier <- dtm_train %>%
  cv.glmnet(y = train[['Liked']],
            family = 'binomial',
            type.measure = "auc",
            nfolds = 4,
            thresh = 0.001,
            maxit = 1000)
glmnet_classifier$cvm %>% max() %>% round(3) %>% paste("-> Max AUC")
dtm_test <- it_test %>% create_dtm(bigram_vectorizer)
preds <- predict(glmnet_classifier, dtm_test, type = 'response')[,1]
glmnet:::auc(test$Liked, preds) %>% round(2)
|
/week12_day2_part2_case.R
|
no_license
|
orkhanjamalov1991/Restaurant-Review-Analysis
|
R
| false
| false
| 3,548
|
r
|
# Restaurant review sentiment classification with text2vec + glmnet.
# Expects `nlpdata` to already be loaded in the session, with columns:
#   ...1    document id
#   Review  free-text review
#   Liked   0/1 sentiment label
library(data.table)
library(tidyverse)
library(text2vec)
library(caTools)
library(glmnet)
nlpdata
nlpdata %>% class()
# Document ids must be character for itoken()
nlpdata$...1 <- nlpdata$...1 %>% as.character()
# 80/20 train/test split, stratified on the label
set.seed(123)
split <- nlpdata$Liked %>% sample.split(SplitRatio = 0.8)
train <- nlpdata %>% subset(split == TRUE)
test <- nlpdata %>% subset(split == FALSE)
colnames(nlpdata, do.NULL = TRUE, prefix = "col")

# Tokenize training text ----
it_train <- train$Review %>%
  itoken(preprocessor = tolower,
         tokenizer = word_tokenizer,
         ids = train$...1,
         progressbar = FALSE)
vocab <- it_train %>% create_vocabulary()
vocab %>%
  arrange(desc(term_count)) %>%
  head(110) %>%
  tail(10)

# DTM + cross-validated logistic regression on the full vocabulary ----
vectorizer <- vocab %>% vocab_vectorizer()
dtm_train <- it_train %>% create_dtm(vectorizer)
dtm_train %>% dim()
identical(rownames(dtm_train), train$...1)
glmnet_classifier <- dtm_train %>%
  cv.glmnet(y = train[['Liked']],
            family = 'binomial',
            type.measure = "auc",
            nfolds = 10,
            thresh = 0.001,# high value is less accurate, but has faster training
            maxit = 1000)# again lower number of iterations for faster training
glmnet_classifier$cvm %>% max() %>% round(3) %>% paste("-> Max AUC")

# Evaluate on the held-out test set ----
# FIX: tokenize the review text (test$Review), not the label column
# (test$Liked) that the original mistakenly fed to the tokenizer.
it_test <- test$Review %>% tolower() %>% word_tokenizer()
it_test <- it_test %>%
  itoken(ids = test$...1,
         progressbar = FALSE)
dtm_test <- it_test %>% create_dtm(vectorizer)
dtm_test %>% head(1)
preds <- predict(glmnet_classifier, dtm_test, type = 'response')[,1]
glmnet:::auc(test$Liked, preds) %>% round(2)

# Pruned vocabulary: drop stopwords plus very rare / very common terms ----
stop_words <- c("i", "you", "he", "she", "it", "we", "they",
                "me", "him", "her", "them",
                "my", "your", "yours", "his", "our", "ours",
                "myself", "yourself", "himself", "herself", "ourselves",
                "the", "a", "an", "and", "or", "on", "by", "so",
                "from", "about", "to", "for", "of",
                "that", "this", "is", "are")
vocab <- it_train %>% create_vocabulary(stopwords = stop_words)
pruned_vocab <- vocab %>%
  prune_vocabulary(term_count_min = 10,
                   doc_proportion_max = 0.5,
                   doc_proportion_min = 0.001)
pruned_vocab %>%
  arrange(desc(term_count)) %>%
  head(10)
vectorizer <- pruned_vocab %>% vocab_vectorizer()
dtm_train <- it_train %>% create_dtm(vectorizer)
dtm_train %>% dim()
glmnet_classifier <- dtm_train %>%
  cv.glmnet(y = train[['Liked']],
            family = 'binomial',
            type.measure = "auc",
            nfolds = 4,
            thresh = 0.001,
            maxit = 1000)
glmnet_classifier$cvm %>% max() %>% round(3) %>% paste("-> Max AUC")
dtm_test <- it_test %>% create_dtm(vectorizer)
preds <- predict(glmnet_classifier, dtm_test, type = 'response')[,1]
glmnet:::auc(test$Liked, preds) %>% round(2)

# N Grams ----
# Unigram + bigram model
vocab <- it_train %>% create_vocabulary(ngram = c(1L, 2L))
vocab <- vocab %>%
  prune_vocabulary(term_count_min = 10,
                   doc_proportion_max = 0.5)
bigram_vectorizer <- vocab %>% vocab_vectorizer()
dtm_train <- it_train %>% create_dtm(bigram_vectorizer)
dtm_train %>% dim()
glmnet_classifier <- dtm_train %>%
  cv.glmnet(y = train[['Liked']],
            family = 'binomial',
            type.measure = "auc",
            nfolds = 4,
            thresh = 0.001,
            maxit = 1000)
glmnet_classifier$cvm %>% max() %>% round(3) %>% paste("-> Max AUC")
dtm_test <- it_test %>% create_dtm(bigram_vectorizer)
preds <- predict(glmnet_classifier, dtm_test, type = 'response')[,1]
glmnet:::auc(test$Liked, preds) %>% round(2)
|
\name{PLS_lm_wvc}
\alias{PLS_lm_wvc}
\title{Light version of PLS\_lm for cross validation purposes}
\description{
Light version of \code{PLS_lm} for cross validation purposes either on complete or incomplete datasets.
}
\usage{
PLS_lm_wvc(dataY, dataX, nt = 2, dataPredictY = dataX, modele = "pls",
scaleX = TRUE, scaleY = NULL, keepcoeffs = FALSE,
keepstd.coeffs=FALSE, tol_Xi = 10^(-12), weights, verbose=TRUE)
}
\arguments{
\item{dataY}{response (training) dataset}
\item{dataX}{predictor(s) (training) dataset}
\item{nt}{number of components to be extracted}
\item{dataPredictY}{predictor(s) (testing) dataset}
\item{modele}{name of the PLS model to be fitted; only \code{"pls"} is available for this function.}
\item{scaleX}{scale the predictor(s) : must be set to TRUE for \code{modele="pls"} and should be for glms pls.}
\item{scaleY}{scale the response: Yes/No. Ignored since scaling is not always possible for glm responses.}
\item{keepcoeffs}{whether the coefficients of unstandardized eXplanatory variables should be returned or not.}
\item{keepstd.coeffs}{whether the coefficients of standardized eXplanatory variables should be returned or not.}
\item{tol_Xi}{minimal value for Norm2(Xi) and \eqn{\mathrm{det}(pp' \times pp)}{det(pp'*pp)} if there is any missing value in the \code{dataX}. It defaults to \eqn{10^{-12}}{10^{-12}}}
\item{weights}{an optional vector of 'prior weights' to be used in the fitting process. Should be \code{NULL} or a numeric vector.}
\item{verbose}{should info messages be displayed ?}
}
\details{
This function is called by \code{\link{PLS_lm_kfoldcv}} in order to perform cross-validation either on complete or incomplete datasets.
Non-NULL weights can be used to indicate that different observations have different dispersions (with the values in weights being inversely proportional to the dispersions); or equivalently, when the elements of weights are positive integers w_i, that each response y_i is the mean of w_i unit-weight observations.
}
\value{
\item{valsPredict}{\code{nrow(dataPredictY) * nt} matrix of the predicted values}
\item{\code{coeffs}}{ If the coefficients of the eXplanatory variables were requested:\cr
i.e. \code{keepcoeffs=TRUE}.\cr
\code{ncol(dataX) * 1} matrix of the coefficients of the eXplanatory variables}
}
\references{
Nicolas Meyer, Myriam Maumy-Bertrand et \enc{Frederic}{Fr\'ed\'eric} Bertrand (2010). Comparing the linear and the logistic PLS regression with qualitative predictors: application to allelotyping data. \emph{Journal de la Societe Francaise de Statistique}, 151(2), pages 1-18.
\url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/47}
}
\author{\enc{Frederic}{Fr\'ed\'eric} Bertrand\cr
\email{frederic.bertrand@math.unistra.fr}\cr
\url{http://www-irma.u-strasbg.fr/~fbertran/}
}
\note{
Use \code{\link{PLS_lm_kfoldcv}} for a wrapper in view of cross-validation.
}
\seealso{\code{\link{PLS_lm}} for more detailed results, \code{\link{PLS_lm_kfoldcv}} for cross-validating models and \code{\link{PLS_glm_wvc}} for the same function dedicated to plsRglm models}
\examples{
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
PLS_lm_wvc(dataY=yCornell,dataX=XCornell,nt=3,dataPredictY=XCornell[1,])
PLS_lm_wvc(dataY=yCornell[-c(1,2)],dataX=XCornell[-c(1,2),],nt=3,dataPredictY=XCornell[c(1,2),],
verbose=FALSE)
PLS_lm_wvc(dataY=yCornell[-c(1,2)],dataX=XCornell[-c(1,2),],nt=3,dataPredictY=XCornell[c(1,2),],
keepcoeffs=TRUE, verbose=FALSE)
rm("XCornell","yCornell")
## With an incomplete dataset (X[1,2] is NA)
data(pine)
ypine <- pine[,11]
data(XpineNAX21)
PLS_lm_wvc(dataY=ypine[-1],dataX=XpineNAX21[-1,],nt=3, verbose=FALSE)
PLS_lm_wvc(dataY=ypine[-1],dataX=XpineNAX21[-1,],nt=3,dataPredictY=XpineNAX21[1,], verbose=FALSE)
PLS_lm_wvc(dataY=ypine[-2],dataX=XpineNAX21[-2,],nt=3,dataPredictY=XpineNAX21[2,], verbose=FALSE)
PLS_lm_wvc(dataY=ypine,dataX=XpineNAX21,nt=3, verbose=FALSE)
rm("ypine")
}
\keyword{models}
\keyword{regression}
|
/man/PLS_lm_wvc.Rd
|
no_license
|
kongdd/plsRglm
|
R
| false
| false
| 4,004
|
rd
|
\name{PLS_lm_wvc}
\alias{PLS_lm_wvc}
\title{Light version of PLS\_lm for cross validation purposes}
\description{
Light version of \code{PLS_lm} for cross validation purposes either on complete or incomplete datasets.
}
\usage{
PLS_lm_wvc(dataY, dataX, nt = 2, dataPredictY = dataX, modele = "pls",
scaleX = TRUE, scaleY = NULL, keepcoeffs = FALSE,
keepstd.coeffs=FALSE, tol_Xi = 10^(-12), weights, verbose=TRUE)
}
\arguments{
\item{dataY}{response (training) dataset}
\item{dataX}{predictor(s) (training) dataset}
\item{nt}{number of components to be extracted}
\item{dataPredictY}{predictor(s) (testing) dataset}
\item{modele}{name of the PLS model to be fitted; only \code{"pls"} is available for this function.}
\item{scaleX}{scale the predictor(s) : must be set to TRUE for \code{modele="pls"} and should be for glms pls.}
\item{scaleY}{scale the response: Yes/No. Ignored since scaling is not always possible for glm responses.}
\item{keepcoeffs}{whether the coefficients of unstandardized eXplanatory variables should be returned or not.}
\item{keepstd.coeffs}{whether the coefficients of standardized eXplanatory variables should be returned or not.}
\item{tol_Xi}{minimal value for Norm2(Xi) and \eqn{\mathrm{det}(pp' \times pp)}{det(pp'*pp)} if there is any missing value in the \code{dataX}. It defaults to \eqn{10^{-12}}{10^{-12}}}
\item{weights}{an optional vector of 'prior weights' to be used in the fitting process. Should be \code{NULL} or a numeric vector.}
\item{verbose}{should info messages be displayed ?}
}
\details{
This function is called by \code{\link{PLS_lm_kfoldcv}} in order to perform cross-validation either on complete or incomplete datasets.
Non-NULL weights can be used to indicate that different observations have different dispersions (with the values in weights being inversely proportional to the dispersions); or equivalently, when the elements of weights are positive integers w_i, that each response y_i is the mean of w_i unit-weight observations.
}
\value{
\item{valsPredict}{\code{nrow(dataPredictY) * nt} matrix of the predicted values}
\item{\code{coeffs}}{ If the coefficients of the eXplanatory variables were requested:\cr
i.e. \code{keepcoeffs=TRUE}.\cr
\code{ncol(dataX) * 1} matrix of the coefficients of the eXplanatory variables}
}
\references{
Nicolas Meyer, Myriam Maumy-Bertrand et \enc{Frederic}{Fr\'ed\'eric} Bertrand (2010). Comparing the linear and the logistic PLS regression with qualitative predictors: application to allelotyping data. \emph{Journal de la Societe Francaise de Statistique}, 151(2), pages 1-18.
\url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/47}
}
\author{\enc{Frederic}{Fr\'ed\'eric} Bertrand\cr
\email{frederic.bertrand@math.unistra.fr}\cr
\url{http://www-irma.u-strasbg.fr/~fbertran/}
}
\note{
Use \code{\link{PLS_lm_kfoldcv}} for a wrapper in view of cross-validation.
}
\seealso{\code{\link{PLS_lm}} for more detailed results, \code{\link{PLS_lm_kfoldcv}} for cross-validating models and \code{\link{PLS_glm_wvc}} for the same function dedicated to plsRglm models}
\examples{
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
PLS_lm_wvc(dataY=yCornell,dataX=XCornell,nt=3,dataPredictY=XCornell[1,])
PLS_lm_wvc(dataY=yCornell[-c(1,2)],dataX=XCornell[-c(1,2),],nt=3,dataPredictY=XCornell[c(1,2),],
verbose=FALSE)
PLS_lm_wvc(dataY=yCornell[-c(1,2)],dataX=XCornell[-c(1,2),],nt=3,dataPredictY=XCornell[c(1,2),],
keepcoeffs=TRUE, verbose=FALSE)
rm("XCornell","yCornell")
## With an incomplete dataset (X[1,2] is NA)
data(pine)
ypine <- pine[,11]
data(XpineNAX21)
PLS_lm_wvc(dataY=ypine[-1],dataX=XpineNAX21[-1,],nt=3, verbose=FALSE)
PLS_lm_wvc(dataY=ypine[-1],dataX=XpineNAX21[-1,],nt=3,dataPredictY=XpineNAX21[1,], verbose=FALSE)
PLS_lm_wvc(dataY=ypine[-2],dataX=XpineNAX21[-2,],nt=3,dataPredictY=XpineNAX21[2,], verbose=FALSE)
PLS_lm_wvc(dataY=ypine,dataX=XpineNAX21,nt=3, verbose=FALSE)
rm("ypine")
}
\keyword{models}
\keyword{regression}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nwos_tables_raw.R
\name{nwos_tables_raw}
\alias{nwos_tables_raw}
\title{nwos_tables_raw}
\usage{
nwos_tables_raw(
area_data = NA,
coop_data = NA,
quest_data = QUEST_EST,
quest_tab_meta_data = REF_TABLE
)
}
\description{
Create the body of an NWOS core, area, or cooperation rate table
}
\details{
For area and cooperation rate tables see ...
nwos_table_raw()
}
|
/man/nwos_tables_raw.Rd
|
no_license
|
jfontestad/nwos
|
R
| false
| true
| 448
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nwos_tables_raw.R
\name{nwos_tables_raw}
\alias{nwos_tables_raw}
\title{nwos_tables_raw}
\usage{
nwos_tables_raw(
area_data = NA,
coop_data = NA,
quest_data = QUEST_EST,
quest_tab_meta_data = REF_TABLE
)
}
\description{
Create the body of an NWOS core, area, or cooperation rate table
}
\details{
For area and cooperation rate tables see ...
nwos_table_raw()
}
|
#' raster.standardize, standardizes all values in a raster file
#'
#' This function is used by a lot of the metrics calculated by ENMTools, in order to
#' standardize suitability scores so they sum to 1 over a geographic space.
#'
#'
#' @param x A raster object or path to a raster.
#' @param verbose Controls printing of diagnostic messages
#'
#'
#' @keywords keywords
#'
#' @export raster.standardize
#'
#' @examples
#' raster.standardize(env[[1]])
raster.standardize <- function(x, verbose=FALSE){
  # Accept either a raster object or a file path; load from disk when given
  # a path.  `is.character()` replaces the fragile `class(x) == "character"`
  # comparison, which warns/errors when an object carries multiple classes.
  if(is.character(x)){
    x <- raster(x)
  }
  if(verbose){
    print(paste("Starting standardize on", x, "at", Sys.time()))
  }
  # Divide every cell by the grand total so suitability scores sum to 1
  # over the whole raster.
  return(x/cellStats(x, stat=sum))
}
|
/R/raster.standardize.R
|
no_license
|
danlwarren/sim-code-Warren-et-al-2019
|
R
| false
| false
| 738
|
r
|
#' raster.standardize, standardizes all values in a raster file
#'
#' This function is used by a lot of the metrics calculated by ENMTools, in order to
#' standardize suitability scores so they sum to 1 over a geographic space.
#'
#'
#' @param x A raster object or path to a raster.
#' @param verbose Controls printing of diagnostic messages
#'
#'
#' @keywords keywords
#'
#' @export raster.standardize
#'
#' @examples
#' raster.standardize(env[[1]])
raster.standardize <- function(x, verbose=FALSE){
  # Accept either a raster object or a file path; load from disk when given
  # a path.  `is.character()` replaces the fragile `class(x) == "character"`
  # comparison, which warns/errors when an object carries multiple classes.
  if(is.character(x)){
    x <- raster(x)
  }
  if(verbose){
    print(paste("Starting standardize on", x, "at", Sys.time()))
  }
  # Divide every cell by the grand total so suitability scores sum to 1
  # over the whole raster.
  return(x/cellStats(x, stat=sum))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stackingutilities.R
\name{ProcessAndStackDataForRegression}
\alias{ProcessAndStackDataForRegression}
\title{Helper function to stack data for a regression model.}
\usage{
ProcessAndStackDataForRegression(
unstacked.data,
formula,
interaction,
subset,
weights
)
}
\arguments{
\item{unstacked.data}{A named list of two data frames. Element "X" is the data frame
containing the outcome variable (usually a Numeric - Multi or Nominal/Ordinal - Multi
variable set in Displayr), and element "Y" is the data frame containing the predictor
variables (a Binary - Grid or Numeric - Grid variable set in Displayr).}
\item{formula}{A formula object for the regression.}
\item{interaction}{Optional variable to test for interaction with other variables in the model.}
\item{subset}{An optional vector specifying a subset of observations to be
used in the fitting process.}
\item{weights}{An optional vector of sampling weights.}
}
\description{
Takes the input unstacked data, interaction, subset, weights and formula terms,
processes the unstacked data, and stacks it.
If the stacking is successful, the interaction, subset, weights are also updated
to be the appropriate size
}
|
/man/ProcessAndStackDataForRegression.Rd
|
no_license
|
Displayr/flipTransformations
|
R
| false
| true
| 1,271
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stackingutilities.R
\name{ProcessAndStackDataForRegression}
\alias{ProcessAndStackDataForRegression}
\title{Helper function to stack data for a regression model.}
\usage{
ProcessAndStackDataForRegression(
unstacked.data,
formula,
interaction,
subset,
weights
)
}
\arguments{
\item{unstacked.data}{A named list of two data frames. Element "X" is the data frame
containing the outcome variable (usually a Numeric - Multi or Nominal/Ordinal - Multi
variable set in Displayr), and element "Y" is the data frame containing the predictor
variables (a Binary - Grid or Numeric - Grid variable set in Displayr).}
\item{formula}{A formula object for the regression.}
\item{interaction}{Optional variable to test for interaction with other variables in the model.}
\item{subset}{An optional vector specifying a subset of observations to be
used in the fitting process.}
\item{weights}{An optional vector of sampling weights.}
}
\description{
Takes the input unstacked data, interaction, subset, weights and formula terms,
processes the unstacked data, and stacks it.
If the stacking is successful, the interaction, subset, weights are also updated
to be the appropriate size
}
|
#
# SMart Install Packages (SMIP)
#
# User friendly interface into install.packages(...) and biocLite(...)
#
# All functions check existence of packages before installation and skip installation
# if the package already exists. For group of packages, each one is checked
# individually and only missing ones are installed.
#
# Function also offer more automation compared to original ones, allowing
# installation of whole repositories, scavenging web pages for packages etc.
# Usage examples:
#
#smart.install.bioconductor.repo(repo.index = 4)
#smart.install.packages(package="Matrix")
#smart.install.packages(package="Matrix", mirror="http://ftp.sunet.se/pub/lang/CRAN")
#smart.install.packages(package="foobarstatistics") # should fail
#smart.install.packages(bioconductor.package="Matrix")
#smart.install.packages(bioconductor.package="biofoobar") # should fail
#smart.install.packages(package="Matrix", bioconductor.package="biofoobar") # should fail
#smart.install.packages(url.package="http://www.math.utu.fi/projects/software/bio/ROTS_1.1.1.tar.gz")
#smart.install.scavenge.web.packages("http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/16.0.0/entrezg.asp")
# Scrape an HTML page for links to ".tar.gz" source packages and install each
# one via smart.install.packages().
#   url       -- page to scan for package links
#   chiptype  -- keep only links matching this pattern ("all" keeps every link)
#   update    -- forwarded to smart.install.packages(); 0 skips installed packages
#   list.only -- when 1, return the discovered links instead of installing them
smart.install.scavenge.web.packages <- function(url, chiptype="all", update=0, list.only=0) {
  # Name of the static pages
  # If the URL contains "latest", resolve the real build directory from the
  # page's meta-refresh redirect ("URL=.." attribute) and rewrite the URL.
  if(length(grep("latest", url)) > 0) {
    web.page <- paste(readLines(url), collapse="")
    latest.build <- gsub("\'.*$", "", gsub("^.*URL=\\.\\.", "", web.page))
    url <- gsub("\\/latest\\/.*$", latest.build, url);
  }
  # Parse URL: keep everything up to and including the last "/" (directory part).
  url.root <- substring(url, 1, gregexpr("/[^/]*$", url)[[1]][1])
  # Read page that contains links
  web.page <- paste(readLines(url), collapse="")
  # Parse links: pull every href="..." attribute, strip the wrapper, and keep
  # only archive links.
  links.raw <- regmatches(web.page, gregexpr("href=\"([^\"]*)\"", web.page))[[1]]
  links.stripped <- substr(links.raw, nchar("href=\"")+1, nchar(links.raw)-1)
  # NOTE(review): ".tar.gz" is an unescaped regex (dots match any char); in
  # practice it still selects the intended archive links.
  links <- links.stripped[grep(".tar.gz", links.stripped)]
  # Convert relative URL's to absolute
  ind.relative.url <- grep("http://", links, invert=TRUE)
  links[ind.relative.url] <- paste(url.root, links[ind.relative.url], sep="")
  if(chiptype != "all") {
    links <- links[grep(chiptype, links)];
  }
  # if list.only == 1, only the list of available packages will returned
  if(list.only == 1) {
    return(links)
  }
  # Install each linked package
  for (link in links) {
    smart.install.packages(url.package=link, update=update)
  }
}
# Install every annotation package from the Bioconductor annotation repository
# matching the Bioconductor version currently in use.
#   repo.index -- currently unused; the repository-selection code below is
#                 commented out
#   mirror     -- optional mirror URL, forwarded to smart.install.packages()
#   update     -- 0 skips packages that are already installed
smart.install.bioconductor.repo <- function(repo.index, mirror=NA, update=0) {
  # Check what version of Bioconductor is being used; bootstrap the
  # BiocInstaller package first if it cannot be loaded.
  bioc.installer.loaded <- try(library(BiocInstaller))
  # inherits() is the robust test for a try() failure; comparing class()
  # with == misbehaves when an object carries more than one class.
  if (inherits(bioc.installer.loaded, "try-error")) {
    smart.install.packages(bioconductor.package="BiocInstaller", mirror=mirror, update=update)
  }
  library(BiocInstaller)
  current.bioc.version <- biocVersion()
  # Install available annotation packages
  current.bioc.url <- paste("http://www.bioconductor.org/packages/", current.bioc.version, "/data/annotation", sep="")
  for (package in available.packages(contrib.url(current.bioc.url))[, "Package"]) {
    smart.install.packages(bioconductor.package = package, mirror = mirror, update=update)
  }
  # Select the given repository
  #orig.repos <- setRepositories(ind=c(repo.index))
  #
  #for (package in available.packages()[,"Package"]) {
  #  smart.install.packages(bioconductor.package = package, mirror = mirror, update=update)
  #}
  #
  # Restore original repositories
  #if(length(orig.repos) == 1) {
  #  if(orig.repos$repos["CRAN"] == "@CRAN@") {
  #    #orig.repos$repos["CRAN"] = "@CRAN@";
  #    options(repos = orig.repos);
  #  } else {
  #    setRepositories(orig.repos)
  #  }
  #} else {
  #  setRepositories(orig.repos)
  #}
}
# Install a single package from exactly one source: CRAN (`package`),
# Bioconductor (`bioconductor.package`), or a direct URL to a source tarball
# (`url.package`).  When update == 0, a package that is already installed is
# skipped.  Returns TRUE invisibly on success; stops on error.
smart.install.packages <- function(package=NA, bioconductor.package=NA, url.package=NA, mirror=NA, update=0) {
  # Check parameters: exactly one of the three specifiers must be given.
  package.defs <- c(package, bioconductor.package, url.package)
  if (sum(!is.na(package.defs)) != 1) {
    stop("Must use exactly one of the alternative ways to specify the package to install")
  }
  # Derive the package name; for URL installs it is parsed out of the
  # ".../<name>_<version>.tar.gz" file name.
  if (is.na(url.package)) {
    package.name <- package.defs[!is.na(package.defs)]
  } else {
    package.name <- gsub(".*/(.*)_.*", "\\1", url.package)
  }
  if (update == 0) {
    if (!is.installed(package.name)) {
      cat(paste("Will now install", package.name, "\n"))
    } else {
      cat(paste("Already installed", package.name, "\n"))
      return(invisible(TRUE))
    }
  }
  if (!is.na(package)) {
    # FIX: the original used scalar ifelse(), which silently truncates
    # getOption("repos") to its first element when it is a vector; a plain
    # if/else preserves all configured repositories.
    repos <- if (is.na(mirror)) getOption("repos") else mirror
    install.packages(pkgs=c(package), repos=repos)
  } else if (!is.na(bioconductor.package)) {
    source("http://www.bioconductor.org/biocLite.R")
    if (!is.na(mirror)) {
      options("BioC_mirror" = c("Mirror"=mirror))
    }
    biocLite(bioconductor.package, suppressUpdates=TRUE)
  } else if (!is.na(url.package)) {
    # Download URL to a temp file, install from it, then remove the file.
    tempfile.path <- tempfile("package", fileext=".tar.gz")
    a <- try(download.file(url=url.package, destfile=tempfile.path))
    # inherits() is the robust test for a try() failure.
    if (!inherits(a, "try-error")) {
      install.packages(pkgs=c(tempfile.path), repos=NULL)
      unlink(tempfile.path)
    } else {
      warning(paste("package", url.package, "is not a valid web-page", sep=" "))
      stop("Error occurred!")
    }
  }
  # (The old trailing else was unreachable: the exactly-one check above
  # guarantees one of the three branches is always taken.)
  # Install was successful
  invisible(TRUE)
}
# Test whether a package is installed and loadable.
# Fast path: the package is listed by installed.packages().  Fallback: try to
# load it with require(), silencing all output (some packages print
# unconditionally on load).  Returns a single logical.
is.installed <- function(package) {
  if (package %in% rownames(installed.packages())) {
    return(TRUE)
  }
  # nullfile() is the portable null device (sink("/dev/null") breaks on
  # Windows); on.exit() guarantees the sink is released even if require()
  # errors, which the original did not.
  sink(nullfile())
  on.exit(sink(), add = TRUE)
  suppressPackageStartupMessages(
    suppressWarnings(suppressMessages(
      require(package, character.only = TRUE, warn.conflicts = FALSE, quietly = TRUE)
    ))
  )
}
# Cross-check the custom-CDF package names referenced by the affy
# normalisation scripts against the available custom-CDF package list, and
# suggest close matches (stringdist::ain, maxDist = 3) for any missing name.
#   script.basename    -- admin script directory; the sibling "microarray"
#                         directory is assumed to hold the norm-affy scripts
#   db.custom.packages -- character vector of custom-CDF package paths/names
# NOTE(review): relies on a global `repo.cran` mirror variable that is not
# defined in this file -- confirm the caller sets it.
check.affy.customnames <- function(script.basename, db.custom.packages) {
  # Find R scripts used to normalize affy chips
  r.script.basename <- gsub("/admin/", "/microarray/", script.basename);
  all.packages <- rownames(installed.packages())  # NOTE(review): unused below
  all.r.scripts <- list.files(path=r.script.basename);
  affy.r.script <- all.r.scripts[grep("norm-affy", all.r.scripts)];
  # Install stringdist package. Needed for finding partial matches
  smart.install.packages(package="stringdist", mirror=repo.cran)
  library("stringdist");
  # Polish up custom-CDF names: reduce ".../<name>_<version>" to "<name>"
  if(length(db.custom.packages) > 0) {
    db.custom.packages <- gsub(".*/(.*)_.*", "\\1", db.custom.packages)
  } else {
    stop("Must specify a list of custom-CDF packages");
  }
  # For each affy-norm script, find code defining which custom-CDF packages are supported
  supported.custom.packages <- NULL;
  for(i in 1:length(affy.r.script)) {
    r.script <- scan(file=file.path(r.script.basename, affy.r.script[i]), what="", sep="\n")
    #Find instances in SADL descriptions
    # NOTE(review): the "> 0" sits inside length() here, so this evaluates
    # length(grep(...) > 0) -- it still works (non-empty iff there is a
    # match) but was probably meant as length(grep(...)) > 0.
    if(length(grep("PARAMETER custom\\.chiptype", r.script) > 0)) {
      sadl.row <- r.script[grep("PARAMETER custom\\.chiptype", r.script)];
      sadl.row <- gsub("^.*\\[", "", sadl.row);
      sadl.row <- gsub("\\].*$", "", sadl.row);
      packages.in.sadl.row <- unlist(strsplit(sadl.row, "\\s*,\\s*"))
      for(j in 1:length(packages.in.sadl.row)) {
        # Each entry looks like "label: name"; keep the name, and rewrite a
        # trailing parenthesised part to the "cdf" suffix.
        custom.package <- unlist(strsplit(packages.in.sadl.row[j], "\\s*:\\s*"))[2];
        custom.package <- gsub("\\(.+\\)", "cdf", custom.package);
        supported.custom.packages <- c(supported.custom.packages, custom.package);
      }
    }
    #Find other instances where parameter custom_cdf has been used
    if(length(grep("custom_cdf\\s*<-|custom_cdf\\s*=", r.script) > 0)) {
      rscript.row <- r.script[grep("custom_cdf\\s*<-|custom_cdf\\s*=", r.script)];
      rscript.row <- gsub("^.*<-|^.*=", "", rscript.row);
      rscript.row <- gsub("\\s+|\"", "", rscript.row);
      supported.custom.packages <- c(supported.custom.packages, rscript.row);
    }
  }
  # Check if the package exists
  # NOTE(review): this loop runs after the for-i loop has finished, so the
  # messages always name affy.r.script[i] for the *last* script only --
  # confirm whether the check was meant to run once per script.
  for(j in 1:length(supported.custom.packages)) {
    if(!(supported.custom.packages[j] %in% db.custom.packages)) {
      cat(paste("Package", supported.custom.packages[j], "in", affy.r.script[i], "not found\n"));
      # ain() is stringdist's approximate %in%, tolerating 3 edits.
      partial.match <- db.custom.packages[ain(db.custom.packages, supported.custom.packages[j], maxDist=3)];
      if(length(partial.match) > 0) {
        for(k in 1:length(partial.match)) {
          if(partial.match[k] %in% rownames(installed.packages()) == TRUE) {
            cat(paste("\tConsider using", partial.match[k], "\n"));
          } else {
            cat(paste("\tConsider installing and using", partial.match[k], "\n"));
          }
        }
      } else {
        cat(paste("\tPackage", supported.custom.packages[j], "in", affy.r.script[i], "has not matches in current custom-CDF database\n"));
      }
    }
  }
}
|
/smip.R
|
permissive
|
winni2k/r-parallel-install-packages
|
R
| false
| false
| 8,596
|
r
|
#
# SMart Install Packages (SMIP)
#
# User friendly interface into install.packages(...) and biocLite(...)
#
# All functions check existence of packages before installation and skip installation
# if the package already exists. For group of packages, each one is checked
# individually and only missing ones are installed.
#
# Function also offer more automation compared to original ones, allowing
# installation of whole repositories, scavenging web pages for packages etc.
# Usage examples:
#
#smart.install.bioconductor.repo(repo.index = 4)
#smart.install.packages(package="Matrix")
#smart.install.packages(package="Matrix", mirror="http://ftp.sunet.se/pub/lang/CRAN")
#smart.install.packages(package="foobarstatistics") # should fail
#smart.install.packages(bioconductor.package="Matrix")
#smart.install.packages(bioconductor.package="biofoobar") # should fail
#smart.install.packages(package="Matrix", bioconductor.package="biofoobar") # should fail
#smart.install.packages(url.package="http://www.math.utu.fi/projects/software/bio/ROTS_1.1.1.tar.gz")
#smart.install.scavenge.web.packages("http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/16.0.0/entrezg.asp")
# Scrape an HTML page for links to ".tar.gz" source packages and install each
# one via smart.install.packages().
#   url       -- page to scan for package links
#   chiptype  -- keep only links matching this pattern ("all" keeps every link)
#   update    -- forwarded to smart.install.packages(); 0 skips installed packages
#   list.only -- when 1, return the discovered links instead of installing them
smart.install.scavenge.web.packages <- function(url, chiptype="all", update=0, list.only=0) {
  # Name of the static pages
  # If the URL contains "latest", resolve the real build directory from the
  # page's meta-refresh redirect ("URL=.." attribute) and rewrite the URL.
  if(length(grep("latest", url)) > 0) {
    web.page <- paste(readLines(url), collapse="")
    latest.build <- gsub("\'.*$", "", gsub("^.*URL=\\.\\.", "", web.page))
    url <- gsub("\\/latest\\/.*$", latest.build, url);
  }
  # Parse URL: keep everything up to and including the last "/" (directory part).
  url.root <- substring(url, 1, gregexpr("/[^/]*$", url)[[1]][1])
  # Read page that contains links
  web.page <- paste(readLines(url), collapse="")
  # Parse links: pull every href="..." attribute, strip the wrapper, and keep
  # only archive links.
  links.raw <- regmatches(web.page, gregexpr("href=\"([^\"]*)\"", web.page))[[1]]
  links.stripped <- substr(links.raw, nchar("href=\"")+1, nchar(links.raw)-1)
  # NOTE(review): ".tar.gz" is an unescaped regex (dots match any char); in
  # practice it still selects the intended archive links.
  links <- links.stripped[grep(".tar.gz", links.stripped)]
  # Convert relative URL's to absolute
  ind.relative.url <- grep("http://", links, invert=TRUE)
  links[ind.relative.url] <- paste(url.root, links[ind.relative.url], sep="")
  if(chiptype != "all") {
    links <- links[grep(chiptype, links)];
  }
  # if list.only == 1, only the list of available packages will returned
  if(list.only == 1) {
    return(links)
  }
  # Install each linked package
  for (link in links) {
    smart.install.packages(url.package=link, update=update)
  }
}
# Install every annotation package from the Bioconductor annotation repository
# matching the Bioconductor version currently in use.
#   repo.index -- currently unused; the repository-selection code below is
#                 commented out
#   mirror     -- optional mirror URL, forwarded to smart.install.packages()
#   update     -- 0 skips packages that are already installed
smart.install.bioconductor.repo <- function(repo.index, mirror=NA, update=0) {
  # Check what version of Bioconductor is being used; bootstrap the
  # BiocInstaller package first if it cannot be loaded.
  bioc.installer.loaded <- try(library(BiocInstaller))
  # inherits() is the robust test for a try() failure; comparing class()
  # with == misbehaves when an object carries more than one class.
  if (inherits(bioc.installer.loaded, "try-error")) {
    smart.install.packages(bioconductor.package="BiocInstaller", mirror=mirror, update=update)
  }
  library(BiocInstaller)
  current.bioc.version <- biocVersion()
  # Install available annotation packages
  current.bioc.url <- paste("http://www.bioconductor.org/packages/", current.bioc.version, "/data/annotation", sep="")
  for (package in available.packages(contrib.url(current.bioc.url))[, "Package"]) {
    smart.install.packages(bioconductor.package = package, mirror = mirror, update=update)
  }
  # Select the given repository
  #orig.repos <- setRepositories(ind=c(repo.index))
  #
  #for (package in available.packages()[,"Package"]) {
  #  smart.install.packages(bioconductor.package = package, mirror = mirror, update=update)
  #}
  #
  # Restore original repositories
  #if(length(orig.repos) == 1) {
  #  if(orig.repos$repos["CRAN"] == "@CRAN@") {
  #    #orig.repos$repos["CRAN"] = "@CRAN@";
  #    options(repos = orig.repos);
  #  } else {
  #    setRepositories(orig.repos)
  #  }
  #} else {
  #  setRepositories(orig.repos)
  #}
}
# Install a single package from exactly one source: CRAN (`package`),
# Bioconductor (`bioconductor.package`), or a direct URL to a source tarball
# (`url.package`).  When update == 0, a package that is already installed is
# skipped.  Returns TRUE invisibly on success; stops on error.
smart.install.packages <- function(package=NA, bioconductor.package=NA, url.package=NA, mirror=NA, update=0) {
  # Check parameters: exactly one of the three specifiers must be given.
  package.defs <- c(package, bioconductor.package, url.package)
  if (sum(!is.na(package.defs)) != 1) {
    stop("Must use exactly one of the alternative ways to specify the package to install")
  }
  # Derive the package name; for URL installs it is parsed out of the
  # ".../<name>_<version>.tar.gz" file name.
  if (is.na(url.package)) {
    package.name <- package.defs[!is.na(package.defs)]
  } else {
    package.name <- gsub(".*/(.*)_.*", "\\1", url.package)
  }
  if (update == 0) {
    if (!is.installed(package.name)) {
      cat(paste("Will now install", package.name, "\n"))
    } else {
      cat(paste("Already installed", package.name, "\n"))
      return(invisible(TRUE))
    }
  }
  if (!is.na(package)) {
    # FIX: the original used scalar ifelse(), which silently truncates
    # getOption("repos") to its first element when it is a vector; a plain
    # if/else preserves all configured repositories.
    repos <- if (is.na(mirror)) getOption("repos") else mirror
    install.packages(pkgs=c(package), repos=repos)
  } else if (!is.na(bioconductor.package)) {
    source("http://www.bioconductor.org/biocLite.R")
    if (!is.na(mirror)) {
      options("BioC_mirror" = c("Mirror"=mirror))
    }
    biocLite(bioconductor.package, suppressUpdates=TRUE)
  } else if (!is.na(url.package)) {
    # Download URL to a temp file, install from it, then remove the file.
    tempfile.path <- tempfile("package", fileext=".tar.gz")
    a <- try(download.file(url=url.package, destfile=tempfile.path))
    # inherits() is the robust test for a try() failure.
    if (!inherits(a, "try-error")) {
      install.packages(pkgs=c(tempfile.path), repos=NULL)
      unlink(tempfile.path)
    } else {
      warning(paste("package", url.package, "is not a valid web-page", sep=" "))
      stop("Error occurred!")
    }
  }
  # (The old trailing else was unreachable: the exactly-one check above
  # guarantees one of the three branches is always taken.)
  # Install was successful
  invisible(TRUE)
}
# Test whether a package is installed and loadable.
# Fast path: the package is listed by installed.packages().  Fallback: try to
# load it with require(), silencing all output (some packages print
# unconditionally on load).  Returns a single logical.
is.installed <- function(package) {
  if (package %in% rownames(installed.packages())) {
    return(TRUE)
  }
  # nullfile() is the portable null device (sink("/dev/null") breaks on
  # Windows); on.exit() guarantees the sink is released even if require()
  # errors, which the original did not.
  sink(nullfile())
  on.exit(sink(), add = TRUE)
  suppressPackageStartupMessages(
    suppressWarnings(suppressMessages(
      require(package, character.only = TRUE, warn.conflicts = FALSE, quietly = TRUE)
    ))
  )
}
# Cross-check the custom-CDF package names referenced by the affy
# normalisation scripts against the available custom-CDF package list, and
# suggest close matches (stringdist::ain, maxDist = 3) for any missing name.
#   script.basename    -- admin script directory; the sibling "microarray"
#                         directory is assumed to hold the norm-affy scripts
#   db.custom.packages -- character vector of custom-CDF package paths/names
# NOTE(review): relies on a global `repo.cran` mirror variable that is not
# defined in this file -- confirm the caller sets it.
check.affy.customnames <- function(script.basename, db.custom.packages) {
  # Find R scripts used to normalize affy chips
  r.script.basename <- gsub("/admin/", "/microarray/", script.basename);
  all.packages <- rownames(installed.packages())  # NOTE(review): unused below
  all.r.scripts <- list.files(path=r.script.basename);
  affy.r.script <- all.r.scripts[grep("norm-affy", all.r.scripts)];
  # Install stringdist package. Needed for finding partial matches
  smart.install.packages(package="stringdist", mirror=repo.cran)
  library("stringdist");
  # Polish up custom-CDF names: reduce ".../<name>_<version>" to "<name>"
  if(length(db.custom.packages) > 0) {
    db.custom.packages <- gsub(".*/(.*)_.*", "\\1", db.custom.packages)
  } else {
    stop("Must specify a list of custom-CDF packages");
  }
  # For each affy-norm script, find code defining which custom-CDF packages are supported
  supported.custom.packages <- NULL;
  for(i in 1:length(affy.r.script)) {
    r.script <- scan(file=file.path(r.script.basename, affy.r.script[i]), what="", sep="\n")
    #Find instances in SADL descriptions
    # NOTE(review): the "> 0" sits inside length() here, so this evaluates
    # length(grep(...) > 0) -- it still works (non-empty iff there is a
    # match) but was probably meant as length(grep(...)) > 0.
    if(length(grep("PARAMETER custom\\.chiptype", r.script) > 0)) {
      sadl.row <- r.script[grep("PARAMETER custom\\.chiptype", r.script)];
      sadl.row <- gsub("^.*\\[", "", sadl.row);
      sadl.row <- gsub("\\].*$", "", sadl.row);
      packages.in.sadl.row <- unlist(strsplit(sadl.row, "\\s*,\\s*"))
      for(j in 1:length(packages.in.sadl.row)) {
        # Each entry looks like "label: name"; keep the name, and rewrite a
        # trailing parenthesised part to the "cdf" suffix.
        custom.package <- unlist(strsplit(packages.in.sadl.row[j], "\\s*:\\s*"))[2];
        custom.package <- gsub("\\(.+\\)", "cdf", custom.package);
        supported.custom.packages <- c(supported.custom.packages, custom.package);
      }
    }
    #Find other instances where parameter custom_cdf has been used
    if(length(grep("custom_cdf\\s*<-|custom_cdf\\s*=", r.script) > 0)) {
      rscript.row <- r.script[grep("custom_cdf\\s*<-|custom_cdf\\s*=", r.script)];
      rscript.row <- gsub("^.*<-|^.*=", "", rscript.row);
      rscript.row <- gsub("\\s+|\"", "", rscript.row);
      supported.custom.packages <- c(supported.custom.packages, rscript.row);
    }
  }
  # Check if the package exists
  # NOTE(review): this loop runs after the for-i loop has finished, so the
  # messages always name affy.r.script[i] for the *last* script only --
  # confirm whether the check was meant to run once per script.
  for(j in 1:length(supported.custom.packages)) {
    if(!(supported.custom.packages[j] %in% db.custom.packages)) {
      cat(paste("Package", supported.custom.packages[j], "in", affy.r.script[i], "not found\n"));
      # ain() is stringdist's approximate %in%, tolerating 3 edits.
      partial.match <- db.custom.packages[ain(db.custom.packages, supported.custom.packages[j], maxDist=3)];
      if(length(partial.match) > 0) {
        for(k in 1:length(partial.match)) {
          if(partial.match[k] %in% rownames(installed.packages()) == TRUE) {
            cat(paste("\tConsider using", partial.match[k], "\n"));
          } else {
            cat(paste("\tConsider installing and using", partial.match[k], "\n"));
          }
        }
      } else {
        cat(paste("\tPackage", supported.custom.packages[j], "in", affy.r.script[i], "has not matches in current custom-CDF database\n"));
      }
    }
  }
}
|
## makeCacheMatrix uses a superassignment operator ( <<- ) to create a "cached" matrix
## and its inverse (m) which can be
## The first function, makeCacheMatrix creates a special "matrix", which is
## really a list containing a function to
##
## set the value of the matrix
## get the value of the matrix
## set the value of the inverse
## get the value of the inverse
## Construct a cache-aware wrapper around a matrix.  The returned list exposes
## set/get for the matrix itself and setInverse/getInverse for a cached
## inverse; both values live in the closure environment via <<-.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve
##
## The following function calculates the inverse of the special "matrix"
## created with the above function. However, it first checks to see if the inverse
## has already been calculated. If so, it gets the inverse from the cache and
## skips the computation. Otherwise, it calculates the inverse of the data and
## sets the value of the inverse in the cache via the setInverse function.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously computed inverse is served from the cache (with a message);
## otherwise the inverse is computed with solve(), stored via setInverse(),
## and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)  # extra args forwarded to solve()
    x$setInverse(cached)           # memoise for subsequent calls
    return(cached)
  }
  message("getting cached data")
  cached
}
# Usage might be:
#
# test <- makeCacheMatrix(test)
# test$set(matrix(rnorm(100),10,10))
# test$get()
#
# itest <- cacheSolve(test)
# class(itest) # returned as matrix()
# class(test) # class is a list "CacheMatrix"
#
# itest %*% test$get() # test$get() returns a matrix()
|
/cachematrix.R
|
no_license
|
bbneo/ProgrammingAssignment2
|
R
| false
| false
| 2,074
|
r
|
## makeCacheMatrix uses a superassignment operator ( <<- ) to create a "cached" matrix
## and its inverse (m) which can be
## The first function, makeCacheMatrix creates a special "matrix", which is
## really a list containing a function to
##
## set the value of the matrix
## get the value of the matrix
## set the value of the inverse
## get the value of the inverse
## Construct a cache-aware wrapper around a matrix.  The returned list exposes
## set/get for the matrix itself and setInverse/getInverse for a cached
## inverse; both values live in the closure environment via <<-.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve
##
## The following function calculates the inverse of the special "matrix"
## created with the above function. However, it first checks to see if the inverse
## has already been calculated. If so, it gets the inverse from the cache and
## skips the computation. Otherwise, it calculates the inverse of the data and
## sets the value of the inverse in the cache via the setInverse function.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously computed inverse is served from the cache (with a message);
## otherwise the inverse is computed with solve(), stored via setInverse(),
## and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)  # extra args forwarded to solve()
    x$setInverse(cached)           # memoise for subsequent calls
    return(cached)
  }
  message("getting cached data")
  cached
}
# Usage might be:
#
# test <- makeCacheMatrix(test)
# test$set(matrix(rnorm(100),10,10))
# test$get()
#
# itest <- cacheSolve(test)
# class(itest) # returned as matrix()
# class(test) # class is a list "CacheMatrix"
#
# itest %*% test$get() # test$get() returns a matrix()
|
# http://www.capitalspectator.com/
# A Momentum-Based Trading Signal With Strategic Value
# load packages
library("TTR")
library("xts")
library("quantmod")
# download daily S&P 500 prices from Dec 31, 1990 forward
gspc <- getSymbols("^gspc", from = "1990-12-31", auto.assign = FALSE)
# error adjusted momentum function
# Error-adjusted momentum (EAM) signal.
#   x = OHLC price series (as returned by quantmod::getSymbols)
#   y = lookback period for the return forecast and the MAE window
#   z = SMA period for smoothing the error-adjusted returns
# Returns (via the last expression) a one-day-lagged 0/1 trading signal:
# 1 when the z-period SMA of error-adjusted returns is positive.
# The print() calls are debug traces of each intermediate series' length.
eam <- function(x, y, z) {
  # x=ticker, y=lookback period for forecast, z=SMA period
  a <- na.omit(ROC(Cl(x), 1, "discrete"))
  print(paste("a", length(a)))
  # forecast based on 'y' trailing period returns
  b <- na.omit(SMA(a, y))
  print(paste("b", length(b)))
  # lag forecasts by 1 period
  c <- na.omit(Lag(b, k = 1))
  print(paste("c", length(c)))
  # combine lagged forecasts with actual returns into one file
  d <- na.omit(cbind(c, a))
  print(paste("d", length(d)))
  # actual daily return less forecast
  # NOTE(review): apply() over rows returns a plain named vector; assumes
  # as.xts() can rebuild the time index from the names -- confirm.
  e <- as.xts(apply(d, 1, diff))
  print(paste("e", length(e)))
  # mean absolute error over a rolling y-period window
  f <- to.daily(na.omit(rollapplyr(e, y, function(x) mean(abs(x)))),
                drop.time = FALSE, OHLC = FALSE)
  print(paste("f", length(f)))
  # combine actual return with MAE into one file
  g <- cbind(a, f)
  print(paste("g", length(g)))
  # divide actual return by MAE
  h <- na.omit(a[, 1]/g[, 2])
  print(paste("h", length(h)))
  # generate 200-day moving average of adjusted return
  i <- na.omit(SMA(h, z))
  print(paste("i", length(i)))
  # lag adjusted return signal by one day for trading analysis
  j <- na.omit(Lag(ifelse(i > 0, 1, 0)))
}
eam.sig <- eam(gspc, 10, 200)
# function to generate raw EAM signal data
# Build cumulative wealth indexes (base = 100) for the EAM strategy and for
# buy-and-hold on the same asset.
#   x = OHLC price series, y = lookback period, z = SMA period (both forwarded
#       to eam())
# Returns an xts object with columns "model" and "asset".
# The commented print() blocks are debugging leftovers, kept as-is.
eam.ret <- function(x, y, z) {
  # x = ticker, y = lookback period for vol forecast, z = SMA period
  # Lagged 0/1 trading signal from the error-adjusted momentum model.
  a <- eam(x, y, z)
  # print("a")
  # print(head(a))
  # print(length(a))
  # Daily discrete returns of the asset.
  b <- na.omit(ROC(Cl(x), 1, "discrete"))
  # print("b")
  # print(head(b))
  # print(length(b))
  # Number of return observations that align with the signal (drop one).
  c <- length(a) - 1
  # print("c")
  # print(head(c))
  # print(length(c))
  d <- tail(b, c)
  # print("d")
  # print(head(d))
  # print(length(d))
  # Gate daily returns by the 0/1 signal.
  # NOTE(review): d and a differ in length by one; assumes xts arithmetic
  # aligns them on the shared index -- confirm.
  e <- d * a
  # print("e")
  # print(head(e))
  # print(length(e))
  # Model equity curve starting at 100.
  f <- cumprod(c(100, 1 + e))
  # print("f")
  # print(head(f))
  # print(length(f))
  g <- tail(b, c)
  # print("g")
  # print(head(g))
  # print(length(g))
  # Buy-and-hold equity curve starting at 100.
  h <- cumprod(c(100, 1 + g))
  # print("h")
  # print(head(h))
  # print(length(h))
  i <- cbind(f, h)
  colnames(i) <- c("model", "asset")
  # print("i")
  # print(head(i))
  # print(length(i))
  # Date index: prepend the day before the first aligned date so the curves
  # start at the base-100 point.
  date.a <- c((first(tail((as.Date(index(x))), c)) - 1), (tail((as.Date(index(x))),
              c)))
  # print("date.a")
  # print(head(date.a))
  # print(length(date.a))
  j <- xts(i, date.a)
  # print("j")
  # print(head(j))
  # print(length(j))
  return(j)
}
eam.model <- eam.ret(gspc, 10, 200)
# Return the raw error-adjusted momentum series (the z-period SMA of
# return / MAE), i.e. eam() without the debug printing and without the final
# 0/1 signal conversion and lag.
#   x = OHLC price series, y = lookback period, z = SMA period
eam.data <- function(x, y, z) {
  # x=ticker, y=lookback period for forecast, z=SMA period
  a <- na.omit(ROC(Cl(x), 1, "discrete"))  # daily discrete returns
  b <- na.omit(SMA(a, y)) # forecast based on 'y' trailing period returns
  c <- na.omit(Lag(b, k = 1)) # lag forecasts by 1 period
  d <- na.omit(cbind(c, a))  # lagged forecast alongside actual return
  e <- as.xts(apply(d, 1, diff))  # daily forecast error
  # Rolling y-period mean absolute error.
  # NOTE(review): drop.time = TRUE here but FALSE in eam() -- confirm which
  # is intended.
  f <- to.daily(na.omit(rollapplyr(e, y, function(x) mean(abs(x)))), drop.time = TRUE,
                OHLC = FALSE)
  g <- cbind(a, f)
  h <- na.omit(a[, 1]/g[, 2])  # error-adjusted return
  i <- na.omit(SMA(h, z))      # smoothed with a z-period SMA
  colnames(i) <- c("eam data")
  return(i)
}
eam.data.history <- eam.data(gspc, 10, 200)
|
/TODO-eam-momentum.R
|
no_license
|
PsaksuMeRap/r-misc
|
R
| false
| false
| 3,383
|
r
|
# http://www.capitalspectator.com/
# A Momentum-Based Trading Signal With Strategic Value
# load packages
library("TTR")
library("xts")
library("quantmod")
# download daily S&P 500 prices from Dec 31, 1990 forward
gspc <- getSymbols("^gspc", from = "1990-12-31", auto.assign = FALSE)
# error adjusted momentum function
# Error-adjusted momentum (EAM) signal.
#   x = OHLC price series (as returned by quantmod::getSymbols)
#   y = lookback period for the return forecast and the MAE window
#   z = SMA period for smoothing the error-adjusted returns
# Returns (via the last expression) a one-day-lagged 0/1 trading signal:
# 1 when the z-period SMA of error-adjusted returns is positive.
# The print() calls are debug traces of each intermediate series' length.
eam <- function(x, y, z) {
  # x=ticker, y=lookback period for forecast, z=SMA period
  a <- na.omit(ROC(Cl(x), 1, "discrete"))
  print(paste("a", length(a)))
  # forecast based on 'y' trailing period returns
  b <- na.omit(SMA(a, y))
  print(paste("b", length(b)))
  # lag forecasts by 1 period
  c <- na.omit(Lag(b, k = 1))
  print(paste("c", length(c)))
  # combine lagged forecasts with actual returns into one file
  d <- na.omit(cbind(c, a))
  print(paste("d", length(d)))
  # actual daily return less forecast
  # NOTE(review): apply() over rows returns a plain named vector; assumes
  # as.xts() can rebuild the time index from the names -- confirm.
  e <- as.xts(apply(d, 1, diff))
  print(paste("e", length(e)))
  # mean absolute error over a rolling y-period window
  f <- to.daily(na.omit(rollapplyr(e, y, function(x) mean(abs(x)))),
                drop.time = FALSE, OHLC = FALSE)
  print(paste("f", length(f)))
  # combine actual return with MAE into one file
  g <- cbind(a, f)
  print(paste("g", length(g)))
  # divide actual return by MAE
  h <- na.omit(a[, 1]/g[, 2])
  print(paste("h", length(h)))
  # generate 200-day moving average of adjusted return
  i <- na.omit(SMA(h, z))
  print(paste("i", length(i)))
  # lag adjusted return signal by one day for trading analysis
  j <- na.omit(Lag(ifelse(i > 0, 1, 0)))
}
eam.sig <- eam(gspc, 10, 200)
# function to generate raw EAM signal data
# Build cumulative wealth indexes (base = 100) for the EAM strategy and for
# buy-and-hold on the same asset.
#   x = OHLC price series, y = lookback period, z = SMA period (both forwarded
#       to eam())
# Returns an xts object with columns "model" and "asset".
# The commented print() blocks are debugging leftovers, kept as-is.
eam.ret <- function(x, y, z) {
  # x = ticker, y = lookback period for vol forecast, z = SMA period
  # Lagged 0/1 trading signal from the error-adjusted momentum model.
  a <- eam(x, y, z)
  # print("a")
  # print(head(a))
  # print(length(a))
  # Daily discrete returns of the asset.
  b <- na.omit(ROC(Cl(x), 1, "discrete"))
  # print("b")
  # print(head(b))
  # print(length(b))
  # Number of return observations that align with the signal (drop one).
  c <- length(a) - 1
  # print("c")
  # print(head(c))
  # print(length(c))
  d <- tail(b, c)
  # print("d")
  # print(head(d))
  # print(length(d))
  # Gate daily returns by the 0/1 signal.
  # NOTE(review): d and a differ in length by one; assumes xts arithmetic
  # aligns them on the shared index -- confirm.
  e <- d * a
  # print("e")
  # print(head(e))
  # print(length(e))
  # Model equity curve starting at 100.
  f <- cumprod(c(100, 1 + e))
  # print("f")
  # print(head(f))
  # print(length(f))
  g <- tail(b, c)
  # print("g")
  # print(head(g))
  # print(length(g))
  # Buy-and-hold equity curve starting at 100.
  h <- cumprod(c(100, 1 + g))
  # print("h")
  # print(head(h))
  # print(length(h))
  i <- cbind(f, h)
  colnames(i) <- c("model", "asset")
  # print("i")
  # print(head(i))
  # print(length(i))
  # Date index: prepend the day before the first aligned date so the curves
  # start at the base-100 point.
  date.a <- c((first(tail((as.Date(index(x))), c)) - 1), (tail((as.Date(index(x))),
              c)))
  # print("date.a")
  # print(head(date.a))
  # print(length(date.a))
  j <- xts(i, date.a)
  # print("j")
  # print(head(j))
  # print(length(j))
  return(j)
}
eam.model <- eam.ret(gspc, 10, 200)
# Return the raw error-adjusted momentum series (the z-period SMA of
# return / MAE), i.e. eam() without the debug printing and without the final
# 0/1 signal conversion and lag.
#   x = OHLC price series, y = lookback period, z = SMA period
eam.data <- function(x, y, z) {
  # x=ticker, y=lookback period for forecast, z=SMA period
  a <- na.omit(ROC(Cl(x), 1, "discrete"))  # daily discrete returns
  b <- na.omit(SMA(a, y)) # forecast based on 'y' trailing period returns
  c <- na.omit(Lag(b, k = 1)) # lag forecasts by 1 period
  d <- na.omit(cbind(c, a))  # lagged forecast alongside actual return
  e <- as.xts(apply(d, 1, diff))  # daily forecast error
  # Rolling y-period mean absolute error.
  # NOTE(review): drop.time = TRUE here but FALSE in eam() -- confirm which
  # is intended.
  f <- to.daily(na.omit(rollapplyr(e, y, function(x) mean(abs(x)))), drop.time = TRUE,
                OHLC = FALSE)
  g <- cbind(a, f)
  h <- na.omit(a[, 1]/g[, 2])  # error-adjusted return
  i <- na.omit(SMA(h, z))      # smoothed with a z-period SMA
  colnames(i) <- c("eam data")
  return(i)
}
eam.data.history <- eam.data(gspc, 10, 200)
|
#' Recombine.
#'
#' Carry out crossover of parent chromosomes in a mating pool.
#'
#' @param pop_mating Matrix of population of chromosomes (0/1 entries) that
#' form the mating pool.
#' @param pop_size Integer; Number of chromosomes in a generation.
#' @param method String; "onepoint", "twopoint", "uniform" (default);
#' Type of crossover, at one point, at two points or uniformly (at all possible points).
#' @param prob_recombine Numeric, between 0 and 1; Default is 0.6;
#' Probability of recombination.
#' @param do_parallel Logical; Default FALSE; Do in parallel? Requires a
#' foreach backend to be registered by the caller.
#' @return Matrix of population of chromosomes resulting from recombination.
recombine <- function(pop_mating,
                      pop_size,
                      method = "uniform",
                      prob_recombine = 0.6,
                      do_parallel = FALSE) {
  # Validate inputs up front.
  stopifnot(is.matrix(pop_mating))
  stopifnot(all(c(pop_mating) %in% c(0, 1)))
  stopifnot(is.numeric(pop_size))
  stopifnot(method %in% c("onepoint", "twopoint", "uniform"))
  stopifnot(is.logical(do_parallel))
  stopifnot(is.numeric(prob_recombine))
  stopifnot(prob_recombine >= 0 & prob_recombine <= 1)
  # Decide up front, per child, whether crossover happens at all.
  do_recombine <- as.logical(rbinom(pop_size, 1, prob_recombine))
  if (do_parallel) {
    # seq_len() replaces the 1:n idiom (safe if pop_size were ever 0).
    pop_new <- foreach(i = seq_len(pop_size), .combine = rbind) %dopar% {
      indices_parents <- sample(seq_len(nrow(pop_mating)), size = 2)
      if (do_recombine[i]) {
        recombine_once(parent1 = pop_mating[indices_parents[1], ],
                       parent2 = pop_mating[indices_parents[2], ],
                       method = method)
      } else {
        # No crossover: child is a copy of the first sampled parent.
        pop_mating[indices_parents[1], ]
      }
    }
  } else {
    # Preallocate the result instead of growing it row by row.
    pop_new <- matrix(NA, pop_size, ncol(pop_mating))
    for (i in seq_len(pop_size)) {
      indices_parents <- sample(seq_len(nrow(pop_mating)), size = 2)
      if (do_recombine[i]) {
        pop_new[i, ] <- recombine_once(parent1 = pop_mating[indices_parents[1], ],
                                       parent2 = pop_mating[indices_parents[2], ],
                                       method = method)
      } else {
        pop_new[i, ] <- pop_mating[indices_parents[1], ]
      }
    }
  }
  return(pop_new)
}
#' Recombine once.
#'
#' Carry out crossover of two parent chromosomes to produce one child chromosome.
#'
#' @param parent1 Integer vector of 1st parent chromosome containing 1's and 0's.
#' @param parent2 Integer vector of 2nd parent chromosome containing 1's and 0's.
#' @param method String; "onepoint", "twopoint", "uniform" (default);
#' Type of crossover, at one point, at two points or uniformly (at all possible points).
#' @return Integer vector of child chromosome containing 1's and 0's.
recombine_once <- function(
  parent1,
  parent2,
  method = "uniform"
) {
  # Carry out crossover of two parent chromosomes (0/1 vectors of equal
  # length) to produce one child chromosome.
  #
  # Validate method up front: in the original structure an unrecognized
  # method left `child` unassigned, failing later with an obscure
  # "object 'child' not found" error (and only when the parents differed).
  method <- match.arg(method, c("onepoint", "twopoint", "uniform"))
  if (all(parent1 == parent2)) {
    # Crossover has no effect when both parents are identical.
    return(parent1)
  }
  num_genes <- length(parent1)
  if (method == "onepoint") {
    # Single cut point; a cut at the last gene copies parent1 unchanged.
    breakpoint <- sample(1:num_genes, size = 1)
    if (breakpoint == num_genes) {
      child <- parent1
    } else {
      child <- c(parent1[1:breakpoint], parent2[(breakpoint + 1):num_genes])
    }
  } else if (method == "twopoint") {
    # Two distinct cut points (sample() draws without replacement), sorted.
    breakpoints <- sort(sample(1:num_genes, size = 2))
    if (breakpoints[2] == num_genes) {
      # Second cut at the end: no trailing parent1 segment.
      child <- c(parent1[1:breakpoints[1]],
                 parent2[(breakpoints[1] + 1):breakpoints[2]])
    } else {
      child <- c(parent1[1:breakpoints[1]],
                 parent2[(breakpoints[1] + 1):breakpoints[2]],
                 parent1[(breakpoints[2] + 1):num_genes])
    }
  } else {
    # uniform: each gene drawn independently from either parent with
    # probability 1/2.
    child <- ifelse(rbinom(num_genes, 1, 0.5) == 1, parent1, parent2)
  }
  child
}
|
/R/recombine.R
|
no_license
|
jrnew/genetic-algo
|
R
| false
| false
| 3,620
|
r
|
#' Recombine.
#'
#' Carry out crossover of parent chromosomes in a mating pool.
#'
#' @param pop_mating Matrix of population of chromosomes that form the mating pool.
#' @param pop_size Integer; Number of chromosomes in a generation.
#' @param method String; "onepoint", "twopoint", "uniform" (default);
#' Type of crossover, at one point, at two points or uniformly (at all possible points).
#' @param prob_recombine Numeric, between 0 and 1; Default is 0.6;
#' Probability of recombination.
#' @param do_parallel Logical; Default FALSE; Do in parallel?
#' @return Matrix of population of chromosomes resulting from recombination.
# NOTE(review): byte-for-byte duplicate of the recombine() definition that
# appears earlier in this file (an artifact of the dataset extraction);
# the later definition silently shadows the earlier one when sourced.
recombine <- function(
  pop_mating,
  pop_size,
  method = "uniform",
  prob_recombine = 0.6,
  do_parallel = FALSE
) {
  stopifnot(is.matrix(pop_mating))
  stopifnot(all(c(pop_mating) %in% c(0, 1)))
  stopifnot(is.numeric(pop_size))
  stopifnot(method %in% c("onepoint", "twopoint", "uniform"))
  stopifnot(is.logical(do_parallel))
  stopifnot(is.numeric(prob_recombine))
  stopifnot(prob_recombine >= 0 & prob_recombine <= 1)
  # TRUE entries mark children produced by crossover; FALSE children are
  # straight copies of one sampled parent.
  do_recombine <- as.logical(rbinom(pop_size, 1, prob_recombine))
  if (do_parallel) {
    # Requires a registered foreach backend (e.g. doParallel).
    pop_new <- foreach (i = 1:pop_size, .combine = rbind) %dopar% {
      indices_parents <- sample(1:nrow(pop_mating), size = 2)
      if (do_recombine[i]) {
        recombine_once(parent1 = pop_mating[indices_parents[1], ],
                       parent2 = pop_mating[indices_parents[2], ],
                       method = method)
      } else {
        pop_mating[indices_parents[1], ]
      }
    }
  } else {
    # Serial path with a preallocated result matrix.
    pop_new <- matrix(NA, pop_size, ncol(pop_mating))
    for (i in 1:pop_size) {
      indices_parents <- sample(1:nrow(pop_mating), size = 2)
      if (do_recombine[i]) {
        pop_new[i, ] <- recombine_once(parent1 = pop_mating[indices_parents[1], ],
                                       parent2 = pop_mating[indices_parents[2], ],
                                       method = method)
      } else {
        pop_new[i, ] <- pop_mating[indices_parents[1], ]
      }
    }
  }
  return(pop_new)
}
#' Recombine once.
#'
#' Carry out crossover of two parent chromosomes to produce one child chromosome.
#'
#' @param parent1 Integer vector of 1st parent chromosome containing 1's and 0's.
#' @param parent2 Integer vector of 2nd parent chromosome containing 1's and 0's.
#' @param method String; "onepoint", "twopoint", "uniform" (default);
#' Type of crossover, at one point, at two points or uniformly (at all possible points).
#' @return Integer vector of child chromosome containing 1's and 0's.
# NOTE(review): duplicate of the recombine_once() definition earlier in
# this file (dataset-extraction artifact).  Also note: an unrecognized
# `method` leaves `child` unassigned and fails with "object 'child' not
# found" when the parents differ.
recombine_once <- function(
  parent1,
  parent2,
  method = "uniform"
) {
  if (all(parent1 == parent2)) {
    # Crossover not necessary (has no effect) if both parents are identical
    child <- parent1
  } else {
    num_genes <- length(parent1)
    if (method == "onepoint") {
      # Single cut point; a cut at the last gene copies parent1 unchanged.
      breakpoint <- sample(1:num_genes, size = 1)
      if (breakpoint == num_genes) {
        child <- parent1
      } else {
        child <- c(parent1[1:breakpoint], parent2[(breakpoint + 1):num_genes])
      }
    } else if (method == "twopoint") {
      # Two distinct sorted cut points (sample() without replacement).
      breakpoints <- sort(sample(1:num_genes, size = 2))
      if (breakpoints[2] == num_genes) {
        child <- c(parent1[1:breakpoints[1]], parent2[(breakpoints[1] + 1):breakpoints[2]])
      } else {
        child <- c(parent1[1:breakpoints[1]], parent2[(breakpoints[1] + 1):breakpoints[2]],
                   parent1[(breakpoints[2] + 1):num_genes])
      }
    } else if (method == "uniform") {
      # Each gene drawn independently from either parent.
      child <- ifelse(rbinom(num_genes, 1, 0.5) == 1, parent1, parent2)
    }
  }
  return(child)
}
|
# run_analysis.R -- build a tidy summary of the UCI HAR dataset.
#
# Steps: read activity labels and feature names, keep only mean()/std()
# features, assemble test and train sets (activity code + subject +
# features), merge them, attach readable activity names, then write a tidy
# dataset holding the mean of every variable per activity/subject pair.

# Install packages only when missing (unconditional install.packages() in a
# script re-downloads on every run).
if (!requireNamespace("data.table", quietly = TRUE)) install.packages("data.table")
if (!requireNamespace("stringr", quietly = TRUE)) install.packages("stringr")

### PART ONE
# Activity label lookup table (code -> descriptive name).
labels <- read.table("./UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt",
                     col.names = c("labelcode", "label"))
# Feature names for the 561 measurement columns.
features <- read.table("./UCI HAR Dataset/UCI HAR Dataset/features.txt")
# Keep only mean() / std() features, as instructed.
filter_features <- grep("mean\\(|std\\(", features[, 2])
# Test set: measurements, activity codes and subject ids.
xtest <- read.table("./UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt",
                    col.names = features[, 2], check.names = FALSE)
xtest <- xtest[, filter_features]
testlabel <- read.table("./UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt",
                        col.names = "labelcode")
testsubject <- read.table("./UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt",
                          col.names = "subject")
ttest <- cbind(testlabel, testsubject, xtest)

### PART TWO
# Train set, assembled the same way.  (The original script read
# subject_train.txt twice; one read is sufficient.)
xtrain <- read.table("./UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt",
                     col.names = features[, 2], check.names = FALSE)
xtrain <- xtrain[, filter_features]  # only mean/std features
trainlabel <- read.table("./UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt",
                         col.names = "labelcode")
trainsubject <- read.table("./UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt",
                           col.names = "subject")
ttrain <- cbind(trainlabel, trainsubject, xtrain)

### PART THREE
# Merge both data sets.
total <- rbind(ttest, ttrain)
# Replace activity codes with descriptive names ("labels =" spelled out;
# the original relied on partial matching of "label =").
total$labelcode <- factor(total$labelcode, levels = labels$labelcode,
                          labels = labels$label)
# NOTE(review): the original then dropped column 2 (total <- total[, -2]),
# which removes the "subject" column that melt() below requires, so the
# script could not run to completion.  That drop has been removed.

### PART FOUR
# Reshape to long form, compute the mean of every variable for each
# activity/subject pair, and save the tidy result.
library(reshape2)
meltdata <- melt(total, id = c("labelcode", "subject"))
tidydata <- dcast(meltdata, labelcode + subject ~ variable, mean)
write.table(tidydata, file = "tidydata.txt", quote = FALSE, row.names = FALSE, sep = "\t")
|
/run_analysis.R
|
no_license
|
aho-git/Getting
|
R
| false
| false
| 2,348
|
r
|
# NOTE(review): duplicate copy of run_analysis.R (dataset-extraction
# artifact).  Known issues flagged inline below.
#Install packages
install.packages ("data.table")
install.packages ("stringr")
###PART ONE
#This is the labels for each activity
labels<-read.table("./UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt", col.names=c("labelcode", "label"))
#getting the features
features<-read.table("./UCI HAR Dataset/UCI HAR Dataset/features.txt")
#filtering the features
filter_features<-grep("mean\\(|std\\(", features[, 2]) #takes only features containing mean() or std(), as instructed
#READING IN The X_TEST DATASET
xtest<-read.table("./UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt", col.names = features[,2], check.names=FALSE)
xtest<-xtest[, filter_features]
#READING IN Y_TEST AND SUBJECT_TEST
testlabel<-read.table("./UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt", col.names="labelcode")
testsubject<-read.table("./UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
#BINDING ALL 3 DATASETS TOGETHER
ttest<-cbind(testlabel, testsubject, xtest)
####PART TWO
#Reading in all the data for training
trainsubject<-read.table("./UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", col.names="subject")
xtrain<-read.table("./UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt",col.names=features[,2], check.names=FALSE)
xtrain<-xtrain[, filter_features] #Only want features that have mean/sd
trainlabel<-read.table("./UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt", col.names="labelcode")
# NOTE(review): subject_train.txt is read a second time here; redundant.
trainsubject<-read.table("./UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", col.names="subject")
ttrain<-cbind(trainlabel, trainsubject, xtrain) #BINDING ALL TRAINING DATASET TOGETHER
###PART 3
#Going to merge both data sets now
total<-rbind(ttest, ttrain)
#Giving the labelcodes an actual activity name/description
# NOTE(review): "label=" relies on partial matching of factor()'s "labels=".
total$labelcode<-factor(total$labelcode, levels=labels$labelcode, label=labels$label)
# NOTE(review): this drops column 2, which is "subject" -- but melt() below
# uses subject as an id variable, so this line appears to break the script.
total<-total[,-2] #Removing one column that is an overlap from merging the datasets
###PART 4
#reshaping the array for a tidy data set then saving it
library(reshape2)
meltdata<-melt(total, id=c("labelcode", "subject"))
## produce the tidy dataset with mean/average of each variable
## for each activity and each subject
tidydata <- dcast(meltdata, labelcode + subject ~ variable, mean)
## write tidy dataset to hard drive
write.table(tidydata, file="tidydata.txt", quote=FALSE, row.names=FALSE, sep="\t")
|
# Allele-specific read counting for one sample (array-style batch script;
# the sample index is hard-coded per script copy).
i = 31
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
# Non-overlapping exon definitions used as counting bins.
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
# List every haplotype-1 allele-specific BAM; sample names are obtained by
# stripping the common suffix.
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
# Count reads per exon for haplotype 1 ...
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
# ... and for haplotype 2 (same BAM name with _hap2 substituted).
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
/data_preparation/R_batch3/_step3/step3_countReads_EA.30.R
|
no_license
|
jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples
|
R
| false
| false
| 650
|
r
|
# NOTE(review): duplicate copy of the read-counting batch script above
# (dataset-extraction artifact).
i = 31
library(isoform, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
bedFile = "/nas02/home/w/e/weisun/research/data/human/Homo_sapiens.GRCh37.66.nonoverlap.exon.bed"
setwd("/lustre/scr/w/e/weisun/TCGA/bam/")
cmd = "ls *_asCounts_hetSNP_EA_hap1.bam"
ffs = system(cmd, intern=TRUE)
length(ffs)
head(ffs)
sams = gsub("_asCounts_hetSNP_EA_hap1.bam", "", ffs)
sam1 = sams[i]
cat(i, sam1, date(), "\n")
bamFile = ffs[i]
outFile = sprintf("%s_asCounts_hap1.txt", sam1)
countReads(bamFile, bedFile, outFile)
bamFile = gsub("_hap1", "_hap2", ffs[i], fixed=TRUE)
outFile = sprintf("%s_asCounts_hap2.txt", sam1)
countReads(bamFile, bedFile, outFile)
|
#Load the required packages
library(tm)
library(wordcloud)
library(RColorBrewer)

#Load the required Data
setwd('~/Coding/R/RahulArnab/')
rawData = read.csv('RahulGandhiArnab.csv')
write.csv(rawData,'InterviewData.csv')

# Split the transcript into each speaker's lines, keyed on the
# "Rahul:" / "Arnab:" prefix of each row of the first column.
RahulData = ''
ArnabData = ''
for(i in rawData[,1]){
  if(length(grep('^Rahul:',i)) > 0){
    RahulData <- c(RahulData,i)
  } else if(length(grep('^Arnab:',i)) > 0 ){
    ArnabData <- c(ArnabData,i)
  }
}

# Word-cloud generating function; also prints the top 5 words.
#   name         - prefix for the output PNG file
#   conversation - character vector of one speaker's lines
#   stopwords    - extra stopwords to drop besides the English defaults
wordcloudGenerator <- function(name,conversation,stopwords){
  # build a corpus
  mydata.corpus <- Corpus(VectorSource(conversation))
  # make each letter lowercase.  Plain functions like tolower must be
  # wrapped in content_transformer() for tm >= 0.6; the original
  # tm_map(corpus, tolower) corrupts the corpus and fails downstream.
  mydata.corpus <- tm_map(mydata.corpus, content_transformer(tolower))
  mydata.corpus <- tm_map(mydata.corpus, removeNumbers)
  # remove punctuation
  mydata.corpus <- tm_map(mydata.corpus, removePunctuation)
  # remove generic and custom stopwords
  my_stopwords <- c(stopwords('english'),stopwords)
  mydata.corpus <- tm_map(mydata.corpus, removeWords, my_stopwords)
  # build a term-document matrix and rank terms by total frequency
  tdm <- TermDocumentMatrix(mydata.corpus)
  tdm <- removeSparseTerms(tdm, sparse=0.999)
  m <- as.matrix(tdm)
  v <- sort(rowSums(m),decreasing=TRUE)
  d <- data.frame(word = names(v),freq=v)
  # sequential palette, dropping the two lightest (near-invisible) shades
  pal <- brewer.pal(9, "PuBuGn")
  pal <- pal[-(1:2)]
  png(paste0(name,"wordcloud.png"), width=1280,height=800)
  wordcloud(d$word,d$freq, colors=pal)
  dev.off()
  print(d[1:5,])
}

#Generate their respective word clouds
stopwords <- c( 'rahul','arnab','going','gandhi','want','question','can','say','will','said','narendra')
wordcloudGenerator('Arnab',ArnabData,stopwords)
wordcloudGenerator('Rahul',RahulData,stopwords)
|
/RahulGandhi.R
|
no_license
|
samzer/rahularnabWorldCloud
|
R
| false
| false
| 1,760
|
r
|
# NOTE(review): duplicate copy of the word-cloud script above
# (dataset-extraction artifact).  For tm >= 0.6, tm_map(corpus, tolower)
# below must be tm_map(corpus, content_transformer(tolower)) -- confirm
# which tm version this was written against.
#Load the required packages
library(tm)
library(wordcloud)
library(RColorBrewer)
#Load the required Data
setwd('~/Coding/R/RahulArnab/')
rawData = read.csv('RahulGandhiArnab.csv')
write.csv(rawData,'InterviewData.csv')
#Initializing the variable to store their respective conversation
RahulData = ''
ArnabData = ''
for(i in rawData[,1]){
  if(length(grep('^Rahul:',i)) > 0){
    RahulData <- c(RahulData,i)
  } else if(length(grep('^Arnab:',i)) > 0 ){
    ArnabData <- c(ArnabData,i)
  }
}
#Word cloud generating function. Also prints the top 5 words
wordcloudGenerator <- function(name,conversation,stopwords){
  # build a corpus
  mydata.corpus <- Corpus(VectorSource(conversation))
  # make each letter lowercase
  mydata.corpus <- tm_map(mydata.corpus, tolower)
  mydata.corpus <- tm_map(mydata.corpus, removeNumbers)
  # remove punctuation
  mydata.corpus <- tm_map(mydata.corpus, removePunctuation)
  # remove generic and custom stopwords
  my_stopwords <- c(stopwords('english'),stopwords)
  mydata.corpus <- tm_map(mydata.corpus, removeWords, my_stopwords)
  # build a term-document matrix
  tdm <- TermDocumentMatrix(mydata.corpus)
  tdm <- removeSparseTerms(tdm, sparse=0.999)
  m <- as.matrix(tdm)
  v <- sort(rowSums(m),decreasing=TRUE)
  d <- data.frame(word = names(v),freq=v)
  pal <- brewer.pal(9, "PuBuGn")
  pal <- pal[-(1:2)]
  png(paste(name,"wordcloud.png",sep=""), width=1280,height=800)
  wordcloud(d$word,d$freq, colors=pal)
  dev.off()
  print(d[1:5,])
}
#Generate their respective word clouds
stopwords <- c( 'rahul','arnab','going','gandhi','want','question','can','say','will','said','narendra')
wordcloudGenerator('Arnab',ArnabData,stopwords)
wordcloudGenerator('Rahul',RahulData,stopwords)
|
# Script for analyzing DDE game data.
# Adam Morris
# 8/16/2014
# NOTE(review): glmer() comes from lme4, which is never loaded here --
# presumably attached in the calling session; confirm.
# Get data
data = read.csv("Parsed.csv");
data_crits = data[data$Crits==1,];
data_crits_incog = data[data$Crits_incog==1,];
# Create tosslist: fit per-subject random slopes for the model-based (MB)
# and model-free (MF) predictors on non-critical trials, then exclude
# subjects whose composite MB AND MF slopes are both in the bottom 20%.
DDEModel_noncrits <- glmer(Choice~MB_X+MB_Y+MF_X+MF_Y+(1+MB_X+MB_Y+MF_X+MF_Y|Subj),family=binomial,data=data);
DDEModel_noncrits_coef = coef(DDEModel_noncrits);
slopes_MBX = DDEModel_noncrits_coef$Subj$MB_X;
slopes_MBY = DDEModel_noncrits_coef$Subj$MB_Y;
slopes_MFX = DDEModel_noncrits_coef$Subj$MF_X;
slopes_MFY = DDEModel_noncrits_coef$Subj$MF_Y;
# Composite slopes: Y-coefficient minus X-coefficient.
slopes_MB = -slopes_MBX+slopes_MBY;
slopes_MF = -slopes_MFX+slopes_MFY;
tosslist = (slopes_MB < quantile(slopes_MB,.2,type=1) & slopes_MF < quantile(slopes_MF,.2,type=1));
#tosslist = (slopes_MB < quantile(slopes_MB,.2,type=1));
tosslist <- as.numeric(row.names(DDEModel_noncrits_coef$Subj)[tosslist]);
# Expand the subject-level toss list to row-level masks for each subset.
tosslist_crits <- matrix(FALSE,length(data_crits$Subj),1);
for (i in 1:length(data_crits$Subj)) {
  if (any(data_crits$Subj[i]==tosslist)) tosslist_crits[i]=TRUE;
}
tosslist_crits_incog <- matrix(FALSE,length(data_crits_incog$Subj),1);
for (i in 1:length(data_crits_incog$Subj)) {
  if (any(data_crits_incog$Subj[i]==tosslist)) tosslist_crits_incog[i]=TRUE;
}
#tosslist_crits_dummy <- matrix(FALSE,length(data_crits_dummy$Subj),1);
#for (i in 1:length(data_crits_dummy$Subj)) {
#  if (any(data_crits_dummy$Subj[i]==tosslist)) tosslist_crits_dummy[i]=TRUE;
#}
#tosslist_crits_dummy_incog <- matrix(FALSE,length(data_crits_dummy_incog$Subj),1);
#for (i in 1:length(data_crits_dummy_incog$Subj)) {
#  if (any(data_crits_dummy_incog$Subj[i]==tosslist)) tosslist_crits_dummy_incog[i]=TRUE;
#}
# Do models: logistic mixed models on critical trials, with and without the
# tossed subjects, plus null (intercept-only) comparisons.
model_crits = glmer(Choice~MFonMB+(1+MFonMB|Subj),family=binomial,data=data_crits[!tosslist_crits,],control=glmerControl(optimizer="bobyqa"));
model_crits_null = glmer(Choice~1+(1|Subj),family=binomial,data=data_crits[!tosslist_crits,],control=glmerControl(optimizer="bobyqa"));
model_crits_incog = glmer(Choice~MFonMB+(1+MFonMB|Subj),family=binomial,data=data_crits_incog[!tosslist_crits_incog,],control=glmerControl(optimizer="bobyqa"));
model_crits_incog_null = glmer(Choice~1+(1|Subj),family=binomial,data=data_crits_incog[!tosslist_crits_incog,],control=glmerControl(optimizer="bobyqa"));
model_crits_notoss = glmer(Choice~MFonMB+(1+MFonMB|Subj),family=binomial,data=data_crits,control=glmerControl(optimizer="bobyqa"));
model_crits_notoss_coef = coef(model_crits_notoss);
slopes_MFonMB = model_crits_notoss_coef$Subj$MFonMB;
# Do per-subject MFonMB slopes relate to the MB/MF composites?
test <- glm(slopes_MFonMB~slopes_MB*slopes_MF);
model_crits_roundnum = glmer(Choice~MFonMB*RoundNum+(1+MFonMB*RoundNum|Subj),family=binomial,data=data_crits[!tosslist_crits,],control=glmerControl(optimizer="bobyqa"));
# Check convergence
# NOTE(review): model_all_rp is not defined anywhere in this script, so
# this line errors as written -- likely meant one of the models above.
relgrad <- with(model_all_rp @ optinfo$derivs,solve(Hessian,gradient));
max(abs(relgrad))
|
/Game/Data/Old/Round 5/Analyze.R
|
no_license
|
adammmorris/DDE
|
R
| false
| false
| 2,823
|
r
|
# NOTE(review): duplicate copy of the DDE analysis script above
# (dataset-extraction artifact).  Same caveats apply: lme4 is never
# loaded, and model_all_rp at the bottom is undefined.
# Script for analyzing DDE game data.
# Adam Morris
# 8/16/2014
# Get data
data = read.csv("Parsed.csv");
data_crits = data[data$Crits==1,];
data_crits_incog = data[data$Crits_incog==1,];
# Create tosslist
DDEModel_noncrits <- glmer(Choice~MB_X+MB_Y+MF_X+MF_Y+(1+MB_X+MB_Y+MF_X+MF_Y|Subj),family=binomial,data=data);
DDEModel_noncrits_coef = coef(DDEModel_noncrits);
slopes_MBX = DDEModel_noncrits_coef$Subj$MB_X;
slopes_MBY = DDEModel_noncrits_coef$Subj$MB_Y;
slopes_MFX = DDEModel_noncrits_coef$Subj$MF_X;
slopes_MFY = DDEModel_noncrits_coef$Subj$MF_Y;
slopes_MB = -slopes_MBX+slopes_MBY;
slopes_MF = -slopes_MFX+slopes_MFY;
tosslist = (slopes_MB < quantile(slopes_MB,.2,type=1) & slopes_MF < quantile(slopes_MF,.2,type=1));
#tosslist = (slopes_MB < quantile(slopes_MB,.2,type=1));
tosslist <- as.numeric(row.names(DDEModel_noncrits_coef$Subj)[tosslist]);
tosslist_crits <- matrix(FALSE,length(data_crits$Subj),1);
for (i in 1:length(data_crits$Subj)) {
  if (any(data_crits$Subj[i]==tosslist)) tosslist_crits[i]=TRUE;
}
tosslist_crits_incog <- matrix(FALSE,length(data_crits_incog$Subj),1);
for (i in 1:length(data_crits_incog$Subj)) {
  if (any(data_crits_incog$Subj[i]==tosslist)) tosslist_crits_incog[i]=TRUE;
}
#tosslist_crits_dummy <- matrix(FALSE,length(data_crits_dummy$Subj),1);
#for (i in 1:length(data_crits_dummy$Subj)) {
#  if (any(data_crits_dummy$Subj[i]==tosslist)) tosslist_crits_dummy[i]=TRUE;
#}
#tosslist_crits_dummy_incog <- matrix(FALSE,length(data_crits_dummy_incog$Subj),1);
#for (i in 1:length(data_crits_dummy_incog$Subj)) {
#  if (any(data_crits_dummy_incog$Subj[i]==tosslist)) tosslist_crits_dummy_incog[i]=TRUE;
#}
# Do models
model_crits = glmer(Choice~MFonMB+(1+MFonMB|Subj),family=binomial,data=data_crits[!tosslist_crits,],control=glmerControl(optimizer="bobyqa"));
model_crits_null = glmer(Choice~1+(1|Subj),family=binomial,data=data_crits[!tosslist_crits,],control=glmerControl(optimizer="bobyqa"));
model_crits_incog = glmer(Choice~MFonMB+(1+MFonMB|Subj),family=binomial,data=data_crits_incog[!tosslist_crits_incog,],control=glmerControl(optimizer="bobyqa"));
model_crits_incog_null = glmer(Choice~1+(1|Subj),family=binomial,data=data_crits_incog[!tosslist_crits_incog,],control=glmerControl(optimizer="bobyqa"));
model_crits_notoss = glmer(Choice~MFonMB+(1+MFonMB|Subj),family=binomial,data=data_crits,control=glmerControl(optimizer="bobyqa"));
model_crits_notoss_coef = coef(model_crits_notoss);
slopes_MFonMB = model_crits_notoss_coef$Subj$MFonMB;
test <- glm(slopes_MFonMB~slopes_MB*slopes_MF);
model_crits_roundnum = glmer(Choice~MFonMB*RoundNum+(1+MFonMB*RoundNum|Subj),family=binomial,data=data_crits[!tosslist_crits,],control=glmerControl(optimizer="bobyqa"));
# Check convergence
relgrad <- with(model_all_rp @ optinfo$derivs,solve(Hessian,gradient));
max(abs(relgrad))
|
# Extracted example file for pacman::p_install (installs and loads a
# package); the example itself is guarded by "Not run".
library(pacman)
### Name: p_install
### Title: Installs & Loads Packages
### Aliases: p_install p_get
### Keywords: install package
### ** Examples
## Not run: p_install(pacman)
|
/data/genthat_extracted_code/pacman/examples/p_install.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 185
|
r
|
# NOTE(review): duplicate copy of the pacman example file above
# (dataset-extraction artifact).
library(pacman)
### Name: p_install
### Title: Installs & Loads Packages
### Aliases: p_install p_get
### Keywords: install package
### ** Examples
## Not run: p_install(pacman)
|
press <- function(lm){
  # PRESS (prediction sum of squares) for a fitted linear model: the sum
  # over observations of the squared leave-one-out prediction residual
  # e_i / (1 - h_ii), where h_ii is the i-th hat-matrix diagonal
  # (leverage).
  #
  # Uses stats::hatvalues() for the leverages instead of forming the full
  # n x n hat matrix X (X'X)^{-1} X' as the original did: that is O(n^2)
  # memory and numerically less stable than the QR-based leverages.
  e.i <- resid(lm)
  sum((e.i / (1 - hatvalues(lm)))^2)
}
|
/asbio/R/press.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 139
|
r
|
press <- function(lm){
  # Prediction sum of squares (PRESS) for a fitted linear model.  Each
  # ordinary residual is inflated by 1/(1 - leverage) to give the
  # leave-one-out prediction residual; PRESS is the sum of their squares.
  design <- model.matrix(lm)
  hat_mat <- design %*% solve(crossprod(design)) %*% t(design)
  loo_resid <- resid(lm) / (1 - diag(hat_mat))
  sum(loo_resid^2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BipartiteGroupIPW_function.R
\name{BipartiteGroupIPW}
\alias{BipartiteGroupIPW}
\title{Function that estimates the cluster average potential outcome in settings of
bipartite interference.}
\usage{
BipartiteGroupIPW(
int_dta,
out_dta,
cov_cols,
phi_hat,
alpha,
trt_col = NULL,
out_col = NULL,
integral_bound = 10
)
}
\arguments{
\item{int_dta}{Data on the interventional units including treatment,
neighborhood index and covariates. Neighborhood variable should be named
'neigh' and only include continuous integers from 1 to the number of
neighborhoods.}
\item{out_dta}{Data on the outcome units including outcome, neighborhood,
and index of the closest interventional unit as 'closest_int'.
Neighborhood variable should be named 'neigh' and only include continuous
integers from 1 to the number of neighborhoods.}
\item{cov_cols}{The indices including the covariates of the ps model.}
\item{phi_hat}{A list with two elements. The first one is a vector of
coefficients of the ps, and the second one is the random effect variance.}
\item{alpha}{The values of alpha for which we want to estimate the group
average potential outcome.}
\item{trt_col}{If the treatment is not named 'A' in int_dta, specify the
treatment column index.}
\item{out_col}{If the outcome is not named 'Y' in out_dta, specify the
outcome column index.}
\item{integral_bound}{The number of standard deviations of the random effect
that will be used as the lower and upper limit. Defaults to 10.}
}
\description{
Function that estimates the cluster average potential outcome in settings of
bipartite interference.
}
|
/man/BipartiteGroupIPW.Rd
|
no_license
|
gpapadog/Interference
|
R
| false
| true
| 1,686
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BipartiteGroupIPW_function.R
\name{BipartiteGroupIPW}
\alias{BipartiteGroupIPW}
\title{Function that estimates the cluster average potential outcome in settings of
bipartite interference.}
\usage{
BipartiteGroupIPW(
int_dta,
out_dta,
cov_cols,
phi_hat,
alpha,
trt_col = NULL,
out_col = NULL,
integral_bound = 10
)
}
\arguments{
\item{int_dta}{Data on the interventional units including treatment,
neighborhood index and covariates. Neighborhood variable should be named
'neigh' and only include continuous integers from 1 to the number of
neighborhoods.}
\item{out_dta}{Data on the outcome units including outcome, neighborhood,
and index of the closest interventional unit as 'closest_int'.
Neighborhood variable should be named 'neigh' and only include continuous
integers from 1 to the number of neighborhoods.}
\item{cov_cols}{The indices including the covariates of the ps model.}
\item{phi_hat}{A list with two elements. The first one is a vector of
coefficients of the ps, and the second one is the random effect variance.}
\item{alpha}{The values of alpha for which we want to estimate the group
average potential outcome.}
\item{trt_col}{If the treatment is not named 'A' in int_dta, specify the
treatment column index.}
\item{out_col}{If the outcome is not named 'Y' in out_dta, specify the
outcome column index.}
\item{integral_bound}{The number of standard deviations of the random effect
that will be used as the lower and upper limit. Defaults to 10.}
}
\description{
Function that estimates the cluster average potential outcome in settings of
bipartite interference.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MDataUpdater.GoogleFinance.News.R
\docType{data}
\name{MDataUpdater.GoogleFinance.News}
\alias{MDataUpdater.GoogleFinance.News}
\title{MDataUpdater.GoogleFinance.News}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
MDataUpdater.GoogleFinance.News
}
\description{
MDataUpdater.GoogleFinance.News
}
\keyword{datasets}
|
/MUpdaters/man/MDataUpdater.GoogleFinance.News.Rd
|
no_license
|
pashkovds/mdlibs
|
R
| false
| true
| 422
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MDataUpdater.GoogleFinance.News.R
\docType{data}
\name{MDataUpdater.GoogleFinance.News}
\alias{MDataUpdater.GoogleFinance.News}
\title{MDataUpdater.GoogleFinance.News}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
MDataUpdater.GoogleFinance.News
}
\description{
MDataUpdater.GoogleFinance.News
}
\keyword{datasets}
|
############################# Dense ANN model ################################
# NOTE(review): the original banner said "LSTM DNN Model", but the network
# below is a plain feed-forward net (flatten -> one dense ReLU hidden layer
# -> sigmoid output), not an LSTM.
## Function ANN.test.training -- 10-fold train/evaluate loop for a binary
## credit-default classifier.
## Parameters:
##  list: data input (list of 10 folds; each fold is a data frame with a
##        SET.IS column marking TRAINING/TEST rows and a 0/1 DEFAULT target)
##  epochs: number of epochs
##  units: Number of neurons within the hidden layer
## Returns a list: per-epoch loss/accuracy matrices (train and validation),
## per-fold predictions, per-fold metrics, and wall-clock timings.
library(keras)
library(yardstick)
library(dplyr)
ANN.test.training<-function(list,epochs,units){
  ## Empty dataframes to save the results. According to the number of epochs
  results.epochs.loss<-matrix(ncol = 10,nrow = epochs) #ncol number of folds, nrow = number of epochs
  results.epochs.accuracy<-matrix(ncol = 10,nrow = epochs) #ncol number of folds, nrow = number of epochs
  results.epochs.val_loss<-matrix(ncol = 10,nrow = epochs)
  results.epochs.val_accuracy<-matrix(ncol = 10,nrow = epochs)
  results.predictions<-list()
  results.training<-list()
  results.metrics<-setNames(data.frame(matrix(ncol = 7,nrow = 10)),c("Acurracy Test","Acurracy Training","AUC","PR AUC","Precision","Recall","F1"))
  results.times<-setNames(data.frame(matrix(ncol = 3,nrow = 10)),c("start","end","end_time"))
  for(i in 1:length(list)){
    results.times[i,1]<-Sys.time()
    ### Prepare data ######
    df<-list[[i]]
    df$DEFAULT<-as.numeric(as.character(df$DEFAULT))
    #Training split: target matrix plus feature matrix (names stripped).
    # NOTE(review): "targer" is a typo for "target" kept throughout.
    df.training<-df%>%filter(SET.IS == "TRAINING")%>%select(c(LIMIT_BAL:DEFAULT))
    df.training.targer<-as.matrix(df.training%>%select(DEFAULT))
    df.training<-df.training%>%select(LIMIT_BAL:PAY_AMT6)
    df.training<-as.matrix(df.training)
    dimnames(df.training)<-NULL
    #Validation split, built the same way from the TEST rows.
    df.validation<-df%>%filter(SET.IS == "TEST")%>%select(c(LIMIT_BAL:DEFAULT))
    df.validation.targer<-as.matrix(df.validation%>%select(DEFAULT))
    df.validation<-df.validation%>%select(LIMIT_BAL:PAY_AMT6)
    df.validation<-as.matrix(df.validation)
    dimnames(df.validation)<-NULL
    ### ANN Model ########
    # NOTE(review): use_session_with_seed() and predict_classes()/
    # predict_proba() below are deprecated/removed in recent keras
    # versions -- this script targets an older keras/TF; confirm.
    use_session_with_seed(1) # Same seed
    options(keras.view_metrics = FALSE) # doesn't show the loss and gain chart
    model<-keras_model_sequential()%>%
      layer_flatten(input_shape = dim(df.training)[2])%>% #input layer
      layer_dense(units = units, activation = "relu", use_bias = TRUE)%>% #hidden layer
      layer_dense(1,activation = "sigmoid", use_bias = TRUE) #output layer
    model %>%
      compile(loss = "binary_crossentropy",optimizer = "adam",metrics = "accuracy")
    history<-model %>%
      fit(
        df.training,
        df.training.targer,
        epochs = epochs,
        batch_size = 32,
        verbose = 0, # Print the epochs
        validation_split = 0,
        validation_data = list(df.validation, df.validation.targer))
    results.epochs.loss[,i]<-history$metrics$loss # loss per epoch training
    results.epochs.accuracy[,i]<-history$metrics$acc # accuracy per epoch training
    results.epochs.val_loss[,i]<-history$metrics$val_loss # loss per epoch validation
    results.epochs.val_accuracy[,i]<-history$metrics$val_acc #accuracy per epoch validation
    predictions<-model%>%predict_classes(df.validation) # Predictions
    predictions.prob<-model%>%predict_proba(df.validation) %>% as.vector() # Prob Predictions
    predictions.training<-model%>%predict_classes(df.training) # Predictions Training
    results.predictions[[i]]<-tibble::tibble(
      Real = as.factor(df.validation.targer),
      Estimate = as.factor(predictions),
      Prob = predictions.prob)
    results.training[[i]]<-tibble::tibble(
      Real.training = as.factor(df.training.targer),
      Estimate.training = as.factor(predictions.training))
    # Treat the second factor level (default = 1) as the event of interest.
    options(yardstick.event_first = FALSE)
    results.metrics[i,1]<-data.frame(results.predictions[[i]] %>% yardstick::metrics(Real, Estimate))[1,3] #accuracy test
    results.metrics[i,2]<-data.frame(results.training[[i]] %>% yardstick::metrics(Real.training, Estimate.training))[1,3] #accuracy training
    results.metrics[i,3]<-data.frame(results.predictions[[i]] %>% yardstick::roc_auc(Real, Prob))[,3] # ROC AUC
    results.metrics[i,4]<-data.frame(results.predictions[[i]] %>% mutate(Estimate=as.numeric(as.character(Estimate)))%>%yardstick::pr_auc(Real,Estimate))[3] # PR AUC
    results.metrics[i,5]<-data.frame(results.predictions[[i]] %>% yardstick::precision(Real, Estimate))[,3] #Precision
    results.metrics[i,6]<-data.frame(results.predictions[[i]] %>% yardstick::recall(Real, Estimate))[,3] # Recall
    results.metrics[i,7]<-data.frame(results.predictions[[i]] %>% yardstick::f_meas(Real, Estimate, beta = 1))[,3] # F1
    results.times[i,2]<-Sys.time()
    results.times[i,3]<-(results.times[i,2]-results.times[i,1])/60
  }
  return(list(results.epochs.loss,results.epochs.accuracy,results.epochs.val_loss,results.epochs.val_accuracy,results.predictions,results.metrics,results.times))
}
############### ANN, target-mean encoding, 300 epochs ##################
# NOTE(review): the original headers said "with SMOTE", but every data
# object used below is the ".nosmote" variant (no SMOTE resampling).
# The kfold.* fold lists are defined elsewhere in this project.
results.ann.targetmean.1.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,1)
results.ann.targetmean.5.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,5)
results.ann.targetmean.10.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,10)
results.ann.targetmean.15.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,15)
results.ann.targetmean.20.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,20)
results.ann.targetmean.23.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,23)
############### ANN, frequency encoding, 300 epochs (no SMOTE) #########
results.ann.frequency.1.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,1)
results.ann.frequency.5.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,5)
results.ann.frequency.10.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,10)
results.ann.frequency.15.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,15)
results.ann.frequency.20.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,20)
results.ann.frequency.23.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,23)
############### ANN, one-hot encoding, 300 epochs (no SMOTE) ###########
results.ann.onehot.1.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,1)
results.ann.onehot.6.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,6)
results.ann.onehot.12.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,12)
results.ann.onehot.18.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,18)
results.ann.onehot.23.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,23)
results.ann.onehot.27.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,27)
|
/ANN Model.R
|
no_license
|
oordenesg/deep_learning_research_project
|
R
| false
| false
| 6,853
|
r
|
############################# ANN Model #######################################
## Function ANN.test.training
##
## Trains and evaluates a feed-forward network with one hidden layer on each
## fold of a k-fold list and collects per-epoch curves, predictions, metrics
## and timings. (Header previously said "LSTM DNN Model"; the model built
## below is a plain dense network, not an LSTM.)
##
## Parameters:
##   list:   list of data frames, one per fold; each needs a SET.IS column
##           ("TRAINING"/"TEST"), predictor columns LIMIT_BAL:PAY_AMT6 and a
##           DEFAULT target column
##   epochs: number of training epochs
##   units:  number of neurons within the hidden layer
##
## Returns: list(epoch loss, epoch accuracy, epoch val_loss, epoch
##          val_accuracy, per-fold predictions, per-fold metrics, timings)
library(keras)
library(yardstick)
library(dplyr)
ANN.test.training<-function(list,epochs,units){
  ## Empty matrices to save per-epoch results
  ## (ncol = number of folds, nrow = number of epochs)
  results.epochs.loss<-matrix(ncol = 10,nrow = epochs)
  results.epochs.accuracy<-matrix(ncol = 10,nrow = epochs)
  results.epochs.val_loss<-matrix(ncol = 10,nrow = epochs)
  results.epochs.val_accuracy<-matrix(ncol = 10,nrow = epochs)
  # NB: the parameter is named `list`, but R's function-call lookup skips
  # non-function bindings, so list() below still resolves to base::list.
  results.predictions<-list()   # per-fold test-set prediction tibbles
  results.training<-list()      # per-fold training-set prediction tibbles
  # Column names keep the original (misspelled) "Acurracy" labels on purpose:
  # downstream code may index the data frame by these exact strings.
  results.metrics<-setNames(data.frame(matrix(ncol = 7,nrow = 10)),c("Acurracy Test","Acurracy Training","AUC","PR AUC","Precision","Recall","F1"))
  results.times<-setNames(data.frame(matrix(ncol = 3,nrow = 10)),c("start","end","end_time"))
  # seq_along() instead of 1:length(): safe when the fold list is empty
  for(i in seq_along(list)){
    results.times[i,1]<-Sys.time()  # coerced to numeric seconds by the data.frame
    ### Prepare data ######
    df<-list[[i]]
    df$DEFAULT<-as.numeric(as.character(df$DEFAULT))  # factor -> 0/1 numeric
    # Training split
    df.training<-df%>%filter(SET.IS == "TRAINING")%>%select(c(LIMIT_BAL:DEFAULT))
    df.training.targer<-as.matrix(df.training%>%select(DEFAULT))
    df.training<-df.training%>%select(LIMIT_BAL:PAY_AMT6)
    df.training<-as.matrix(df.training)
    dimnames(df.training)<-NULL
    # Validation split
    df.validation<-df%>%filter(SET.IS == "TEST")%>%select(c(LIMIT_BAL:DEFAULT))
    df.validation.targer<-as.matrix(df.validation%>%select(DEFAULT))
    df.validation<-df.validation%>%select(LIMIT_BAL:PAY_AMT6)
    df.validation<-as.matrix(df.validation)
    dimnames(df.validation)<-NULL
    ### ANN Model ########
    # Same seed for every fold so results are reproducible.
    # NOTE(review): use_session_with_seed() is deprecated in recent keras
    # releases -- confirm against the installed keras version.
    use_session_with_seed(1)
    options(keras.view_metrics = FALSE) # don't show the live loss/accuracy chart
    model<-keras_model_sequential()%>%
      layer_flatten(input_shape = dim(df.training)[2])%>% #input layer
      layer_dense(units = units, activation = "relu", use_bias = TRUE)%>% #hidden layer
      layer_dense(1,activation = "sigmoid", use_bias = TRUE) #output layer
    model %>%
      compile(loss = "binary_crossentropy",optimizer = "adam",metrics = "accuracy")
    history<-model %>%
      fit(
        df.training,
        df.training.targer,
        epochs = epochs,
        batch_size = 32,
        verbose = 0, # don't print per-epoch progress
        validation_split = 0,
        validation_data = list(df.validation, df.validation.targer))
    # NOTE(review): newer keras exposes these as $accuracy / $val_accuracy
    # instead of $acc / $val_acc -- confirm the metric names in use.
    results.epochs.loss[,i]<-history$metrics$loss # loss per epoch training
    results.epochs.accuracy[,i]<-history$metrics$acc # accuracy per epoch training
    results.epochs.val_loss[,i]<-history$metrics$val_loss # loss per epoch validation
    results.epochs.val_accuracy[,i]<-history$metrics$val_acc #accuracy per epoch validation
    predictions<-model%>%predict_classes(df.validation) # hard 0/1 predictions
    predictions.prob<-model%>%predict_proba(df.validation) %>% as.vector() # class-1 probabilities
    predictions.training<-model%>%predict_classes(df.training) # Predictions Training
    results.predictions[[i]]<-tibble::tibble(
      Real = as.factor(df.validation.targer),
      Estimate = as.factor(predictions),
      Prob = predictions.prob)
    results.training[[i]]<-tibble::tibble(
      Real.training = as.factor(df.training.targer),
      Estimate.training = as.factor(predictions.training))
    options(yardstick.event_first = FALSE) # treat the second level (1) as the event
    results.metrics[i,1]<-data.frame(results.predictions[[i]] %>% yardstick::metrics(Real, Estimate))[1,3] #accuracy test
    results.metrics[i,2]<-data.frame(results.training[[i]] %>% yardstick::metrics(Real.training, Estimate.training))[1,3] #accuracy training
    results.metrics[i,3]<-data.frame(results.predictions[[i]] %>% yardstick::roc_auc(Real, Prob))[,3] # ROC AUC
    # FIX: pr_auc, like roc_auc, must be computed on the predicted
    # probabilities; the previous code passed the hard 0/1 class labels,
    # which gives a degenerate precision-recall curve.
    results.metrics[i,4]<-data.frame(results.predictions[[i]] %>% yardstick::pr_auc(Real, Prob))[,3] # PR AUC
    results.metrics[i,5]<-data.frame(results.predictions[[i]] %>% yardstick::precision(Real, Estimate))[,3] #Precision
    results.metrics[i,6]<-data.frame(results.predictions[[i]] %>% yardstick::recall(Real, Estimate))[,3] # Recall
    results.metrics[i,7]<-data.frame(results.predictions[[i]] %>% yardstick::f_meas(Real, Estimate, beta = 1))[,3] # F1
    results.times[i,2]<-Sys.time()
    # numeric difference is in seconds, so /60 yields elapsed minutes
    results.times[i,3]<-(results.times[i,2]-results.times[i,1])/60
  }
  return(list(results.epochs.loss,results.epochs.accuracy,results.epochs.val_loss,results.epochs.val_accuracy,results.predictions,results.metrics,results.times))
}
############### ANN Target Mean Encoding without SMOTE, 300 epochs ############
# (headers in this section previously said "with SMOTE"; every input below is
# one of the .nosmote data sets)
results.ann.targetmean.1.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,1)
results.ann.targetmean.5.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,5)
results.ann.targetmean.10.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,10)
results.ann.targetmean.15.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,15)
results.ann.targetmean.20.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,20)
results.ann.targetmean.23.300.nosmote<-ANN.test.training(kfold.targetMean.scale.nosmote,300,23)
############### ANN Frequency Encoding without SMOTE, 300 epochs ##############
results.ann.frequency.1.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,1)
results.ann.frequency.5.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,5)
results.ann.frequency.10.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,10)
results.ann.frequency.15.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,15)
results.ann.frequency.20.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,20)
results.ann.frequency.23.300.nosmote<-ANN.test.training(kfold.frequencyEncoding.scale.nosmote,300,23)
############### ANN One-Hot Encoding without SMOTE, 300 epochs ################
# NOTE(review): "kfold.onehotEncoding.sale.nosmote" looks like a typo for
# ".scale." (cf. the targetMean/frequency inputs above) -- confirm object name
results.ann.onehot.1.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,1)
results.ann.onehot.6.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,6)
results.ann.onehot.12.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,12)
results.ann.onehot.18.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,18)
results.ann.onehot.23.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,23)
results.ann.onehot.27.300.nosmote<-ANN.test.training(kfold.onehotEncoding.sale.nosmote,300,27)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggb_global_nopsd.R
\name{pathprox}
\alias{pathprox}
\title{R Wrapper to C Function}
\usage{
pathprox(y, lam, assign, w = NULL)
}
\arguments{
\item{y}{array of p elements}
\item{lam}{positive tuning parameter}
\item{assign}{array indexing each element in y with node indices (with
0 meaning element is not in the graph)}
\item{w}{array of positive weights, one per node}
}
\value{
b, w, and pen (value of lam * Omega(b) )
}
\description{
Calls C function defined in R package \pkg{hsm}.
}
|
/man/pathprox.Rd
|
no_license
|
jacobbien/ggb
|
R
| false
| true
| 569
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggb_global_nopsd.R
\name{pathprox}
\alias{pathprox}
\title{R Wrapper to C Function}
\usage{
pathprox(y, lam, assign, w = NULL)
}
\arguments{
\item{y}{array of p elements}
\item{lam}{positive tuning parameter}
\item{assign}{array indexing each element in y with node indices (with
0 meaning element is not in the graph)}
\item{w}{array of positive weights, one per node}
}
\value{
b, w, and pen (value of lam * Omega(b) )
}
\description{
Calls C function defined in R package \pkg{hsm}.
}
|
library(tidyverse)
library(here)
# Sydney beaches data, read from the data/ folder at the project root
# (here::here() resolves the path independently of the working directory).
beaches <- read_csv(here::here("data","sydneybeaches3.csv"))
# STILL NEED TO WRITE THIS
# d <- ggplot(
#   data = beaches,
#   mapping = aes(
#     x = reorder(month_name, month),
#     y = temperature)
# ) +
#   xlab("Month")
#
#
# # add the means to the plot: [they're not ready for this???]
# d5 <- d +
#   geom_violin() +
#   geom_point(stat = "summary")
|
/exercises/exercise_06_stats.R
|
permissive
|
djnavarro/satrdayjoburg
|
R
| false
| false
| 395
|
r
|
library(tidyverse)
library(here)
# Sydney beaches data, read from the data/ folder at the project root
# (here::here() resolves the path independently of the working directory).
beaches <- read_csv(here::here("data","sydneybeaches3.csv"))
# STILL NEED TO WRITE THIS
# d <- ggplot(
#   data = beaches,
#   mapping = aes(
#     x = reorder(month_name, month),
#     y = temperature)
# ) +
#   xlab("Month")
#
#
# # add the means to the plot: [they're not ready for this???]
# d5 <- d +
#   geom_violin() +
#   geom_point(stat = "summary")
|
# $$\
# \__|
# $$$$$$\ $$$$$$\ $$$$$$\ $$\ $$$$$$\ $$$$$$$\ $$$$$$$\
# $$ __$$\ $$ __$$\ $$ __$$\ $$ |$$ __$$\ $$ __$$\ $$ _____|
# $$ | \__|$$$$$$$$ |$$ / $$ |$$ |$$ / $$ |$$ | $$ |\$$$$$$\
# $$ | $$ ____|$$ | $$ |$$ |$$ | $$ |$$ | $$ | \____$$\
# $$ | \$$$$$$$\ \$$$$$$$ |$$ |\$$$$$$ |$$ | $$ |$$$$$$$ |
# \__| \_______| \____$$ |\__| \______/ \__| \__|\_______/
# $$\ $$ |
# \$$$$$$ |
# \______/
#' Shuffle regions
#'
#' Similar to \href{https://bedtools.readthedocs.io/en/latest/content/tools/shuffle.html}{shuffleBed} from bedtools:
#' every input region is relocated to a random position inside the mask.
#'
#' @param regions regions object to be shuffled (GRanges)
#' @param mask mask regions where to shuffle (GRanges)
#'
#' @return
#' The function returns a GR object with the regions located in new random
#' positions. The index of the output should match index of the input.
#'
#' Chromosome / seqnames are not maintained.
#'
#' Resulting regions may overlap
#'
#' @export
#'
shufle_regions <- function(regions,mask) {
  gr_reg = regions
  mask_gr = mask
  # widths of the regions to shuffle and of the mask instances
  width_reg_vec = BiocGenerics::width(gr_reg)
  width_mask_vec = BiocGenerics::width(mask_gr)
  # per region, pick the mask instance it will be relocated to
  # (chromosomes / seqnames are not maintained! see split)
  mask_instance = integer(length = length(gr_reg))
  # seq_along() instead of seq(1, length(.), by = 1): the latter errors on
  # zero-length input
  for (i in seq_along(gr_reg)){
    width_r = width_reg_vec[i]
    # only instances strictly wider than the region can host it
    mask_of_masks = width_r < width_mask_vec
    # FIX: fail loudly when no instance is wide enough; otherwise probs is
    # NaN and sample() errors cryptically (same guard as shufle_regions2)
    stopifnot(sum(mask_of_masks) != 0)
    # uniform probability over eligible instances, FALSE will be 0!
    probs = as.numeric(mask_of_masks) / sum(mask_of_masks)
    # sample the instance in the mask for this region
    res = sample(x = length(mask_gr),size = 1,prob = probs)
    mask_instance[i] = res
    # probs depends on the width of the current region, so this loop cannot
    # be fully vectorized
  }
  # sample a candidate start offset per region; the max value is the width
  # of the widest instance in the mask
  max_value = max(width_mask_vec)
  sampled_values = sample(max_value,size = length(gr_reg),replace = TRUE)
  # width of the mask instance each region has been assigned to
  window_width_values = width_mask_vec[mask_instance]
  # number of possible start offsets inside each assigned instance
  max_start = window_width_values - width_reg_vec
  # the sampled offset can exceed the number of possible starts of a given
  # instance, so wrap it with the modulo into 0 .. max_start - 1
  wind_idx = sampled_values %% max_start
  # build the output on the seqnames of the assigned mask instances
  seqnames_rnd = GenomeInfoDb::seqnames(mask_gr)[mask_instance]
  start_rnd = BiocGenerics::start(mask_gr)[mask_instance] + wind_idx
  random_gr = GenomicRanges::GRanges(
    seqnames = seqnames_rnd,
    ranges = IRanges::IRanges(start = start_rnd,
                              width = width_reg_vec)
  )
  return(random_gr)
}
#' Shuffle regions (vectorised assignment)
#'
#' Similar to \href{https://bedtools.readthedocs.io/en/latest/content/tools/shuffle.html}{shuffleBed} from bedtools.
#' Same contract as shufle_regions() but assigns mask instances with a single
#' vectorised sample() call, falling back to a per-region re-draw only for
#' regions that landed in an instance too narrow to host them.
#'
#' @param regions regions object to be shuffled (GRanges)
#' @param mask mask regions where to shuffle (GRanges)
#'
#' @return
#' The function returns a GR object with the regions located in new random
#' positions. The index of the output should match index of the input.
#'
#' Chromosome / seqnames are not maintained.
#'
#' Resulting regions may overlap
#'
#' @export
#'
shufle_regions2 <- function(regions,mask) {
  gr_reg = regions
  mask_gr = mask
  # widths of the regions to shuffle and of the mask instances
  width_reg_vec = BiocGenerics::width(gr_reg)
  width_mask_vec = BiocGenerics::width(mask_gr)
  # first pass: assign every region to a uniformly drawn mask instance
  # (chromosomes / seqnames are not maintained! see split)
  mask_instance = sample(x = length(mask_gr),
                         size = length(gr_reg),
                         replace = TRUE)
  # FIX: use >= (was >). A region exactly as wide as its assigned instance
  # leaves zero possible start offsets, so the later `%% max_start` divides
  # by zero and produces NaN starts; such regions must be re-drawn too.
  wider_mask = width_reg_vec >= width_mask_vec[mask_instance]
  if (sum(wider_mask) > 0){
    for (i in which(wider_mask)){
      width_r = width_reg_vec[i]
      # only instances strictly wider than the region can host it
      mask_of_masks = width_r < width_mask_vec
      # if there is no place for this region to shuffle, stop!
      stopifnot(sum(mask_of_masks) != 0)
      # uniform probability over eligible instances, FALSE will be 0!
      probs = as.numeric(mask_of_masks) / sum(mask_of_masks)
      # re-draw the instance for this region only
      res = sample(x = length(mask_gr),size = 1,prob = probs)
      mask_instance[i] = res
      # probs depends on the width of the current region, so this loop
      # cannot be fully vectorized
    }
  }
  # sample a candidate start offset per region; the max value is the width
  # of the widest instance in the mask
  max_value = max(width_mask_vec)
  sampled_values = sample(max_value,size = length(gr_reg),replace = TRUE)
  # width of the mask instance each region has been assigned to
  window_width_values = width_mask_vec[mask_instance]
  # number of possible start offsets inside each assigned instance
  max_start = window_width_values - width_reg_vec
  # the sampled offset can exceed the number of possible starts of a given
  # instance, so wrap it with the modulo into 0 .. max_start - 1
  wind_idx = sampled_values %% max_start
  # build the output on the seqnames of the assigned mask instances
  seqnames_rnd = GenomeInfoDb::seqnames(mask_gr)[mask_instance]
  start_rnd = BiocGenerics::start(mask_gr)[mask_instance] + wind_idx
  random_gr = GenomicRanges::GRanges(
    seqnames = seqnames_rnd,
    ranges = IRanges::IRanges(start = start_rnd,
                              width = width_reg_vec)
  )
  return(random_gr)
}
#' Obtain genomic regions from gene lists
#'
#' Wrapper for a workflow that obtains a given genomic feature
#' from some gene identifier.
#'
#' Basically it comprises 2 steps: first, transform input
#' gene ids (such as SYMBOL) to the gene id type for the
#' txdb object. Second, it extracts the given feature filtering
#' for those genes inputted.
#'
#' @param feat the genomic feature to extract from (such as genes, transcripts...) see ?GenomicFeatures::transcripts
#' @param annotation_pkg An OrgDb object
#' @param transcript_db A TxDb object
#' @param identifiers the genes that we want to retrieve
#' @param indentifier_type the input identifier column (argument name kept
#'   as-is, typo included, for backwards compatibility with callers)
#' @param transcript_db_id_type the identifier type for the txdb object
#' @param transcript_db_columns extra columns to retrieve from the txdb object
#' @param ... extra options for feat function
#'
#' @export
#' @examples
#'
#' \dontrun{
#' # retrieving from ensembl
#' human = org.Hs.eg.db::org.Hs.eg.db
#' txdb_ensembl = makeTxDbFromEnsembl(organism = "homo_sapiens")
#' obtain_genomic_feature(feat = "transcripts",
#'                        annotation_pkg = human,
#'                        transcript_db = txdb_ensembl,
#'                        identifiers = c("BRCA1","BRCA2"),
#'                        indentifier_type = "SYMBOL",
#'                        transcript_db_id_type = "ENSEMBL",
#'                        transcript_db_columns = "TXNAME")
#' }
#'
#' # retrieving from UCSC (faster)
#'
#' human = org.Hs.eg.db::org.Hs.eg.db
#' txdb = TxDb.Hsapiens.UCSC.hg19.knownGene::TxDb.Hsapiens.UCSC.hg19.knownGene
#' obtain_genomic_feature(feat = "transcripts",
#'                        annotation_pkg = human,
#'                        transcript_db = txdb,
#'                        identifiers = c("BRCA1","BRCA2"),
#'                        indentifier_type = "SYMBOL",
#'                        transcript_db_columns = "TXNAME")
#'
obtain_genomic_feature <- function(feat,
                                   annotation_pkg,
                                   transcript_db,
                                   identifiers,
                                   indentifier_type,
                                   transcript_db_id_type = "ENTREZID",
                                   transcript_db_columns = NULL,
                                   ...){
  ## Step 1: map the input identifiers to the id type used by the TxDb
  ## object (ENTREZID by default) unless they are already of that type.
  if (indentifier_type != transcript_db_id_type ){
    AnnotationDbi::mapIds(x = annotation_pkg,
                          keys = identifiers,
                          keytype = indentifier_type,
                          column = transcript_db_id_type) -> txdb_ready_ids
  } else {
    txdb_ready_ids = identifiers
  }
  # will stop if there are missing genes (mapIds returns NA for misses)
  stopifnot(all(!is.na(txdb_ready_ids)))
  ## Step 2: look up the extractor (e.g. GenomicFeatures::transcripts) and
  ## call it restricted to the requested genes.
  # FIX: qualify the call -- getFromNamespace() lives in utils, and every
  # other external call in this file is explicitly namespaced
  fun = utils::getFromNamespace(feat, ns = "GenomicFeatures")
  filtering_list = list(gene_id = txdb_ready_ids)
  fun(x = transcript_db,
      filter = filtering_list,
      columns = transcript_db_columns,...) -> res_gr
  return(res_gr)
}
|
/R/regions.R
|
permissive
|
davidmasp/helperMut
|
R
| false
| false
| 10,124
|
r
|
# $$\
# \__|
# $$$$$$\ $$$$$$\ $$$$$$\ $$\ $$$$$$\ $$$$$$$\ $$$$$$$\
# $$ __$$\ $$ __$$\ $$ __$$\ $$ |$$ __$$\ $$ __$$\ $$ _____|
# $$ | \__|$$$$$$$$ |$$ / $$ |$$ |$$ / $$ |$$ | $$ |\$$$$$$\
# $$ | $$ ____|$$ | $$ |$$ |$$ | $$ |$$ | $$ | \____$$\
# $$ | \$$$$$$$\ \$$$$$$$ |$$ |\$$$$$$ |$$ | $$ |$$$$$$$ |
# \__| \_______| \____$$ |\__| \______/ \__| \__|\_______/
# $$\ $$ |
# \$$$$$$ |
# \______/
#' Shuffle regions
#'
#' Similar to \href{https://bedtools.readthedocs.io/en/latest/content/tools/shuffle.html}{shuffleBed} from bedtools:
#' every input region is relocated to a random position inside the mask.
#'
#' @param regions regions object to be shuffled (GRanges)
#' @param mask mask regions where to shuffle (GRanges)
#'
#' @return
#' The function returns a GR object with the regions located in new random
#' positions. The index of the output should match index of the input.
#'
#' Chromosome / seqnames are not maintained.
#'
#' Resulting regions may overlap
#'
#' @export
#'
shufle_regions <- function(regions,mask) {
  gr_reg = regions
  mask_gr = mask
  # widths of the regions to shuffle and of the mask instances
  width_reg_vec = BiocGenerics::width(gr_reg)
  width_mask_vec = BiocGenerics::width(mask_gr)
  # per region, pick the mask instance it will be relocated to
  # (chromosomes / seqnames are not maintained! see split)
  mask_instance = integer(length = length(gr_reg))
  # seq_along() instead of seq(1, length(.), by = 1): the latter errors on
  # zero-length input
  for (i in seq_along(gr_reg)){
    width_r = width_reg_vec[i]
    # only instances strictly wider than the region can host it
    mask_of_masks = width_r < width_mask_vec
    # FIX: fail loudly when no instance is wide enough; otherwise probs is
    # NaN and sample() errors cryptically (same guard as shufle_regions2)
    stopifnot(sum(mask_of_masks) != 0)
    # uniform probability over eligible instances, FALSE will be 0!
    probs = as.numeric(mask_of_masks) / sum(mask_of_masks)
    # sample the instance in the mask for this region
    res = sample(x = length(mask_gr),size = 1,prob = probs)
    mask_instance[i] = res
    # probs depends on the width of the current region, so this loop cannot
    # be fully vectorized
  }
  # sample a candidate start offset per region; the max value is the width
  # of the widest instance in the mask
  max_value = max(width_mask_vec)
  sampled_values = sample(max_value,size = length(gr_reg),replace = TRUE)
  # width of the mask instance each region has been assigned to
  window_width_values = width_mask_vec[mask_instance]
  # number of possible start offsets inside each assigned instance
  max_start = window_width_values - width_reg_vec
  # the sampled offset can exceed the number of possible starts of a given
  # instance, so wrap it with the modulo into 0 .. max_start - 1
  wind_idx = sampled_values %% max_start
  # build the output on the seqnames of the assigned mask instances
  seqnames_rnd = GenomeInfoDb::seqnames(mask_gr)[mask_instance]
  start_rnd = BiocGenerics::start(mask_gr)[mask_instance] + wind_idx
  random_gr = GenomicRanges::GRanges(
    seqnames = seqnames_rnd,
    ranges = IRanges::IRanges(start = start_rnd,
                              width = width_reg_vec)
  )
  return(random_gr)
}
#' Shuffle regions (vectorised assignment)
#'
#' Similar to \href{https://bedtools.readthedocs.io/en/latest/content/tools/shuffle.html}{shuffleBed} from bedtools.
#' Same contract as shufle_regions() but assigns mask instances with a single
#' vectorised sample() call, falling back to a per-region re-draw only for
#' regions that landed in an instance too narrow to host them.
#'
#' @param regions regions object to be shuffled (GRanges)
#' @param mask mask regions where to shuffle (GRanges)
#'
#' @return
#' The function returns a GR object with the regions located in new random
#' positions. The index of the output should match index of the input.
#'
#' Chromosome / seqnames are not maintained.
#'
#' Resulting regions may overlap
#'
#' @export
#'
shufle_regions2 <- function(regions,mask) {
  gr_reg = regions
  mask_gr = mask
  # widths of the regions to shuffle and of the mask instances
  width_reg_vec = BiocGenerics::width(gr_reg)
  width_mask_vec = BiocGenerics::width(mask_gr)
  # first pass: assign every region to a uniformly drawn mask instance
  # (chromosomes / seqnames are not maintained! see split)
  mask_instance = sample(x = length(mask_gr),
                         size = length(gr_reg),
                         replace = TRUE)
  # FIX: use >= (was >). A region exactly as wide as its assigned instance
  # leaves zero possible start offsets, so the later `%% max_start` divides
  # by zero and produces NaN starts; such regions must be re-drawn too.
  wider_mask = width_reg_vec >= width_mask_vec[mask_instance]
  if (sum(wider_mask) > 0){
    for (i in which(wider_mask)){
      width_r = width_reg_vec[i]
      # only instances strictly wider than the region can host it
      mask_of_masks = width_r < width_mask_vec
      # if there is no place for this region to shuffle, stop!
      stopifnot(sum(mask_of_masks) != 0)
      # uniform probability over eligible instances, FALSE will be 0!
      probs = as.numeric(mask_of_masks) / sum(mask_of_masks)
      # re-draw the instance for this region only
      res = sample(x = length(mask_gr),size = 1,prob = probs)
      mask_instance[i] = res
      # probs depends on the width of the current region, so this loop
      # cannot be fully vectorized
    }
  }
  # sample a candidate start offset per region; the max value is the width
  # of the widest instance in the mask
  max_value = max(width_mask_vec)
  sampled_values = sample(max_value,size = length(gr_reg),replace = TRUE)
  # width of the mask instance each region has been assigned to
  window_width_values = width_mask_vec[mask_instance]
  # number of possible start offsets inside each assigned instance
  max_start = window_width_values - width_reg_vec
  # the sampled offset can exceed the number of possible starts of a given
  # instance, so wrap it with the modulo into 0 .. max_start - 1
  wind_idx = sampled_values %% max_start
  # build the output on the seqnames of the assigned mask instances
  seqnames_rnd = GenomeInfoDb::seqnames(mask_gr)[mask_instance]
  start_rnd = BiocGenerics::start(mask_gr)[mask_instance] + wind_idx
  random_gr = GenomicRanges::GRanges(
    seqnames = seqnames_rnd,
    ranges = IRanges::IRanges(start = start_rnd,
                              width = width_reg_vec)
  )
  return(random_gr)
}
#' Obtain genomic regions from gene lists
#'
#' Wrapper for a workflow that obtains a given genomic feature
#' from some gene identifier.
#'
#' Basically it comprises 2 steps: first, transform input
#' gene ids (such as SYMBOL) to the gene id type for the
#' txdb object. Second, it extracts the given feature filtering
#' for those genes inputted.
#'
#' @param feat the genomic feature to extract from (such as genes, transcripts...) see ?GenomicFeatures::transcripts
#' @param annotation_pkg An OrgDb object
#' @param transcript_db A TxDb object
#' @param identifiers the genes that we want to retrieve
#' @param indentifier_type the input identifier column (argument name kept
#'   as-is, typo included, for backwards compatibility with callers)
#' @param transcript_db_id_type the identifier type for the txdb object
#' @param transcript_db_columns extra columns to retrieve from the txdb object
#' @param ... extra options for feat function
#'
#' @export
#' @examples
#'
#' \dontrun{
#' # retrieving from ensembl
#' human = org.Hs.eg.db::org.Hs.eg.db
#' txdb_ensembl = makeTxDbFromEnsembl(organism = "homo_sapiens")
#' obtain_genomic_feature(feat = "transcripts",
#'                        annotation_pkg = human,
#'                        transcript_db = txdb_ensembl,
#'                        identifiers = c("BRCA1","BRCA2"),
#'                        indentifier_type = "SYMBOL",
#'                        transcript_db_id_type = "ENSEMBL",
#'                        transcript_db_columns = "TXNAME")
#' }
#'
#' # retrieving from UCSC (faster)
#'
#' human = org.Hs.eg.db::org.Hs.eg.db
#' txdb = TxDb.Hsapiens.UCSC.hg19.knownGene::TxDb.Hsapiens.UCSC.hg19.knownGene
#' obtain_genomic_feature(feat = "transcripts",
#'                        annotation_pkg = human,
#'                        transcript_db = txdb,
#'                        identifiers = c("BRCA1","BRCA2"),
#'                        indentifier_type = "SYMBOL",
#'                        transcript_db_columns = "TXNAME")
#'
obtain_genomic_feature <- function(feat,
                                   annotation_pkg,
                                   transcript_db,
                                   identifiers,
                                   indentifier_type,
                                   transcript_db_id_type = "ENTREZID",
                                   transcript_db_columns = NULL,
                                   ...){
  ## Step 1: map the input identifiers to the id type used by the TxDb
  ## object (ENTREZID by default) unless they are already of that type.
  if (indentifier_type != transcript_db_id_type ){
    AnnotationDbi::mapIds(x = annotation_pkg,
                          keys = identifiers,
                          keytype = indentifier_type,
                          column = transcript_db_id_type) -> txdb_ready_ids
  } else {
    txdb_ready_ids = identifiers
  }
  # will stop if there are missing genes (mapIds returns NA for misses)
  stopifnot(all(!is.na(txdb_ready_ids)))
  ## Step 2: look up the extractor (e.g. GenomicFeatures::transcripts) and
  ## call it restricted to the requested genes.
  # FIX: qualify the call -- getFromNamespace() lives in utils, and every
  # other external call in this file is explicitly namespaced
  fun = utils::getFromNamespace(feat, ns = "GenomicFeatures")
  filtering_list = list(gene_id = txdb_ready_ids)
  fun(x = transcript_db,
      filter = filtering_list,
      columns = transcript_db_columns,...) -> res_gr
  return(res_gr)
}
|
% Auto-generated: do not edit by hand
\name{htmlCaption}
\alias{htmlCaption}
\title{Caption component}
\description{
Caption is a wrapper for the <caption> HTML5 element. For detailed attribute info see: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/caption
}
\usage{
htmlCaption(children=NULL, id=NULL, n_clicks=NULL,
n_clicks_timestamp=NULL, key=NULL, role=NULL,
accessKey=NULL, className=NULL, contentEditable=NULL,
contextMenu=NULL, dir=NULL, draggable=NULL, hidden=NULL,
lang=NULL, spellCheck=NULL, style=NULL, tabIndex=NULL,
title=NULL, loading_state=NULL, ...)
}
\arguments{
\item{children}{A list of or a singular dash component, string or number. The children of this component}
\item{id}{Character. The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.}
\item{n_clicks}{Numeric. An integer that represents the number of times
that this element has been clicked on.}
\item{n_clicks_timestamp}{Numeric. An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.}
\item{key}{Character. A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info}
\item{role}{Character. The ARIA role attribute}
\item{accessKey}{Character. Keyboard shortcut to activate or add focus to the element.}
\item{className}{Character. Often used with CSS to style elements with common properties.}
\item{contentEditable}{Character. Indicates whether the element's content is editable.}
\item{contextMenu}{Character. Defines the ID of a <menu> element which will serve as the element's context menu.}
\item{dir}{Character. Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)}
\item{draggable}{Character. Defines whether the element can be dragged.}
\item{hidden}{A value equal to: 'hidden', 'hidden' | logical. Prevents rendering of given element, while keeping child elements, e.g. script elements, active.}
\item{lang}{Character. Defines the language used in the element.}
\item{spellCheck}{Character. Indicates whether spell checking is allowed for the element.}
\item{style}{Named list. Defines CSS styles which will override styles previously set.}
\item{tabIndex}{Character. Overrides the browser's default tab order and follows the one specified instead.}
\item{title}{Character. Text to be displayed in a tooltip when hovering over the element.}
\item{loading_state}{Lists containing elements 'is_loading', 'prop_name', 'component_name'.
those elements have the following types:
- is_loading (logical; optional): determines if the component is loading or not
- prop_name (character; optional): holds which property is loading
- component_name (character; optional): holds the name of the component that is loading. Object that holds the loading state object coming from dash-renderer}
\item{...}{wildcards allowed have the form: `'data-*', 'aria-*'`}
}
\value{named list of JSON elements corresponding to React.js properties and their values}
\examples{
if (interactive() && require(dash)) {
library(dash)
library(dashHtmlComponents)
app <- Dash$new()
app$layout(
htmlDiv(list(
htmlTable(list(
htmlCaption("Elevations of a few Cascade Range volcanoes"),
htmlThead(
htmlTr(list(
htmlTh("Mountain"),
htmlTh("Elevation (m)"),
htmlTh("Elevation (ft)")
)
)),
htmlTbody(list(
htmlTr(list(
htmlTd("Mount Rainier"),
htmlTd("4,392"),
htmlTd("14,411")
)
),
htmlTr(list(
htmlTd("Mount Hood"),
htmlTd("3,429"),
htmlTd("11,249")
)
),
htmlTr(list(
htmlTd("Lassen Peak"),
htmlTd("3,187"),
htmlTd("10,457")
)
),
htmlTr(list(
htmlTd("Mount St. Helens"),
htmlTd("2,549"),
htmlTd("8,363")
)
)
)
)
), style = list(
border = "1px black solid"
)
)
)
)
)
app$run_server()
}
}
|
/man/htmlCaption.Rd
|
permissive
|
noisycomputation/dash-html-components
|
R
| false
| false
| 4,338
|
rd
|
% Auto-generated: do not edit by hand
\name{htmlCaption}
\alias{htmlCaption}
\title{Caption component}
\description{
Caption is a wrapper for the <caption> HTML5 element. For detailed attribute info see: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/caption
}
\usage{
htmlCaption(children=NULL, id=NULL, n_clicks=NULL,
n_clicks_timestamp=NULL, key=NULL, role=NULL,
accessKey=NULL, className=NULL, contentEditable=NULL,
contextMenu=NULL, dir=NULL, draggable=NULL, hidden=NULL,
lang=NULL, spellCheck=NULL, style=NULL, tabIndex=NULL,
title=NULL, loading_state=NULL, ...)
}
\arguments{
\item{children}{A list of or a singular dash component, string or number. The children of this component}
\item{id}{Character. The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.}
\item{n_clicks}{Numeric. An integer that represents the number of times
that this element has been clicked on.}
\item{n_clicks_timestamp}{Numeric. An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.}
\item{key}{Character. A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info}
\item{role}{Character. The ARIA role attribute}
\item{accessKey}{Character. Keyboard shortcut to activate or add focus to the element.}
\item{className}{Character. Often used with CSS to style elements with common properties.}
\item{contentEditable}{Character. Indicates whether the element's content is editable.}
\item{contextMenu}{Character. Defines the ID of a <menu> element which will serve as the element's context menu.}
\item{dir}{Character. Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)}
\item{draggable}{Character. Defines whether the element can be dragged.}
\item{hidden}{A value equal to: 'hidden', 'hidden' | logical. Prevents rendering of given element, while keeping child elements, e.g. script elements, active.}
\item{lang}{Character. Defines the language used in the element.}
\item{spellCheck}{Character. Indicates whether spell checking is allowed for the element.}
\item{style}{Named list. Defines CSS styles which will override styles previously set.}
\item{tabIndex}{Character. Overrides the browser's default tab order and follows the one specified instead.}
\item{title}{Character. Text to be displayed in a tooltip when hovering over the element.}
\item{loading_state}{Lists containing elements 'is_loading', 'prop_name', 'component_name'.
those elements have the following types:
- is_loading (logical; optional): determines if the component is loading or not
- prop_name (character; optional): holds which property is loading
- component_name (character; optional): holds the name of the component that is loading. Object that holds the loading state object coming from dash-renderer}
\item{...}{wildcards allowed have the form: `'data-*', 'aria-*'`}
}
\value{named list of JSON elements corresponding to React.js properties and their values}
\examples{
if (interactive() && require(dash)) {
library(dash)
library(dashHtmlComponents)
app <- Dash$new()
app$layout(
htmlDiv(list(
htmlTable(list(
htmlCaption("Elevations of a few Cascade Range volcanoes"),
htmlThead(
htmlTr(list(
htmlTh("Mountain"),
htmlTh("Elevation (m)"),
htmlTh("Elevation (ft)")
)
)),
htmlTbody(list(
htmlTr(list(
htmlTd("Mount Rainier"),
htmlTd("4,392"),
htmlTd("14,411")
)
),
htmlTr(list(
htmlTd("Mount Hood"),
htmlTd("3,429"),
htmlTd("11,249")
)
),
htmlTr(list(
htmlTd("Lassen Peak"),
htmlTd("3,187"),
htmlTd("10,457")
)
),
htmlTr(list(
htmlTd("Mount St. Helens"),
htmlTd("2,549"),
htmlTd("8,363")
)
)
)
)
), style = list(
border = "1px black solid"
)
)
)
)
)
app$run_server()
}
}
|
\name{smooth.sparse.mean}
\alias{smooth.sparse.mean}
\title{
Smooth the mean function of sparse data
}
\description{
Do a smoothing of the mean function for sparse data that is either given
as a list or as a matrix with NAs. The smooth is done by basis expansion
with the functional basis "type"; if !(lambda == 0) then the second
derivative is penalized (int2Lfd(2)).
}
\usage{
smooth.sparse.mean(data, time ,rng = c(0, 1), type = "", nbasis = NULL,
knots = NULL, norder = NULL, lambda = NULL)
}
\arguments{
\item{data}{
a matrix object or list -- If the set is supplied as a matrix object,
the rows must correspond to argument values and columns to replications,
and it will be assumed that there is only one variable per observation.
If y is a three-dimensional array, the first dimension corresponds to
argument values, the second to replications, and the third to variables
within replications. -- If it is a list, each element must be a matrix
object, the rows correspond to argument values per individual. First
column corresponds to time points and following columns to argument values
per variable.
}
\item{time}{
Array with time points where data was taken. length(time) == ncol(data)
}
\item{rng}{
an array of length 2 containing the lower and upper boundaries for the
rangeval of argument values
}
\item{type}{
Type of basisfd for smoothing the mean estimate function.
"bspline", "fourier", "exp", "const" or "mon"
}
\item{nbasis}{
An integer variable specifying the number of basis functions
}
\item{knots}{
a vector specifying the break points if type == "bspline"
}
\item{norder}{
an integer specifying the order of b-splines if type == "bspline"
}
\item{lambda}{
a nonnegative real number specifying the amount of smoothing to be applied to
the estimated functional parameter
}
}
\value{
a functional data object containing a smooth of the mean.
}
|
/fda/man/smooth.sparse.mean.Rd
|
no_license
|
sbgraves237/fda
|
R
| false
| false
| 1,982
|
rd
|
\name{smooth.sparse.mean}
\alias{smooth.sparse.mean}
\title{
Smooth the mean function of sparse data
}
\description{
Do a smoothing of the mean function for sparse data that is either given
as a list or as a matrix with NAs. The smooth is done by basis expansion
with the functional basis "type"; if !(lambda == 0) then the second
derivative is penalized (int2Lfd(2)).
}
\usage{
smooth.sparse.mean(data, time ,rng = c(0, 1), type = "", nbasis = NULL,
knots = NULL, norder = NULL, lambda = NULL)
}
\arguments{
\item{data}{
a matrix object or list -- If the set is supplied as a matrix object,
the rows must correspond to argument values and columns to replications,
and it will be assumed that there is only one variable per observation.
If y is a three-dimensional array, the first dimension corresponds to
argument values, the second to replications, and the third to variables
within replications. -- If it is a list, each element must be a matrix
object, the rows correspond to argument values per individual. First
column corresponds to time points and following columns to argument values
per variable.
}
\item{time}{
Array with time points where data was taken. length(time) == ncol(data)
}
\item{rng}{
an array of length 2 containing the lower and upper boundaries for the
rangeval of argument values
}
\item{type}{
Type of basisfd for smoothing the mean estimate function.
"bspline", "fourier", "exp", "const" or "mon"
}
\item{nbasis}{
An integer variable specifying the number of basis functions
}
\item{knots}{
a vector specifying the break points if type == "bspline"
}
\item{norder}{
an integer specifying the order of b-splines if type == "bspline"
}
\item{lambda}{
a nonnegative real number specifying the amount of smoothing to be applied to
the estimated functional parameter
}
}
\value{
a functional data object containing a smooth of the mean.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_analysis.R
\name{wbt_create_colour_composite}
\alias{wbt_create_colour_composite}
\title{Create colour composite}
\usage{
wbt_create_colour_composite(red, green, blue, output, opacity = NULL,
enhance = TRUE, zeros = FALSE, verbose_mode = FALSE)
}
\arguments{
\item{red}{Input red band image file.}
\item{green}{Input green band image file.}
\item{blue}{Input blue band image file.}
\item{output}{Output colour composite file.}
\item{opacity}{Input opacity band image file (optional).}
\item{enhance}{Optional flag indicating whether a balance contrast enhancement is performed.}
\item{zeros}{Optional flag to indicate if zeros are nodata values.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Creates a colour-composite image from three bands of multispectral imagery.
}
|
/man/wbt_create_colour_composite.Rd
|
permissive
|
Remote-Sensing-Forks/whiteboxR
|
R
| false
| true
| 979
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_analysis.R
\name{wbt_create_colour_composite}
\alias{wbt_create_colour_composite}
\title{Create colour composite}
\usage{
wbt_create_colour_composite(red, green, blue, output, opacity = NULL,
enhance = TRUE, zeros = FALSE, verbose_mode = FALSE)
}
\arguments{
\item{red}{Input red band image file.}
\item{green}{Input green band image file.}
\item{blue}{Input blue band image file.}
\item{output}{Output colour composite file.}
\item{opacity}{Input opacity band image file (optional).}
\item{enhance}{Optional flag indicating whether a balance contrast enhancement is performed.}
\item{zeros}{Optional flag to indicate if zeros are nodata values.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Creates a colour-composite image from three bands of multispectral imagery.
}
|
# Author: Alicia Carabali
# Date: 08/05/2019
#
# Plot 1: histogram of household Global Active Power restricted to
# 2007-02-01 and 2007-02-02, written to Plot1.png (480x480).
library(lubridate)
fullname <- "./Datasets/PowerConsumption/household_power_consumption.txt"
# Missing values in this dataset are coded as "?"; declaring na.strings lets
# the power column be read as numeric-convertible text instead of garbage.
data <- read.table(fullname, header = TRUE, sep = ";", na.strings = "?")
# Keep only the two target days (dates in the file are d/m/Y).
data1 <- subset(data, dmy(data$Date) == "2007-02-02" | dmy(data$Date) == "2007-02-01")
png(filename = "Plot1.png", width = 480, height = 480)
# FIX: the original used as.numeric(<factor>)/500, which rescales factor
# level indices rather than the measured values; convert via character so
# the x-axis is in true kilowatts.
hist(as.numeric(as.character(data1$Global_active_power)),
     col = "red",
     xlab = "Global active power (kilowatts)",
     ylab = "Frequency",
     main = "Global Active Power",
     breaks = 13)
dev.off()
|
/Plot1.R
|
no_license
|
aliciacarabali/ExData_Plotting1
|
R
| false
| false
| 493
|
r
|
# Author: Alicia Carabali
# Date: 08/05/2019
#
# Plot 1: histogram of household Global Active Power restricted to
# 2007-02-01 and 2007-02-02, written to Plot1.png (480x480).
library(lubridate)
fullname<-"./Datasets/PowerConsumption/household_power_consumption.txt"
# Semicolon-separated raw file. NOTE(review): missing values are coded "?"
# in this dataset but na.strings is not set, so the power column is read as
# text/factor rather than numeric — confirm intended.
data<-read.table(fullname,header = TRUE,sep = ";")
# Keep only the two target days (dates in the file are d/m/Y, parsed with dmy()).
data1<-subset(data,dmy(data$Date)=="2007-02-02"|dmy(data$Date)=="2007-02-01")
png(filename = "Plot1.png", width = 480 , height = 480)
# NOTE(review): as.numeric() on a non-numeric column followed by /500 looks
# like a workaround for factor coercion; the x-axis is probably not in true
# kilowatts — verify against as.numeric(as.character(...)).
hist(as.numeric(data1$Global_active_power)/500,col = "red",xlab = "Global active power (kilowatts)",ylab = "Frequency", main = "Global Active Power",breaks = 13)
dev.off()
|
# Crawl the Nate news economy section (cate=eco): collect article URLs,
# titles and dates for 2020-01-01 .. 2020-03-31, 3 list pages per day.
library(stringr)
final_url<-c()
final_tit<-c()
final_date<-c()
# Dates as yyyymmdd integers covering Jan-Mar 2020.
mydate <- c(rep(20200101:20200131, 1), rep(20200201:20200229, 1), rep(20200301:20200331, 1))
# rep(20200701:20200731, 1), rep(20200801:20200831, 1), rep(20200901:20200930, 1)
j <- NULL; mydate <- as.numeric(mydate)
length(mydate)*20*3 # rough expected article count (91 days x ~20 titles x 3 pages)
for(j in mydate){
  i <- 1
  # crawl the first 3 list pages for this date
  for(i in 1:3){
    # https://news.nate.com/recent?mid=n0100&type=c&date=20200101&page=2
    # https://news.nate.com/recent?cate=eco&mid=n0102&type=c&date=20200101&page=2
    url<-paste0("https://news.nate.com/recent?cate=eco&mid=n0102&type=c&date=", j, "&page=", i)
    b<-readLines(url, encoding = "euc-kr")
    # Article links live on lines carrying class="lt1"; strip the fixed-width
    # markup around the href to obtain the URL.
    nurl<-paste0("https:/", str_sub(b[str_detect(b, "class=\"lt1\">")], 14, end=-16))
    b2<-b[str_detect(b, "<strong class=\"tit\">")]
    # Titles sit between the "tit" class attribute and </strong>; the last two
    # matches per page are non-article page elements and are dropped.
    tit <- str_sub(str_extract(b2, "(?<=tit).*(?=</strong>)"), 3, end=-2)[-c(21:22)]
    Sys.sleep(1) # be polite to the server
    final_url<-c(final_url, nurl)
    final_tit<-c(final_tit, tit)
    final_date<-c(final_date, rep(j, length(tit)))
    cat("\n", i, "-", j) # progress: page - date
  }
  j
}
# Fetch the body text of every collected article URL.
Sys.sleep(3)
k<-1
final_con<-c()
for(k in 1 :length(final_url)){
  b<-readLines(final_url[k],encoding = "euc-kr")
  # The article body starts at the realArtcContents div; some page layouts
  # use an HTML comment marker ("article content" in Korean) instead.
  aindex<-which(str_detect(b,"<div id=\"realArtcContents\">"))
  if(length(aindex) < 1){
    aindex<-which(str_detect(b,"<!-- ๊ธฐ์ฌ ๋ด์ฉ -->"))
  }
  Sys.sleep(1) # be polite to the server
  # The body ends where the first inline script block begins.
  bindex<-which(str_detect(b,"<script language=\"javascript\" type=\"application/javascript\">"))
  b2<-paste(b[aindex:bindex],collapse = " ")
  # Strip HTML tags, tab characters and the ad-section marker, then drop the
  # first 29 characters of leading boilerplate.
  b2<-gsub("<.*?>|\t|google_ad_section_start","",b2)
  b2<-str_sub(b2,30)
  final_con[k]<-b2
  cat("\n",k) # progress counter
}
str(final_con)
length(final_con)
# Combine URLs, titles and bodies, then persist CSV and RData snapshots.
final_data<-cbind(final_url, final_tit, final_con)
getwd()
write.csv(final_data,"nate_news_13.csv",row.names = F)
save(final_data,file="nate_news_13.RData")
head(final_data)
final_data <- as.data.frame(final_data)
final_con <- as.character(final_data$final_con)
head(final_con)
str(final_con)
comments<-gsub("<.*?>","",final_con)
comments<-gsub("\t","",comments)
comments<-gsub("[][!#$%*,:;<=>@_`|โ~{}&โ
โใ
ใ
ใใโโณโฒโฝโผโโโโโโกโโโทโถโคโ โกโฅโงโฃโโโฃโโโจโโโโโโโญโฉโชโฌใ]", " ",comments)
comments<-gsub("rdquo|gt|lt|nbsp|amp|quot|apos","",comments)
comments<-gsub(" "," ",comments)
comments<-gsub("\\^"," ",comments)
comments<-gsub("ใ
|ใ
|ใ
ก"," ",comments)
comments<-gsub("\"|\n|+","",comments)
comments<-gsub("\\+","",comments)
comments<-gsub("/|!|\\*|\\+|\\@"," ",comments)
comments<-gsub("'","",comments)
comments<-gsub("\"","",comments)
comments<-gsub("\"","",comments)
comments<-gsub("=","",comments)
comments<-gsub("~|;|<|>","",comments)
comments<-gsub("\\?","",comments)
comments<-gsub("\\[.*?\\]","",comments)
comments<-gsub("\\(.*?\\)","",comments)
comments<-gsub("\\(|\\)"," ",comments)
comments<-gsub("\\]|\\[|\\(|\\)|:|-|\\,|\\."," ",comments)
comments<-gsub("\\!","",comments)
comments<-gsub("โ|โ|โ|ยท|โ","",comments)
head(comments)
save(comments,file="pre_pro_news_13.RData")
length(comments)
unique_date<- unique(final_date)
#๋ถ์ฉ์ด stopwords
stw<-c("์ผ๋ก","ํ๋","์์","์
๋๋ค","ํฉ๋๋ค","์์ต๋๋ค","ํ๊ณ ","์๊ฒ","์๋","๋ผ๊ณ ","์๋","์ด๊ณ ","ํ๊ฒ","๋ํ","ํ๊ธฐ",
"๋๋","๋์ง","์ด๋ผ๊ณ ","์ด๊ณ ","๋ผ๋","๋ฉ๋๋ค","์์ต๋๋ค","๊ทธ๋ฆฌ๊ณ ","ํด์ผ","๋๋ฌธ","๊น์ง","ํ์ง","ํ์ฌ","ํด์","ํ์ต๋๋ค","๋ถํฐ"
,"์๊ฒ","๋ค์","ํ๋ฉด","์ด๋ค","์๋ค","๋๋","ํ์ผ๋ฉฐ","๊ฒ์","ํด์ฃผ์ญ์์ค","์๋
ํ์ญ๋๊น","์ง๋ฌธ","๊ฒ์","๋์์ต๋๋ค",
"์ด๊ธ","์์๋ค","๋ก๋","ํ๋๊ฒ","์์์ต๋๋ค","๋๋๋ฐ","ํ๋","์
๋๋ค","์ด๋ผ๋","๋์","์๋","ํ๋ค","ํ๋ค๊ณ ",
"ํ๋ค","์๋","๋๊น","๋์ด","๊ป์","์ธ๊ฐ์","์๋๋ฉด","์ด๋ฒ","๋๊ณ ","์๋","๋ํ","์๊ธฐ","๋๊ณ ","์ง๋","์ด๋ก","์ผ๋ก๋ง",
"์์ค","๋ด๋","์์์๋","์์๊น","๋๋ค","๋๊ณ ์๋","ํ๋","์์ ","๋๋ค๋ฉด","ํ์ผ๋ฉด","๋ค์
๋๋ค","ํ๋ฉด์","ํ์ง๋ง",
"์ด๋","๋๋ฆฝ๋๋ค","๊ฒ๋๋ค","์๋๋๋ค","์ถ์ต๋๋ค","ํ๋ค๋ฉด","ํด์ฃผ์ธ์","์ธ๋ฐ","์์","๋ฐ๋๋๋ค","์ด๋","์ด๋ผ๋","์ธ๋ฐ","ํ๋ฉฐ","์ ๋",
"์๋๋ก","์ด๋","์๋","ํ์์ต๋๋ค","๋ฐ์","์ฒ๋ผ","๊ฒ์ผ๋ก","๊ฒ์","๊ทธ๋์","๊ทธ๋ฌ๋๊น","๊ฒ์ด๋ผ๊ณ ","๋งํ์ต๋๋ค","๋งํ๋ค","๊ทธ๋ฌ๋ฉด","์๋ค๊ณ ",
"์ง๊ธ","๊ทธ๋ ๊ฒ","๊ฒ์ด","๋๋ฌธ์","์ต์ปค","์ฌ๋ฐฐํฌ","์ด์ด","๋ณด๋ฉด","๊ฐ์ด","๊ธฐ์","์ต์ปค",
"co","kr","com","์ด๋ฐ์ผ๋ฆฌ","๋งค์ผ๊ฒฝ์ ","๊ด๋ จ๊ธฐ์ฌ","ํ๊ตญ๊ฒฝ์ ","์ค์์ผ๋ณด","์กฐ์ ์ผ๋ณด","๋ฌด๋จ์ ์ฌ","์ํด","๋ค๋ฅด๋ฉด","์ดํ","์ํ","๋ฑ์","์ด๋ฐ",
"๋ฌด๋จ","๊ตฌ๋
์ ์ฒญ","hankyung","hankyung.com", "์ง๋", "์ด๋ ", "์ต๊ทผ", "ํ์ฌ", "ํํธ", "์ฌํด", "๊ดํด", "๊ดํ์ฌ",
"๊ด๋ จ", "์ง๋๋ฌ", "์ด๋ฅผ", "๊ฒฝ์ฐ", "ํตํด", "์ด์", "๋ค๋ง", "๊ฐ์", "ํ์ง๋ง", "๋ํด", "๊ธฐ์ค", "์ง๋ํด", "ํนํ", "๊ฐ์ฅ", "๊ฒ์ด๋ค",
"๋ฐ๋ผ", "๋ฑ์ด")
stw2<-c("๋ค์ด๋ฒ","์ผ๋ณด","๋ท์ปด","๊ธ์ง","๋ฐ๋ก๊ฐ๊ธฐ","๋ชจ๋ฐ์ผ","news","๋ณด์ธ์","YTN", "MBN")
library(stringr)
i <- 1
# For each date: tokenize the cleaned article text on spaces, count word
# frequencies, filter out stopwords / 1-character tokens / tokens containing
# digits / media-site names, and write the top-100 keywords per date.
for (i in 1 : length(unique_date)){
  date_index <- final_date == unique_date[i]
  date_com <- comments[date_index]
  data2<-str_split(date_com," ")
  ta <- unlist(data2)
  ta2 <- table(ta)
  sort(ta2,decreasing = T)[1:50] # interactive inspection only
  ta3 <- ta2[!names(ta2) %in% stw] # drop stopwords
  sort(ta3,decreasing = T)[1:50]
  ta4 <- ta3[nchar(names(ta3)) > 1] # drop single-character tokens
  sort(ta4,decreasing = T)[1:50]
  ta5 <- ta4[!str_detect(names(ta4),"\\d")] # drop tokens containing digits
  # ta6 <- sort(ta5,decreasing = T)[1:100]
  # Drop any token that contains a media/site name from stw2 as a substring.
  ss<-sapply(names(ta5),str_detect, stw2)
  ta6<-ta5[apply(ss,2,sum) == 0]
  ta7 <- sort(ta6,decreasing = T)[1:100]
  write.csv(ta7,paste0(unique_date[i],"nate_news.csv"),row.names=F)
  cat("\n",i) # progress counter
}
# Render one word-cloud PNG per date from the per-date keyword CSVs.
library(wordcloud)
library(extrafont)
for(i in 1:length(unique_date)){
  wm<- read.csv(paste0(unique_date[i],"nate_news.csv"))
  head(wm)
  word_name<-as.character(wm[,1]) # keyword
  freq<-as.numeric(wm[,2])        # frequency
  # good_color<-color.gradient(c(0.3176471,0.2392157,0.3176471,0.925),c(0.7098039,0.6588235,0.2392157,0.15),c(0.2392157,0.7098039,0.7098039,0.28),nslices=length(freq))
  # Register a Hangul-capable font so Korean keywords render in the cloud
  # (Windows-specific: windowsFonts/windowsFont).
  windowsFonts(malgun=windowsFont("08SeoulHangang M"))
  png(paste0("news_word_cloud",unique_date[i],".png"),width = 500,height = 500)
  wordcloud(word_name,freq,c(8,1),random.order=FALSE,family='malgun')
  dev.off()
  cat("\n",i) # progress counter
}
#install.packages("xlsx")
library(xlsx)
# Re-read all per-date keyword CSVs into a list of data frames.
data2<-list()
for (i in 1:length(unique_date)){
  dat<- read.csv(paste0(unique_date[i],"nate_news.csv"))
  data2[[i]]<-dat
  cat("\n",i) # progress counter
}
# Column-bind all daily tables side by side.
data3<-do.call("cbind",data2)
head(data3)
# Keep only the keyword columns (odd columns of the cbind-ed table).
dd<-data3[,seq(1,10,by=2)]
head(dd)
c(dd)
uni <- as.character(unique(unlist(dd)))
# Export data3 to Excel: bold-bordered centered header, thin-bordered body,
# and a light-blue highlight on the second column of every data row.
wb<-createWorkbook()
sheet <- createSheet(wb, "Sheet 1")
cs1 <- CellStyle(wb) + Alignment(horizontal="ALIGN_CENTER", vertical="VERTICAL_CENTER") + Border(color="black", position=c("TOP", "RIGHT" , "LEFT","BOTTOM"),pen=c("BORDER_MEDIUM","BORDER_MEDIUM","BORDER_MEDIUM","BORDER_MEDIUM"))
cs2 <- CellStyle(wb) + Border(color="black", position=c("LEFT","RIGHT","TOP", "BOTTOM"),pen=c("BORDER_THIN","BORDER_THIN","BORDER_THIN","BORDER_THIN"))
addDataFrame(data3, sheet, row.names = F, colnamesStyle=cs1)
for(i in 1:nrow(data3) ){
  # rowIndex is i+1 because row 1 holds the header.
  Row<-getRows(sheet, rowIndex=(i+1))
  Cell<-getCells(Row,colIndex = 2)
  cs3<- CellStyle(wb) + Fill(foregroundColor="lightblue", backgroundColor="lightblue", pattern="SOLID_FOREGROUND")
  setCellStyle(Cell[[1]], cs3)
}
saveWorkbook(wb, "newsdata_13.xlsx")
# Aggregate daily keywords into monthly top-200 keyword tables.
head(data2)
head(final_date)
library(stringr)
mon<-str_sub(final_date,end=-3) # yyyymm prefix of each yyyymmdd date
mon2<-as.numeric(unique(mon))
# NOTE(review): [-1] discards the first (earliest) month after sorting —
# confirm this exclusion is intentional.
mon2<-mon2[order(mon2)][-1]
head(data2)
length(data2)
names(data2)<-unique_date # name each daily keyword table by its date
head(data2)
j<-1
month_data<-list()
for(j in 1:length(mon2)){
  # Collect all daily tables whose date string contains this month prefix.
  dd<-data2[str_detect((names(data2)),as.character(mon2[j]))]
  length(dd)
  dd2<-do.call("rbind",dd)
  # Sum frequencies per keyword (col 1) across the month (col 2 = counts).
  ta<-tapply(dd2[,2],dd2[,1],sum)
  head(dd2)
  ta2<-sort(ta,decreasing = T)[1:200]
  ta3<-cbind(names(ta2),ta2)
  month_data[[j]]<-ta3
  cat("\n",j) # progress counter
}
length(month_data)
head(month_data)
class(month_data)
write.csv(month_data, "month_data_13.csv")
|
/Source Code/news_13.R
|
no_license
|
JuheePak/R-miniproject
|
R
| false
| false
| 8,272
|
r
|
library(stringr)
final_url<-c()
final_tit<-c()
final_date<-c()
mydate <- c(rep(20200101:20200131, 1), rep(20200201:20200229, 1), rep(20200301:20200331, 1))
# rep(20200701:20200731, 1), rep(20200801:20200831, 1), rep(20200901:20200930, 1)
j <- NULL; mydate <- as.numeric(mydate)
length(mydate)*20*3 #5460๊ฐ
for(j in mydate){
i <- 1
#ํฌ๋กค๋ง
for(i in 1:3){
# https://news.nate.com/recent?mid=n0100&type=c&date=20200101&page=2
# https://news.nate.com/recent?cate=eco&mid=n0102&type=c&date=20200101&page=2
url<-paste0("https://news.nate.com/recent?cate=eco&mid=n0102&type=c&date=", j, "&page=", i)
b<-readLines(url, encoding = "euc-kr")
nurl<-paste0("https:/", str_sub(b[str_detect(b, "class=\"lt1\">")], 14, end=-16))
b2<-b[str_detect(b, "<strong class=\"tit\">")]
tit <- str_sub(str_extract(b2, "(?<=tit).*(?=</strong>)"), 3, end=-2)[-c(21:22)]
Sys.sleep(1)
final_url<-c(final_url, nurl)
final_tit<-c(final_tit, tit)
final_date<-c(final_date, rep(j, length(tit)))
cat("\n", i, "-", j)
}
j
}
Sys.sleep(3)
k<-1
final_con<-c()
for(k in 1 :length(final_url)){
b<-readLines(final_url[k],encoding = "euc-kr")
aindex<-which(str_detect(b,"<div id=\"realArtcContents\">"))
if(length(aindex) < 1){
aindex<-which(str_detect(b,"<!-- ๊ธฐ์ฌ ๋ด์ฉ -->"))
}
Sys.sleep(1)
bindex<-which(str_detect(b,"<script language=\"javascript\" type=\"application/javascript\">"))
b2<-paste(b[aindex:bindex],collapse = " ")
b2<-gsub("<.*?>|\t|google_ad_section_start","",b2)
b2<-str_sub(b2,30)
final_con[k]<-b2
cat("\n",k)
}
str(final_con)
length(final_con)
final_data<-cbind(final_url, final_tit, final_con)
getwd()
write.csv(final_data,"nate_news_13.csv",row.names = F)
save(final_data,file="nate_news_13.RData")
head(final_data)
final_data <- as.data.frame(final_data)
final_con <- as.character(final_data$final_con)
head(final_con)
str(final_con)
comments<-gsub("<.*?>","",final_con)
comments<-gsub("\t","",comments)
comments<-gsub("[][!#$%*,:;<=>@_`|โ~{}&โ
โใ
ใ
ใใโโณโฒโฝโผโโโโโโกโโโทโถโคโ โกโฅโงโฃโโโฃโโโจโโโโโโโญโฉโชโฌใ]", " ",comments)
comments<-gsub("rdquo|gt|lt|nbsp|amp|quot|apos","",comments)
comments<-gsub(" "," ",comments)
comments<-gsub("\\^"," ",comments)
comments<-gsub("ใ
|ใ
|ใ
ก"," ",comments)
comments<-gsub("\"|\n|+","",comments)
comments<-gsub("\\+","",comments)
comments<-gsub("/|!|\\*|\\+|\\@"," ",comments)
comments<-gsub("'","",comments)
comments<-gsub("\"","",comments)
comments<-gsub("\"","",comments)
comments<-gsub("=","",comments)
comments<-gsub("~|;|<|>","",comments)
comments<-gsub("\\?","",comments)
comments<-gsub("\\[.*?\\]","",comments)
comments<-gsub("\\(.*?\\)","",comments)
comments<-gsub("\\(|\\)"," ",comments)
comments<-gsub("\\]|\\[|\\(|\\)|:|-|\\,|\\."," ",comments)
comments<-gsub("\\!","",comments)
comments<-gsub("โ|โ|โ|ยท|โ","",comments)
head(comments)
save(comments,file="pre_pro_news_13.RData")
length(comments)
unique_date<- unique(final_date)
#๋ถ์ฉ์ด stopwords
stw<-c("์ผ๋ก","ํ๋","์์","์
๋๋ค","ํฉ๋๋ค","์์ต๋๋ค","ํ๊ณ ","์๊ฒ","์๋","๋ผ๊ณ ","์๋","์ด๊ณ ","ํ๊ฒ","๋ํ","ํ๊ธฐ",
"๋๋","๋์ง","์ด๋ผ๊ณ ","์ด๊ณ ","๋ผ๋","๋ฉ๋๋ค","์์ต๋๋ค","๊ทธ๋ฆฌ๊ณ ","ํด์ผ","๋๋ฌธ","๊น์ง","ํ์ง","ํ์ฌ","ํด์","ํ์ต๋๋ค","๋ถํฐ"
,"์๊ฒ","๋ค์","ํ๋ฉด","์ด๋ค","์๋ค","๋๋","ํ์ผ๋ฉฐ","๊ฒ์","ํด์ฃผ์ญ์์ค","์๋
ํ์ญ๋๊น","์ง๋ฌธ","๊ฒ์","๋์์ต๋๋ค",
"์ด๊ธ","์์๋ค","๋ก๋","ํ๋๊ฒ","์์์ต๋๋ค","๋๋๋ฐ","ํ๋","์
๋๋ค","์ด๋ผ๋","๋์","์๋","ํ๋ค","ํ๋ค๊ณ ",
"ํ๋ค","์๋","๋๊น","๋์ด","๊ป์","์ธ๊ฐ์","์๋๋ฉด","์ด๋ฒ","๋๊ณ ","์๋","๋ํ","์๊ธฐ","๋๊ณ ","์ง๋","์ด๋ก","์ผ๋ก๋ง",
"์์ค","๋ด๋","์์์๋","์์๊น","๋๋ค","๋๊ณ ์๋","ํ๋","์์ ","๋๋ค๋ฉด","ํ์ผ๋ฉด","๋ค์
๋๋ค","ํ๋ฉด์","ํ์ง๋ง",
"์ด๋","๋๋ฆฝ๋๋ค","๊ฒ๋๋ค","์๋๋๋ค","์ถ์ต๋๋ค","ํ๋ค๋ฉด","ํด์ฃผ์ธ์","์ธ๋ฐ","์์","๋ฐ๋๋๋ค","์ด๋","์ด๋ผ๋","์ธ๋ฐ","ํ๋ฉฐ","์ ๋",
"์๋๋ก","์ด๋","์๋","ํ์์ต๋๋ค","๋ฐ์","์ฒ๋ผ","๊ฒ์ผ๋ก","๊ฒ์","๊ทธ๋์","๊ทธ๋ฌ๋๊น","๊ฒ์ด๋ผ๊ณ ","๋งํ์ต๋๋ค","๋งํ๋ค","๊ทธ๋ฌ๋ฉด","์๋ค๊ณ ",
"์ง๊ธ","๊ทธ๋ ๊ฒ","๊ฒ์ด","๋๋ฌธ์","์ต์ปค","์ฌ๋ฐฐํฌ","์ด์ด","๋ณด๋ฉด","๊ฐ์ด","๊ธฐ์","์ต์ปค",
"co","kr","com","์ด๋ฐ์ผ๋ฆฌ","๋งค์ผ๊ฒฝ์ ","๊ด๋ จ๊ธฐ์ฌ","ํ๊ตญ๊ฒฝ์ ","์ค์์ผ๋ณด","์กฐ์ ์ผ๋ณด","๋ฌด๋จ์ ์ฌ","์ํด","๋ค๋ฅด๋ฉด","์ดํ","์ํ","๋ฑ์","์ด๋ฐ",
"๋ฌด๋จ","๊ตฌ๋
์ ์ฒญ","hankyung","hankyung.com", "์ง๋", "์ด๋ ", "์ต๊ทผ", "ํ์ฌ", "ํํธ", "์ฌํด", "๊ดํด", "๊ดํ์ฌ",
"๊ด๋ จ", "์ง๋๋ฌ", "์ด๋ฅผ", "๊ฒฝ์ฐ", "ํตํด", "์ด์", "๋ค๋ง", "๊ฐ์", "ํ์ง๋ง", "๋ํด", "๊ธฐ์ค", "์ง๋ํด", "ํนํ", "๊ฐ์ฅ", "๊ฒ์ด๋ค",
"๋ฐ๋ผ", "๋ฑ์ด")
stw2<-c("๋ค์ด๋ฒ","์ผ๋ณด","๋ท์ปด","๊ธ์ง","๋ฐ๋ก๊ฐ๊ธฐ","๋ชจ๋ฐ์ผ","news","๋ณด์ธ์","YTN", "MBN")
library(stringr)
i <- 1
for (i in 1 : length(unique_date)){
date_index <- final_date == unique_date[i]
date_com <- comments[date_index]
data2<-str_split(date_com," ")
ta <- unlist(data2)
ta2 <- table(ta)
sort(ta2,decreasing = T)[1:50]
ta3 <- ta2[!names(ta2) %in% stw]
sort(ta3,decreasing = T)[1:50]
ta4 <- ta3[nchar(names(ta3)) > 1]
sort(ta4,decreasing = T)[1:50]
ta5 <- ta4[!str_detect(names(ta4),"\\d")]
# ta6 <- sort(ta5,decreasing = T)[1:100]
ss<-sapply(names(ta5),str_detect, stw2)
ta6<-ta5[apply(ss,2,sum) == 0]
ta7 <- sort(ta6,decreasing = T)[1:100]
write.csv(ta7,paste0(unique_date[i],"nate_news.csv"),row.names=F)
cat("\n",i)
}
library(wordcloud)
library(extrafont)
for(i in 1:length(unique_date)){
wm<- read.csv(paste0(unique_date[i],"nate_news.csv"))
head(wm)
word_name<-as.character(wm[,1])
freq<-as.numeric(wm[,2])
# good_color<-color.gradient(c(0.3176471,0.2392157,0.3176471,0.925),c(0.7098039,0.6588235,0.2392157,0.15),c(0.2392157,0.7098039,0.7098039,0.28),nslices=length(freq))
windowsFonts(malgun=windowsFont("08SeoulHangang M"))
png(paste0("news_word_cloud",unique_date[i],".png"),width = 500,height = 500)
wordcloud(word_name,freq,c(8,1),random.order=FALSE,family='malgun')
dev.off()
cat("\n",i)
}
#install.packages("xlsx")
library(xlsx)
data2<-list()
for (i in 1:length(unique_date)){
dat<- read.csv(paste0(unique_date[i],"nate_news.csv"))
data2[[i]]<-dat
cat("\n",i)
}
data3<-do.call("cbind",data2)
head(data3)
dd<-data3[,seq(1,10,by=2)]
head(dd)
c(dd)
uni <- as.character(unique(unlist(dd)))
wb<-createWorkbook()
sheet <- createSheet(wb, "Sheet 1")
cs1 <- CellStyle(wb) + Alignment(horizontal="ALIGN_CENTER", vertical="VERTICAL_CENTER") + Border(color="black", position=c("TOP", "RIGHT" , "LEFT","BOTTOM"),pen=c("BORDER_MEDIUM","BORDER_MEDIUM","BORDER_MEDIUM","BORDER_MEDIUM"))
cs2 <- CellStyle(wb) + Border(color="black", position=c("LEFT","RIGHT","TOP", "BOTTOM"),pen=c("BORDER_THIN","BORDER_THIN","BORDER_THIN","BORDER_THIN"))
addDataFrame(data3, sheet, row.names = F, colnamesStyle=cs1)
for(i in 1:nrow(data3) ){
Row<-getRows(sheet, rowIndex=(i+1))
Cell<-getCells(Row,colIndex = 2)
cs3<- CellStyle(wb) + Fill(foregroundColor="lightblue", backgroundColor="lightblue", pattern="SOLID_FOREGROUND")
setCellStyle(Cell[[1]], cs3)
}
saveWorkbook(wb, "newsdata_13.xlsx")
#์ผ๋ณ ํค์๋-> ์๋ณํค์๋
head(data2)
head(final_date)
library(stringr)
mon<-str_sub(final_date,end=-3)
mon2<-as.numeric(unique(mon))
mon2<-mon2[order(mon2)][-1]
head(data2)
length(data2)
names(data2)<-unique_date
head(data2)
j<-1
month_data<-list()
for(j in 1:length(mon2)){
dd<-data2[str_detect((names(data2)),as.character(mon2[j]))]
length(dd)
dd2<-do.call("rbind",dd)
ta<-tapply(dd2[,2],dd2[,1],sum)
head(dd2)
ta2<-sort(ta,decreasing = T)[1:200]
ta3<-cbind(names(ta2),ta2)
month_data[[j]]<-ta3
cat("\n",j)
}
length(month_data)
head(month_data)
class(month_data)
write.csv(month_data, "month_data_13.csv")
|
#' Update a single plot inside a controller
#' @param ctr a \code{pmxClass} controller
#' @param pname \code{character} name of the plot to update
#' @param defaults_ \code{list} default parameters of the plot
#' @param ... extra parameters that override the defaults
#' @noRd
update_container_plots <- function(ctr, pname, defaults_, ...){
  stopifnot(is_pmxclass(ctr))
  # Nothing to do when the controller does not define this plot.
  if (!pname %in% (ctr %>% plot_names())) {return(NULL)}
  # Reset stratification/colour/filter/transform settings, then re-apply the
  # defaults merged with any caller overrides (overrides win via l_left_join).
  pmx_update(ctr, pname, strat.color=NULL, strat.facet=NULL, color.scales=NULL,
             filter=NULL, trans=NULL, l_left_join(defaults_, list(...)), pmxgpar=NULL
  )
}
#' Create parameters for plot updating
#' @param ctr a \code{pmxClass} controller
#' @param pname \code{character} plot name
#' @noRd
get_plot_param <- function(ctr, pname){
  # Capture this call's arguments (including ...) as unevaluated
  # expressions; [-1] drops the function name itself.
  params <- as.list(match.call(expand.dots = TRUE))[-1]
  # These two plot types never use a smoother.
  if ((pname == "iwres_dens") || (pname == "pmx_vpc")) {
    params[["is.smooth"]] <- FALSE
  }
  params[["ctr"]] <- ctr
  params[["pname"]] <- pname
  params <- lang_to_expr(params)
  # Plot defaults come from the controller configuration (upper-case keys).
  params$defaults_ <- ctr$config$plots[[toupper(pname)]]
  # Propagate the controller-level BLOQ spec unless the caller supplied one.
  if (!exists("bloq", params) && !is.null(ctr$bloq)) {
    params$defaults_[["bloq"]] <- ctr$bloq
  }
  # Check that x or y labels for updating exist (else plot updating will not work)
  if ((!is.null(params$defaults_$labels$x)) || (!is.null(params$defaults_$labels$y))){
    # Check if labels$x exists in added abbreviations; if not set default labels$x
    if ((!is.null(params$defaults_$labels$x)) &&
        (!(params$defaults_$labels$x %in% names(params$ctr$abbrev)))){
      params$ctr$abbrev[params$defaults_$labels$x] <- params$defaults_$labels$x
    }
    # check if labels$y exists in added abbreviations; if not set default labels$y
    if ((!is.null(params$defaults_$labels$y)) &&
        (!(params$defaults_$labels$y %in% names(params$ctr$abbrev)))){
      params$ctr$abbrev[params$defaults_$labels$y] <- params$defaults_$labels$y
    }
    # NOTE(review): when neither an x nor a y label is configured the update
    # is silently skipped and NULL is returned — confirm this is intended.
    do.call(update_container_plots, params)
  }
}
#' Create simulation object
#'
#' Builds the \code{pmxSimClass} object used by VPC plots, from either a
#' simulation file on disk or an in-memory \code{data.table} (the latter
#' takes precedence when both are supplied).
#'
#' @param file \code{character} path to the simulation file
#' @param data \code{data.table} simulation data
#' @param irun \code{character} name of the simulation column
#' @param idv \code{character} name of the ind. variable (default "TIME")
#' @export
#' @example inst/examples/vpc.R
pmx_sim <- function(
                    file,
                    data,
                    irun,
                    idv) {
  ID <- NULL # silence R CMD check NOTE for non-standard evaluation
  if (missing(data)) data <- NULL
  if (missing(idv)) idv <- "TIME"
  # FIX: `sim` was previously left undefined when neither source was usable,
  # producing an obscure "object 'sim' not found" error downstream.
  sim <- NULL
  if (!missing(file) && file.exists(file)) sim <- pmx_fread(file)
  if (!is.null(data) && is.data.table(data)) sim <- data
  if (is.null(sim)) {
    stop(
      "pmx_sim: provide either an existing `file` or a `data` data.table",
      call. = FALSE
    )
  }
  if (is.data.table(sim)) {
    # Normalise the independent-variable column name to upper-case "TIME".
    if (tolower(idv) == "time") {
      idvn <- names(sim)[tolower(names(sim)) == "time"]
      setnames(sim, idvn, "TIME")
      idv <- "TIME"
    }
    # Normalise the subject identifier column (case-insensitive) to "ID".
    id_col <- grep("^id$", names(sim), ignore.case = TRUE, value = TRUE)
    setnames(sim, id_col, "ID")
    obj <- list(
      sim = sim,
      idv = idv,
      irun = irun
    )
    structure(obj, class = c("pmxSimClass", "list"))
  }
}
#' Validate that a mandatory argument was supplied
#'
#' @param value the argument value to check (any type)
#' @param pmxname \code{character} human-readable argument name used in the
#'   error message (e.g. "work_dir")
#' @return \code{value}, unchanged, when it is neither missing nor NULL
#' @noRd
check_argument <- function(value, pmxname) {
  # FIX 1: the original `any(missing(value) | is.null(value))` evaluated
  # is.null(value) even when the argument was missing, so R raised
  # "argument ... is missing, with no default" before the friendly message.
  # `||` short-circuits, leaving the missing promise unforced.
  # FIX 2: the original sprintf passed two data arguments for one %s, so
  # pmxname was never shown and an "arguments not used by format" warning
  # was emitted.
  if (missing(value) || is.null(value)) {
    stop(
      sprintf("Please set a %s argument", pmxname),
      call. = FALSE
    )
  }
  value
}
#' Create a pmx object
#'
#' Create a pmx object from a data source
#' @param config Can be either :
#' The complete path for the configuration file, the name of configuration within the built-in
#' list of configurations, or a configuration object.
#' @param sys the system name, can be "mlx" (for Monolix 2016) or "mlx18" (for Monolix 2018/19 and later)
#' @param directory \code{character} modelling output directory.
#' @param input \code{character} complete path to the modelling input file
#' @param dv \code{character} the name of measurable variable used in the input modelling file
#' @param dvid \emph{[Optional]} \code{character} observation type parameter. This is mandatory
#' in case of multiple endpoint (PKPD).
#' @param cats \emph{[Optional]}\code{character} vector of categorical covariates
#' @param conts \emph{[Optional]}\code{character} vector of continuous covariates
#' @param occ \emph{[Optional]}\code{character} occasional covariate variable name
#' @param strats \emph{[Optional]}\code{character} extra stratification variables
#' @param settings \emph{[Optional]}\code{pmxSettingsClass} \code{\link{pmx_settings}}
#' shared between all plots
#' @param endpoint \code{pmxEndpointClass} or \code{integer} or \code{character} default to NULL
#' of the endpoint code. \code{\link{pmx_endpoint}}
#' @param sim \code{pmxSimClass} default to NULL. \code{\link{pmx_sim}} used for VPC, e.g.: sim = pmx_sim(file=vpc_file, irun="rep",idv="TIME")
#' @param bloq \code{pmxBLOQClass} default to NULL. \code{\link{pmx_bloq}} specify bloq, within controller: e.g. bloq=pmx_bloq(cens = "BLOQ_name", limit = "LIMIT_name")
#' @param sim_blq \code{logical} if TRUE uses sim_blq values for plotting. Only for Monolix 2018 and later.
#' @param id \emph{[Optional]} \code{character} the name of individual variable used in the input modelling file
#' @param time \emph{[Optional]} \code{character} Time variable.
#' @return \code{pmxClass} controller object.
#' @export
#' @example inst/examples/controller.R
pmx <- function(config, sys = "mlx", directory, input, dv, dvid, cats = NULL, conts = NULL, occ = NULL, strats = NULL,
                settings = NULL, endpoint = NULL, sim = NULL, bloq = NULL, id = NULL, time = NULL, sim_blq = NULL) {
  # Mandatory arguments: fail fast with a clear message.
  directory <- check_argument(directory, "work_dir")
  # FIX: removed unused local `ll <- list.files(directory)` (dead code and a
  # pointless extra disk hit).
  input <- check_argument(input, "input")
  # Optional arguments: normalise missing values to their working defaults.
  if (missing(cats)) cats <- ""
  if (missing(sim)) sim <- NULL
  if (missing(endpoint)) {
    endpoint <- NULL
  }
  if (missing(config)) config <- "standing"
  assert_that(is_character_or_null(cats))
  if (missing(conts)) conts <- ""
  assert_that(is_character_or_null(conts))
  if (missing(occ)) occ <- ""
  assert_that(is_character_or_null(occ))
  if (missing(strats)) strats <- ""
  assert_that(is_character_or_null(strats))
  if (missing(sim_blq)) sim_blq <- FALSE
  if (missing(dv)) dv <- "DV"
  if (missing(dvid)) dvid <- "DVID"
  if (!inherits(config, "pmxConfig")) {
    # Monolix 2018+ runs are detected by the presence of
    # populationParameters.txt in the results directory.
    if ("populationParameters.txt" %in% list.files(directory)) sys <- "mlx18"
    else{
      # Otherwise require at least one .txt result file before accepting the
      # directory as a valid results path.
      is_mlx <- list.files(directory, pattern = "txt$")
      if (length(is_mlx) == 0) {
        stop(
          sprintf(
            "%s is not valid directory results path: please set a valid directory argument",
            directory
          )
        )
      }
    }
    config <- load_config(config, sys)
  }
  # Fall back to default settings when none (or an invalid object) is given.
  if (missing(settings)) settings <- pmx_settings()
  if (!inherits(settings, "pmxSettingsClass")) {
    settings <- pmx_settings()
  }
  if (missing(bloq)) bloq <- NULL
  assert_that(inherits(bloq, "pmxBLOQClass") || is.null(bloq))
  pmxClass$new(directory, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time, sim_blq)
}
#' @rdname pmx
#' @details
#' \code{pmx_mlx} is a wrapper to mlx for the MONOLIX system ( \code{sys="mlx"})
#' @export
pmx_mlx <-
  # Thin wrapper: forwards everything to pmx() with sys = "mlx". Arguments are
  # passed positionally and lazily, so arguments missing here remain missing
  # inside pmx() and pick up its missing()-based defaults.
  function(config, directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time, sim_blq) {
    pmx(config, "mlx", directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time, sim_blq)
  }
#' Create a controller from mlxtran file
#'
#' @param file_name \code{character} mlxtran file path.
#' @param call \code{logical} if TRUE the result is the parameters parsed
#' @param version \code{integer} Non-negative integer. Optional if you don't use a wildcard in the file_name.
#' Otherwise you MUST provide version and the wildcard will be substituted with "version", which represents the mlxtran model version.
#' @param ... extra arguments passed to pmx_mlx.
#' @rdname pmx
#'
#' @export
#' @details
#'
#' \code{pmx_mlxtran} parses the mlxtran file and guesses \code{\link{pmx_mlx}} arguments. In case of
#' multi endpoint the first endpoint is selected. You can though set the endpoint through the same argument.
#' When you set \code{call=TRUE}, no controller is created but only the parameters parsed
#' by mlxtran are returned. This can be very helpful, in case you would like to customize parameters
#' (adding settings via pmx_settings, changing the default endpoint).
pmx_mlxtran <- function(file_name, config = "standing", call = FALSE, endpoint, version = -1, ...) {
  # Substituting * with version in file_name
  if (grepl("*", file_name, fixed = TRUE)) {
    assert_that(version>=0, msg = "Using wildcard in file_name assume providing non-negative version")
    file_name <- gsub("*", version, file_name, fixed = TRUE)
  }
  # Arguments guessed from the mlxtran file.
  params <- parse_mlxtran(file_name)
  # Merge in the arguments the caller supplied explicitly (unevaluated).
  rr <- as.list(match.call()[-1])
  rr$file_name <- NULL
  params <- append(params, rr)
  if (!exists("config",params)) params$config <- config
  if (!missing(endpoint)) {
    # An explicit endpoint overrides the one guessed from the file.
    params$endpoint <- NULL
    params$endpoint <- endpoint
  }
  if (call) {
    # Return the parsed parameters instead of building a controller.
    params$call <- NULL
    return(params)
  }
  params$call <- NULL
  # We don't need to pass version to pmx_mlx
  params$version <- NULL
  do.call(pmx_mlx, params)
}
# Render a formula as readable text ("lhs and rhs"); pass anything else
# through unchanged.
formula_to_text <- function(form) {
  if (!is.formula(form)) {
    return(form)
  }
  # Drop the `~` operator, then join the remaining parts with " and ".
  parts <- as.character(as.list(form)[-1])
  paste(parts, collapse = " and ")
}
#' Create controller global settings
#' @param is.draft \code{logical} if FALSE any plot is without draft annotation
#' @param use.abbrev \code{logical} if FALSE use full description from abbreviation mapping for axis names
#' @param color.scales \code{list} list containing elements of scale_color_manual
#' @param use.labels \code{logical} if TRUE replace factor named by cats.labels
#' @param cats.labels \code{list} list of named vectors for each factor
#' @param use.titles \code{logical} FALSE to generate plots without titles
#' @param effects \code{list} list of effects levels and labels
#' @param ... extra attributes attached to the returned object (passed to
#'   \code{structure}); no further use yet
#' @return pmxSettingsClass object
#' @example inst/examples/pmx-settings.R
#' @export
pmx_settings <-
  function(is.draft = TRUE, use.abbrev = TRUE, color.scales = NULL,
           cats.labels = NULL, use.labels = FALSE, use.titles = TRUE,
           effects = NULL,
           ...) {
    # Validate all flags as single non-missing logicals.
    checkmate::assert_logical(x=is.draft, len=1, any.missing=FALSE)
    checkmate::assert_logical(x=use.abbrev, len=1, any.missing=FALSE)
    checkmate::assert_logical(x=use.labels, len=1, any.missing=FALSE)
    checkmate::assert_logical(x=use.titles, len=1, any.missing=FALSE)
    # `effects`, when given, must be a list holding equally long
    # `levels` and `labels` entries.
    if (!missing(effects) && !is.null(effects)) {
      if (!is.list(effects)) stop("effects should be a list")
      if (!exists("levels", effects) || !exists("labels", effects)) {
        stop("effects should be a list that contains levels and labels")
      }
      if (length(effects$labels) != length(effects$levels)) {
        stop("effects should be a list that contains levels and labels have the same length")
      }
    }
    res <- list(
      is.draft = is.draft,
      use.abbrev = use.abbrev,
      color.scales = color.scales,
      use.labels = use.labels,
      cats.labels = cats.labels,
      use.titles = use.titles,
      effects = effects
    )
    # Pre-build the ggplot2 labeller from the category labels when requested.
    if (use.labels) {
      res$labeller <- do.call("labeller", cats.labels)
    }
    structure(
      res, ...,
      class = "pmxSettingsClass"
    )
  }
#' Creates pmx endpoint object
#'
#' @param code \code{character} endpoint code : used to filter observations DVID==code.
#' @param label \code{character} endpoint label: used to set title and axis labels
#' @param unit \code{character} endpoint unit : used to set title and axis labels
#' @param file.code \code{character} endpoint file code : used to set predictions and finegrid \cr
#' files extensions in case using code parameter is not enough.
#' @param trans \code{list} Transformation parameter not used yet.
#' @export
#'
#' @example inst/examples/endpoint.R
#' @details
#' In case of multiple endpoints, pkpd case for example, we need to pass endpoint to the pmx call.
#' Internally , ggPMX will filter the observations data set to keep only rows satisfying \code{DVID==code}.
#' The \code{code} is also used to find the right predictions and or fingrid files.
#' ggPMX use the configuration file to fine the path of the predictions file
#' (like the single endpoint case) and then filter the right file using the code parameter. \cr
#' For example:
#' \itemize{
#' \item predictions\{code\}.txt for mlx16
#' \item predictions\{code\}.txt and y\{code\}_residual for mlx18
#' }
#'
#' For some tricky examples the code parameter is not enough to find the files. In that case the
#' \code{file.code} parameter is used to distinguish the endpoint files.
pmx_endpoint <-
  function(code,
           label = "",
           unit = "",
           file.code = code,
           trans = NULL) {
    # All descriptors are character; trans stays optional (NULL allowed).
    assert_that(is.character(code))
    assert_that(is.character(file.code))
    assert_that(is.character(unit))
    assert_that(is.character(label))
    assert_that(is_character_or_null(trans))
    endpoint <- list(
      code = code,
      label = label,
      unit = unit,
      file.code = file.code,
      trans = trans
    )
    class(endpoint) <- "pmxEndpointClass"
    endpoint
  }
#' Creates BLOQ object attributes
#'
#' @param cens \code{character} the censoring column name
#' @param limit \code{character} the limit column name (optional)
#' @param colour \code{character} the color of the geom
#' @param size \code{numeric} the size of the geom when using \code{geom_point()}
#' @param linewidth \code{numeric} the line width of the segment when using \code{geom_segment()}
#' @param alpha \code{numeric} the alpha of the geom
#' @param show \code{logical} if FALSE remove all censory observations
#' @param ... any other graphical parameter
#'
#' @export
#' @details
#' To define that a measurement is censored, the observation data set should include
#' a CENSORING column ( default to `CENS` ) and put 1 for lower limit or -1 for upper limit. \cr
#' Optionally, data set can contain have a limit column ( default to `LIMIT`) column to set the other limit.
pmx_bloq <-
  function(
           cens = "CENS",
           limit = "LIMIT",
           colour = "pink",
           size = 2,
           linewidth=1,
           alpha = 0.9,
           show = TRUE,
           ...) {
    # Named defaults first, then any extra graphical parameters appended
    # in the order they were supplied.
    bloq <- c(
      list(
        cens = cens,
        limit = limit,
        show = show,
        colour = colour,
        size = size,
        linewidth = linewidth,
        alpha = alpha
      ),
      list(...)
    )
    class(bloq) <- "pmxBLOQClass"
    bloq
  }
#' Create shrinkage parameter object
#' @param fun \code{list} shrinkage function can be \code{sd} or \code{var}
#' @param size \code{numeric} shrinkage text size
#' @param color \code{character} shrinkage text color
#' @param vjust \code{numeric} shrinkage position vertical adjustment
#' @param hjust \code{numeric} shrinkage position horizontal adjustment
#' @param ... any other parameter
#' @return \code{pmxShrinkClass} object (\code{list})
#' @export
pmx_shrink <- function(fun = c("var", "sd"),
                       size = 1,
                       color = "green",
                       vjust = 1.5,
                       hjust = 0.5,
                       ...) {
  # Validate each argument; scalars must be length one and non-missing.
  checkmate::assert_character(x = fun, any.missing=FALSE)
  checkmate::assert_numeric(x = size, len = 1, any.missing=FALSE)
  checkmate::assert_character(x = color, len = 1, any.missing=FALSE)
  checkmate::assert_numeric(x = vjust, len = 1, any.missing=FALSE)
  checkmate::assert_numeric(x = hjust, len = 1, any.missing=FALSE)
  # When the caller did not pick a function, fall back to "var".
  if (length(fun) > 1) fun <- "var"
  shrink <- c(
    list(
      fun = fun,
      size = size,
      color = color,
      vjust = vjust,
      hjust = hjust
    ),
    list(...)
  )
  class(shrink) <- c("list", "pmxShrinkClass")
  shrink
}
#' Performs checks of names in shrink list
#'
#' @param shrink_list \code{list} list of shrink arguments
#' @return TRUE when all expected names are present, otherwise a
#'   \code{character} error message (checkmate's check-style contract).
#'   NOTE(review): this is \code{checkNames}, not \code{assertNames}, so it
#'   reports rather than stops -- confirm callers inspect the return value.
#' @export
check_shrink <- function(shrink_list) {
  # Every pmx_shrink() field must be present in the supplied list.
  arg_names <- c("fun", "size", "color", "vjust", "hjust")
  checkmate::checkNames(names(shrink_list), must.include = arg_names)
}
#' Create a new plot of the desired type
#'
#' @param ctr \code{pmxClass} controller object
#' @param ptype plot type can be:
##' \itemize{
##' \item{"IND"}{ Individual plot type: \code{\link{individual}} }
##' \item{"DIS"}{ Distribution plot type : \code{\link{distrib}}}
##' \item{"SCATTER"}{ Residual plot type :\code{\link{residual}}}
##' }
##' @param pname plot name, if missing it will be created using function aestetics
##' @param filter optional filter which will be applied to plotting data
##' @param strat.facet \code{formula} define categorical stratification as formula
##' @param strat.color \code{character}
##' @param trans \code{list}{transformation operator}
##' @param color.scales \code{list} can be used with strat.color to set scale_color_manual
##' @param use.defaults \code{logical} if FALSE do not use defaults defined in yaml init files
#' @param ... other plot parameters to configure \code{\link{pmx_gpar}}.
#'
#' @family pmxclass
#' @return invisible ctr object
#' @export
set_plot <- function(
                     ctr,
                     ptype = c(
                       "IND", "DIS", "SCATTER", "ETA_PAIRS",
                       "ETA_COV", "PMX_QQ", "VPC", "PMX_DENS"
                     ),
                     pname,
                     use.defaults = TRUE,
                     filter = NULL,
                     strat.color = NULL,
                     strat.facet = NULL,
                     color.scales = NULL,
                     trans = NULL, ...) {
  assert_that(is_pmxclass(ctr))
  ptype <- match.arg(ptype)
  assert_that(is_string_or_null(pname))
  assert_that(is_string_or_null(strat.color))
  assert_that(is_string_or_formula_or_null(strat.facet))
  params <- list(...)
  if (use.defaults) {
    # Merge caller parameters over the package-shipped per-plot defaults.
    defaults_yaml <-
      file.path(system.file(package = "ggPMX"), "init", "defaults.yaml")
    defaults <- yaml.load_file(defaults_yaml)
    names(defaults) <- tolower(names(defaults))
    def <- if (tolower(ptype) %in% names(defaults)) {
      defaults[[tolower(ptype)]]
    } else {
      # Distribution plots have two default sets, keyed by `type`.
      # NOTE(review): this branch assumes params$type is supplied whenever
      # "dis" is absent from the defaults file -- confirm against defaults.yaml.
      if (ptype == "DIS") {
        if (params$type == "hist") {
          defaults[["dis_hist"]]
        } else {
          defaults[["dis_box"]]
        }
      }
    }
    if (!is.null(def)) {
      # Caller-supplied values win over defaults (left join on names).
      params <- l_left_join(def, params)
      params$ptype <- NULL
    }
  }
  if (ptype == "VPC") {
    # VPC needs the simulation's dv/idv column names.
    params$dv <- ctr$sim$dv
    params$idv <- ctr$sim$idv
  }
  # Dispatch to the plot-type constructor; random-effect plots are only
  # built when the controller actually has random effects (has_re).
  conf <-
    switch(ptype,
      IND = do.call(individual, params),
      DIS = if (ctr$has_re) do.call(distrib, params),
      SCATTER = do.call(residual, params),
      ETA_PAIRS = if (ctr$has_re) do.call(eta_pairs, params),
      ETA_COV = if (ctr$has_re) do.call(eta_cov, params),
      PMX_QQ = do.call(pmx_qq, params),
      PMX_DENS = do.call(pmx_dens, params),
      VPC = do.call(pmx_vpc, params)
    )
  # `filter` is captured unevaluated so users can write bare expressions.
  if (!is.null(substitute(filter))) {
    filter <- deparse(substitute(filter))
    filter <- local_filter(filter)
  }
  if (!is.null(conf)) {
    conf[["filter"]] <- filter
    conf[["trans"]] <- trans
    if (!is.null(strat.color)) conf[["strat.color"]] <- strat.color
    if (!is.null(strat.facet)) conf[["strat.facet"]] <- strat.facet
    if (!is.null(color.scales)) conf$gp[["color.scales"]] <- color.scales
    # Remember the call so the plot can be re-created/updated later.
    ctr[["config"]][["plots"]][[toupper(pname)]] <-
      c(ptype = ptype, list(...))
    ctr$add_plot(conf, pname)
  }
  invisible(ctr)
}
#' update or add a new abbreviation
#'
#' @param ctr \code{pmxClass} controller object
#' @param ... Options to set or add, with the form \code{name = value}.
#' @export
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' ctr %>% set_abbrev("new_param" = "new value")
#' ctr %>% get_abbrev("new_param")
#' }
set_abbrev <- function(ctr, ...) {
  assert_that(is_pmxclass(ctr))
  updates <- list(...)
  # Merge into the existing mapping when one is present; otherwise the
  # supplied values become the initial mapping.
  abbrev <- if (length(ctr$abbrev) > 0) {
    l_left_join(ctr$abbrev, updates)
  } else {
    unlist(updates, recursive = FALSE)
  }
  class(abbrev) <- c("abbreviation", "list")
  ctr$abbrev <- abbrev
  # Refresh every registered plot so the new abbreviations take effect.
  for (nm in plot_names(ctr)) {
    get_plot_param(ctr, nm)
  }
}
#' S3 print abbreviation
#' @param x object of class configs
#' @param ... pass additional options (not used presently)
#' @return \code{x}, invisibly (standard print-method contract)
#' @export
print.abbreviation <- function(x, ...) {
  assert_that(inherits(x, "abbreviation"))
  # One "key : definition" line per abbreviation.
  for (i in seq_along(x)) {
    cat(sprintf("%s : %s \n", names(x)[i], x[[i]]))
  }
  # FIX: print() methods should return their argument invisibly so that
  # chained/piped calls keep working; the old version returned NULL.
  invisible(x)
}
#' Get abbreviation definition by key
#'
#' @param param abbreviation term
#' @param ctr \code{pmxClass} controller
#'
#' @return \code{character} abbreviation definition; when \code{param} is
#'   missing, the whole abbreviation mapping; when \code{param} has no
#'   definition, \code{param} itself is echoed back.
#' @export
get_abbrev <- function(ctr, param) {
  abbrevs <- ctr$abbrev
  if (missing(param)) {
    return(abbrevs)
  }
  definition <- abbrevs[[param]]
  if (is.null(definition)) param else definition
}
#' Get plot object
#'
#' @param ctr \code{pmxClass} controller object
#' @param nplot character the plot name
#' @param which_pages integer vector (can be length 1), set page number in case of multi pages plot, or character "all" to plot all pages.
#'
#' @family pmxclass
#' @return ggplot object
#' @export
#' @examples
#' \donttest{
#' library(ggPMX)
#' ctr <- theophylline()
#' p1 <- ctr %>% get_plot("iwres_ipred")
#' ## get all pages or some pages
#' p2 <- ctr %>% get_plot("individual")
#' ## returns one page of individual plot
#' p2 <- ctr %>% get_plot("individual", which_pages = 1)
#' p3 <- ctr %>% get_plot("individual", which_pages = c(1, 3))
#' ## get distribution plot
#' pdistri <- ctr %>% get_plot("eta_hist")
#' }
#'
get_plot <- function(ctr, nplot, which_pages = "all") {
  # Accept plain numerics (e.g. 1) for page selection.
  if (is.numeric(which_pages)) {
    which_pages <- as.integer(which_pages)
  }
  assert_that(is_pmxclass(ctr))
  assert_that(is_string(nplot))
  assert_that(is.integer(which_pages) || ((length(which_pages) == 1L) && (which_pages == "all")))
  nplot <- tolower(nplot)
  assert_that(is_valid_plot_name(nplot, plot_names(ctr)))
  plot_or_factory <- ctr$get_plot(nplot)
  # "all" means no page restriction for multi-page plot factories.
  wanted <- if ((length(which_pages) == 1L) && which_pages == "all") {
    NULL
  } else {
    which_pages
  }
  # Multi-page plots are stored as factories taking the page selection;
  # single plots are stored directly.
  if (!is.function(plot_or_factory)) {
    return(plot_or_factory)
  }
  plot_or_factory(wanted)
}
#' Get plot names
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return \code{character} vector of the plot names registered on the
#'   controller (delegates to the controller's \code{plots()} method).
#' @export
plot_names <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$plots()
}
#' Get plots description
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return data.frame of plots (name, type, and the R function to call)
#' @export
plots <- function(ctr) {
  # Indirection keeps R CMD check happy about existsFunction usage.
  existsF <- function(...) do.call("existsFunction", list(...))
  assert_that(is_pmxclass(ctr))
  x <- ctr$config
  # Map a plot name to its exported shortcut (pmx_plot_<name>) when one
  # exists in the package namespace, otherwise to the generic pmx_plot()
  # call the user would have to type.
  function_name <- function(nn) {
    fn <- sprintf("pmx_plot_%s", nn)
    if (!existsF(fn, where = asNamespace("ggPMX"))) {
      fn <- sprintf("pmx_plot('%s',...)", nn)
    }
    fn
  }
  if (exists("plots", x)) {
    pp <- x$plots
    names(pp) <- tolower(names(pp))
    pp <- pp[ctr$plots()]
    # FIX: vapply() guarantees character results even for empty/degenerate
    # input, unlike sapply() whose return type varies with its input.
    data.table(
      plot_name = names(pp),
      plot_type = vapply(pp, `[[`, character(1), "ptype"),
      plot_function = vapply(names(pp), function_name, character(1))
    )
  }
}
#' Get the plot config by name
#'
#' @param ctr the controller object
#' @param pname the plot name
#'
#' @family pmxclass
#' @return the config object stored for that plot (NULL when absent);
#'   delegates to the controller's \code{get_config()} method, which
#'   lower-cases the name before lookup.
#' @export
#'
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' ctr %>% set_plot("IND", pname = "indiv1")
#' ctr %>% get_plot_config("distr1")
#' }
get_plot_config <- function(ctr, pname) {
  assert_that(is_pmxclass(ctr))
  ctr$get_config(pname)
}
#' Get controller data set
#'
#' @param ctr the controller object
#' @param data_set the data set name (a single string). Besides the
#'   built-in names listed in the default, names created at runtime via
#'   \code{\link{set_data}} are also accepted.
#'
#' @family pmxclass
#' @return a data.table of the named data set if available.
#' @export
get_data <- function(ctr, data_set = c(
                       "estimates", "predictions",
                       "eta", "finegrid", "input", "sim",
                       "individual"
                     )) {
  assert_that(is_pmxclass(ctr))
  # match.arg() is deliberately not used so that data sets created at
  # runtime via set_data() (e.g. "eta_long") remain accessible.
  ## data_set <- match.arg(data_set)
  # FIX: without this check a missing data_set evaluated to the whole
  # default vector, making `data_set == "individual"` a length > 1
  # condition that errors obscurely (R >= 4.2).
  if (!is.character(data_set) || length(data_set) != 1L) {
    stop("data_set should be a single data set name", call. = FALSE)
  }
  # "individual" is stored internally under the key "IND".
  if (data_set == "individual") data_set <- "IND"
  # copy() protects the controller's internal data.tables from
  # by-reference modification by the caller.
  if (data_set == "input") {
    copy(ctr[["input"]])
  } else {
    copy(ctr[["data"]][[data_set]])
  }
}
#' Set a controller data set
#'
#' @param ctr the controller object
#' @param ... a named list parameters (see example)
#' @inheritParams base::eval
#' @family pmxclass
#' @details
#' This function can be used to set an existing data set or to create a new one. The basic
#' idea is to change the built-in data set (change the factor level names, change some rows
#' values or apply any other data set operation) and use the new data set using the dname
#' parameter of pmx_plot family functions.
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' dx <- ctr %>% get_data("eta")
#' dx <- dx[, EFFECT := factor(
#'   EFFECT,
#'   levels = c("ka", "V", "Cl"),
#'   labels = c("Concentration", "Volume", "Clearance")
#' )]
#' ## update existing data set
#' ctr %>% set_data(eta = dx)
#' ## or create a new data set
#' ctr %>% set_data(eta_long = dx)
#' }
#' @export
set_data <- function(ctr, ..., envir=parent.frame()) {
  assert_that(is_pmxclass(ctr))
  # Capture the unevaluated arguments; drop the function name and ctr.
  params <- as.list(match.call(expand.dots = TRUE))[-c(1, 2)]
  # FIX: an explicitly supplied `envir` is also matched by match.call();
  # drop it so it is not written into ctr$data as a data set.
  params$envir <- NULL
  # FIX: the old check `!nzchar(names(params))` yielded a length > 1
  # condition in if() when several data sets were supplied (error under
  # R >= 4.2), and a zero-length condition when none were supplied.
  if (length(params) == 0L ||
      is.null(names(params)) ||
      !all(nzchar(names(params)))) {
    stop("each data set should be well named")
  }
  # Evaluate each expression in the caller's environment and store it
  # on the controller (R6 reference semantics, so this mutates ctr).
  invisible(Map(function(n, v) ctr$data[[n]] <- eval(v, envir=envir), names(params), params))
}
#' Get category covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector of categorical covariate names
#' @export
get_cats <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$cats
}
#' Get extra stratification variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector of stratification variable names
#' @export
get_strats <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$strats
}
#' Get covariates variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector: the deduplicated union of categorical and
#'   continuous covariates, with empty-string placeholders removed
#' @export
get_covariates <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  covariates <- unique(c(ctr$cats, ctr$conts))
  # Drop "" placeholders used when no covariates were configured.
  Filter(nzchar, covariates)
}
#' Get continuous covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector of continuous covariate names
#' @export
get_conts <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$conts
}
#' Get controller occasional covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector: the occasional covariate name ("" when unset)
#' @export
get_occ <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$occ
}
# pmxSource (R6 Class) ------------------------------------------------------------
#' @importFrom R6 R6Class
# The controller class. An R6 object, so it has reference semantics:
# methods mutate the object in place (see pmx_copy() for deep copies).
# Most methods delegate to free functions (pmx_initialize, pmx_print, ...)
# defined below, keeping this definition declarative.
pmxClass <- R6::R6Class(
  "pmxClass",
  # Private methods ------------------------------------------------------------
  private = list(
    .data_path = "",          # modelling output directory
    .input_path = "",         # path of the modelling input file (when given as a path)
    .covariates = NULL,
    .plots = list(),          # rendered plots / plot factories, keyed by lower-case name
    .plots_configs = list()   # per-plot configuration objects, same keys
  ),
  # Public methods -------------------------------------------------------------
  public = list(
    # data: named list of loaded data sets; config: pmxConfig object;
    # input: observations data.table; abbrev: axis-label abbreviation map;
    # report_queue/report_n/plot_file_name: report figure bookkeeping.
    data = NULL, config = NULL, input = NULL,
    input_file = NULL, dv = NULL, dvid = NULL, cats = NULL, conts = NULL, occ = NULL,
    strats = NULL, settings = NULL, has_re = FALSE, re = NULL,
    abbrev = list(), endpoint = NULL, warnings = list(),
    footnote = FALSE, save_dir = NULL,
    report_queue = list(),
    report_n = 0,
    plot_file_name = "",
    sim = NULL,
    bloq = NULL,
    id = NULL,
    time = NULL,
    sim_blq = FALSE,
    initialize = function(data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time,sim_blq)
      pmx_initialize(self, private, data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time,sim_blq),
    print = function(data_path, config, ...)
      pmx_print(self, private, ...),
    # Register a plot file name for the report and remember it in FIFO order.
    enqueue_plot = function(pname) {
      self$report_n <- self$report_n + 1
      pname_file <- paste0(pname, "-", self$report_n)
      self$plot_file_name <- pname_file
      self$report_queue <- c(self$report_queue, pname_file)
    },
    dequeue_plot = function() pmx_dequeue_plot(self),
    # Operations ---------------------------------------------------------------
    add_plot = function(x, pname)
      pmx_add_plot(self, private, x, pname),
    update_plot = function(pname, strat.facet = NULL, strat.color = NULL,
                           filter = NULL, trans = NULL,
                           ..., pmxgpar = NULL) {
      pmx_update_plot(
        self, private, pname,
        strat.color = strat.color, strat.facet = strat.facet,
        filter, trans, ..., pmxgpar = pmxgpar
      )
    },
    remove_plot = function(pname, ...)
      pmx_remove_plot(self, private, pname, ...),
    get_config = function(pname)
      pmx_get_config(self, private, pname),
    set_config = function(pname, new)
      pmx_set_config(self, private, pname, new),
    get_plot = function(pname)
      pmx_get_plot(self, private, pname),
    plots = function()
      pmx_plots(self, private),
    post_load = function()
      pmx_post_load(self, private)
  )
)
# Initialiser for pmxClass (R6). Loads the modelling input and the source
# data sets, post-processes simulation/BLQ data and registers all plots
# declared in the configuration on the controller.
pmx_initialize <- function(self, private, data_path, input, dv,
                           config, dvid, cats, conts, occ, strats,
                           settings, endpoint, sim, bloq, id, time, sim_blq) {
  DVID <- ID <- NULL
  # FIX: the guard used to test missing(data_path) twice, so a missing
  # config was never caught here and failed later with an obscure error.
  if (missing(data_path) || missing(config)) {
    stop(
      "Expecting source path(directory ) and a config path",
      call. = FALSE
    )
  }
  # Normalise optional arguments to safe defaults.
  # NOTE(review): the non-short-circuit `|` forms below evaluate every
  # operand, so they rely on occ/cats/conts/strats always being passed by
  # pmx() -- confirm before calling this from elsewhere.
  if (missing(dvid)) dvid <- NULL
  if (any(missing(occ) | is.null(occ) | is.na(occ))) occ <- ""
  if (any(missing(cats) | is.null(cats) | is.na(cats))) cats <- ""
  if (any(missing(conts) | is.null(conts) | is.na(conts))) conts <- ""
  if (any(missing(strats) | is.null(strats) | is.na(strats))) strats <- ""
  if (missing(settings)) settings <- NULL
  if (missing(bloq)) bloq <- NULL
  if (missing(id)) id <- NULL
  if (missing(time)) time <- NULL
  if (missing(sim_blq)) sim_blq <- FALSE
  private$.data_path <- data_path
  self$save_dir <- data_path
  if (is.character(input)) {
    private$.input_path <- input
  }
  self$config <- config
  self$dv <- dv
  self$dvid <- dvid
  self$cats <- cats
  self$conts <- conts
  self$occ <- toupper(occ)
  self$strats <- strats
  self$settings <- settings
  self$bloq <- bloq
  self$id <- id
  self$time <- time
  self$sim_blq <- sim_blq
  # A bare code (integer/character) is promoted to a pmxEndpointClass.
  if (!is.null(endpoint) && is.atomic(endpoint)) {
    endpoint <- pmx_endpoint(code = as.character(endpoint))
  }
  self$endpoint <- endpoint
  # The observations can be given as a file path or directly as a data.frame.
  if (is.character(input) && file.exists(input)) {
    self$input_file <- input
    self$input <- read_input(input, self$dv, self$dvid, self$cats, self$conts, self$strats, self$occ, self$endpoint, self$id, self$time)
  } else {
    if (!inherits(input, "data.frame")) {
      stop("observation data should be either a file or a data.frame")
    }
    self$input <- setDT(input)
  }
  # Always add isobserv to address issue #235
  self$input$isobserv <- "accepted"
  self[["data"]] <- load_source(
    sys = config[["sys"]],
    private$.data_path,
    self[["config"]][["data"]],
    dvid = self[["dvid"]],
    endpoint = self[["endpoint"]],
    occ = self$occ,
    id = self$id
  )
  # Detect random effects from eta_<name>_<mode|mean> columns.
  if (!is.null(self$data[["eta"]])) {
    re <- grep("^eta_(.*)_(mode|mean)", names(self$data[["eta"]]), value = TRUE)
    if (length(re) > 0) {
      self$has_re <- TRUE
      self$re <- gsub("^eta_(.*)_(mode|mean)", "\\1", re)
      self$data[["eta"]] <-
        post_load_eta(
          self$data[["eta"]],
          self$input, self$sys, self$occ
        )
    }
  }
  self$post_load()
  # Replace some column names of sim_blq with ggPMX naming convention
  if(!is.null(self[["data"]][["sim_blq_y"]])){
    yname <- names(self[["data"]][["sim_blq_y"]])[grep("simBlq", names(self[["data"]][["sim_blq_y"]]))]
    yname <- gsub("mode|mean|simBlq|_", "", yname)
    # Some cases dv and xx_simBlq are not the same
    suppressWarnings(
      if(self[["dv"]] == yname) {
        self[["data"]][["sim_blq_y"]] <-
          self[["data"]][["sim_blq_y"]][,c("NPDE","IWRES", paste(dv)) := NULL]
        names(self[["data"]][["sim_blq_y"]]) <-
          gsub("mode|mean|simBlq|_","", names(self[["data"]][["sim_blq_y"]]))
        self[["data"]][["sim_blq_y"]][["DV"]] <-
          self[["data"]][["sim_blq_y"]][[paste(dv)]]
      } else {
        self[["data"]][["sim_blq_y"]] <-
          self[["data"]][["sim_blq_y"]][,c("NPDE","IWRES") := NULL]
        names(self[["data"]][["sim_blq_y"]]) <-
          gsub("mode|mean|simBlq|_","", names(self[["data"]][["sim_blq_y"]]))
        self[["data"]][["sim_blq_y"]][["DV"]] <-
          self[["data"]][["sim_blq_y"]][[yname]]
      }
    )
    #rename npde and iwRes to NPDE and IWRES
    place_vec <- which(
      names(self$data$sim_blq_y) == "npde" |
        names(self$data$sim_blq_y) == "iwRes"
    )
    names(self$data$sim_blq_y)[place_vec] <-
      toupper(names(self$data$sim_blq_y)[place_vec])
    # Needed same treatment for "sim_blq" as for "sim_blq_y"
    if(!is.null(self[["data"]][["sim_blq"]])){
      # In some cases xx and xx_simBlq are not the same
      suppressWarnings({
        for(cn in c("iwRes", "pwRes", "npde")) {
          if(paste0(cn, "_mode_simBlq") %in% colnames(self[["data"]][["sim_blq"]])) {
            self[["data"]][["sim_blq"]][[toupper(cn)]] <-
              self[["data"]][["sim_blq"]][[paste0(cn, "_mode_simBlq")]]
          }
        }
      })
    }
    # Needed same treatment for "sim_blq_npde_iwres" as for "sim_blq_y"
    if(!is.null(self[["data"]][["sim_blq_npde_iwres"]])){
      #rename npde and iwRes to NPDE and IWRES
      place_vec <- which(
        names(self$data$sim_blq_npde_iwres) == "npde" |
          names(self$data$sim_blq_npde_iwres) == "iwRes"
      )
      names(self$data$sim_blq_npde_iwres)[place_vec] <-
        toupper(names(self$data$sim_blq_npde_iwres)[place_vec])
    }
  } else if ((self$config$sys == "mlx18") && (self$sim_blq == TRUE)) {
    # give message if new version of monolix, otherwise sim_blq cannot be loaded anyway
    message("`sim_blq` dataset could not be generated, `sim_blq_npde_iwres` or `sim_blq_y` is missing")
  }
  # VPC: merge simulated data with the (dv-stripped) input covariates.
  if (!is.null(sim)) {
    dx <- sim[["sim"]]
    inn <- copy(self$input)[, self$dv := NULL]
    # check for unique keys in the observation variables
    if (sum(duplicated(inn[, c("ID", "TIME"), with = FALSE])) > 0) {
      warning(
        paste(
          " Different covariates for the same patient same time point\n",
          "--> Duplicated created in the vpc data set."
        ),
        call. = FALSE
      )
    }
    self$data[["sim"]] <- merge_dx_inn_by_id_time(dx, inn, config$sys)
    self$sim <- sim
  }
  # nlmixr/NONMEM configs carry their data sets directly on the config.
  if (config$sys == "nlmixr") {
    self$data$predictions <- input
    self$data$IND <- if (!is.null(config$finegrid)) config$finegrid else input
    self$data$eta <- config$eta
    self$data$omega <- config$omega
    self$has_re <- TRUE
  }
  if (config$sys == "nm") {
    self$data$predictions <- input
    self$data$IND <- if (!is.null(config$finegrid)) config$finegrid else input
    self$data$eta <- config$eta
    self$data$omega <- config$omega
    self$has_re <- TRUE
    self$bloq <- bloq
    self$data$estimates <- config$parameters
  } else if (config$sys == "nlmixr") {
    self$data$estimates <- config$parameters
  }
  ## abbrev
  keys_file <- file.path(
    system.file(package = "ggPMX"), "init", "abbrev.yaml"
  )
  self$abbrev <- set_abbrev(self, yaml.load_file(keys_file))
  ## create all plots
  for (nn in names(self$config$plots)) {
    x <- self$config$plots[[nn]]
    x$pname <- tolower(nn)
    x$use.defaults <- FALSE
    do.call(set_plot, c(ctr = self, x))
  }
}
#' @importFrom knitr kable
# Print a summary table (via knitr::kable) of the controller parameters,
# followed by the configuration and the names of the registered plots.
pmx_print <- function(self, private, ...) {
  cat("\npmx object:\n")
  # Build a (name, value) row; returns NULL (row dropped by rbind) when
  # the value is empty, so unset covariates do not clutter the table.
  paste_col <- function(n, x) if (all(x != "")) c(n, paste(x, collapse = ","))
  ctr_table <-
    rbind(
      c(
        "working directory",
        basename(dirname(private$.data_path))
      ),
      c("Modelling input file", basename(private$.input_path)),
      c("dv", self$dv),
      c("dvid", self$dvid),
      paste_col("cats", self %>% get_cats()),
      paste_col("conts", self %>% get_conts()),
      paste_col("strats", self %>% get_strats())
    )
  colnames(ctr_table) <- c("PARAM", "VALUE")
  print(kable(ctr_table))
  # Delegates to print.pmxConfig (S3), passing the registered plot names.
  print(self$config, ctr = self, plot_names = names(private$.plots))
}
# Apply a transformation (e.g. "log") to the relevant columns of a plot's
# data. `trans` may be a plain function name, or a "fun_direction" string
# such as "log_xy", in which case the direction is parsed out of it.
# The columns to transform depend on the plot type (x$ptype) and on the
# requested direction (x / y / xy).
# NOTE: `dx` is a data.table and is modified in place via `:=` (and also
# returned), so callers share the mutation.
pmx_transform <- function(x, dx, trans, direction) {
  # "log_x" style: split into function name and direction.
  if (is.character(trans)) {
    params <- strsplit(trans, "_")[[1]]
    trans <- params[1]
    direction <- params[2]
  }
  # SCATTER plots: the columns are whatever the plot's aesthetics map.
  cols_res <- function(x) {
    with(x, {
      switch(
        direction,
        x = aess$x,
        y = aess$y,
        xy = c(aess$x, aess$y)
      )
    })
  }
  # Individual plots: fixed column names per direction.
  cols_ind <- function(x) {
    switch(
      direction,
      x = "TIME",
      y = c("PRED", "IPRED", "DV"),
      xy = c("TIME", "PRED", "IPRED", "DV")
    )
  }
  # Distribution plots always operate on the VALUE column.
  cols_dis <- function(x) {
    switch(
      direction,
      x = c("VALUE"),
      y = c("VALUE"),
      xy = c("VALUE")
    )
  }
  # QQ plots only support transforming x.
  cols_qq <- function(x) {
    switch(
      direction,
      x = x$x
    )
  }
  # eta-vs-continuous-covariate plots only support transforming y (VALUE).
  cols_eta_conts <- function(x) {
    switch(
      direction,
      y = "VALUE"
    )
  }
  cols <- switch(
    x[["ptype"]],
    SCATTER = cols_res(x),
    IND = cols_ind(x),
    DIS = cols_dis(x),
    PMX_QQ = cols_qq(x),
    ETA_COV = cols_eta_conts(x)
  )
  # Only transform columns that actually exist in the data.
  cols <- intersect(cols, names(dx))
  if (length(cols) > 0) {
    fun <- match.fun(trans)
    # data.table in-place update of the selected columns.
    dx[, (cols) := lapply(.SD, fun), .SDcols = (cols)]
  }
  dx
}
# Remove a plot from the controller: both the rendered plot and its
# stored configuration are dropped. Returns the controller invisibly.
pmx_remove_plot <- function(self, private, pname, ...) {
  for (slot in c(".plots_configs", ".plots")) {
    private[[slot]][[pname]] <- NULL
  }
  invisible(self)
}
# Fetch the stored configuration for a plot. Plot names are stored
# lower-case, so the lookup key is normalised first; NULL when absent.
pmx_get_config <- function(self, private, pname) {
  private$.plots_configs[[tolower(pname)]]
}
# Store (or replace) the configuration for a plot. Plot names are stored
# lower-case, so the key is normalised before writing.
pmx_set_config <- function(self, private, pname, new) {
  private$.plots_configs[[tolower(pname)]] <- new
}
# Pop the oldest registered plot file name off the report queue (FIFO).
# When the queue is empty, emit a diagnostic message and return NULL:
# a chunk produced a figure that was never enqueued via enqueue_plot().
pmx_dequeue_plot <- function(self) {
  if (length(self$report_queue) == 0) {
    message("Warning: Chunk has plots that were not registered within ggPMX. Footnotes may be wrong.")
    return(invisible(NULL))
  }
  head_item <- self$report_queue[[1]]
  self$report_queue <- self$report_queue[-1]
  head_item
}
# Reset the figure-report bookkeeping on the controller.
# FIX: the previous implementation assigned to *local* variables
# (report_queue / report_n), which was a silent no-op; the intent is
# clearly to reset the fields on `self`.
pmx_fig_process_init <- function(self) {
  self$report_queue <- list()
  self$report_n <- 0
  invisible(self)
}
# Sanity check run at the end of report figure processing: every enqueued
# plot must have been dequeued; otherwise is_empty_queue() raises.
pmx_fig_process_wrapup <- function(self) {
  assert_that(is_empty_queue(self))
}
# Fetch a stored plot (or multi-page plot factory) by name. Plot names
# are stored lower-case, so the key is normalised first; NULL when absent.
pmx_get_plot <- function(self, private, pname) {
  private$.plots[[tolower(pname)]]
}
# Names of all plots currently registered on the controller
# (lower-case keys of the private plot store).
pmx_plots <- function(self, private) {
  names(private$.plots)
}
# Run the system-specific post-load step on the controller's data sets
# and store both the processed data and any warnings it produced.
pmx_post_load <- function(self, private) {
  res <- post_load(
    self$data, self$input, self$config$sys,
    self$config$plots,
    occ = get_occ(self)
  )
  self$data <- res$data
  self$warnings <- res$warnings
}
#' Print pmxClass object
#'
#' @param x pmxClass object
#' @param ... additional arguments to pass to print
#'
#' @family pmxclass functions
#' @return print object to screen (delegates to the R6 print method,
#'   which renders a parameter table and the configuration)
#' @export
print.pmxClass <- function(x, ...) {
  x$print(...)
}
#' Creates a deep copy of the controller
#'
#' @param ctr \code{pmxClass} object
#' @param keep_globals \code{logical} if TRUE we keep the global parameters changed by pmx_settings
#' @param ... extra parameters passed to \code{pmx_settings}
#'
#' @return an object of \code{pmxClass}
#' @export
#' @details
#'
#' The controller is an `R6` object, it behaves like a reference object.
#' Some functions ( methods) can have a side effect on the controller and modify it internally.
#' Technically speaking we talk about chaining not piping here. However ,
#' using \code{pmx_copy} user can work on a copy of the controller.
#'
#' By default the copy does not keep global parameters set using pmx_settings.
#'
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' cctr <- ctr %>% pmx_copy()
#' ## Any change in the ctr has no side effect in the ctr and vice versa
#' }
pmx_copy <- function(ctr, keep_globals = FALSE, ...) {
  assert_that(is_pmxclass(ctr))
  # R6 clone() gives an independent object (no shared reference state).
  cctr <- ctr$clone()
  params <- get_params_from_call()
  ## params <- list(...)
  if (!keep_globals) {
    # Rebuild settings from any pmx_settings() arguments passed via `...`
    # (all pmx_settings formals except `...` itself).
    # NOTE(review): when no such arguments are supplied, eff_nn is empty
    # and the clone keeps the original settings object, which seems to
    # contradict the "does not keep global parameters" documentation --
    # confirm intended behavior.
    nn <- rev(names(formals(pmx_settings)))[-1]
    eff_nn <- intersect(nn, names(params))
    settings <- l_left_join(ctr$settings, params[eff_nn])
    if (length(eff_nn) > 0) {
      cctr$settings <- do.call(pmx_settings, settings)
    }
  }
  cctr
}
|
/R/pmxClass.R
|
no_license
|
ggPMXdevelopment/ggPMX
|
R
| false
| false
| 40,224
|
r
|
#' Update plot
#' @param ctr controller (pmxClass object)
#' @param pname plot name to update; silently ignored (returns NULL) when
#'   not registered on the controller
#' @param defaults_ default parameters for the plot, overridden by \code{...}
#' @param ... other arguments
#' @noRd
update_container_plots <- function(ctr, pname, defaults_, ...){
  stopifnot(is_pmxclass(ctr))
  if (!pname %in% (ctr %>% plot_names())) {return(NULL)}
  # The merged defaults/... list is passed positionally after the named
  # arguments. NOTE(review): it is matched to whatever parameter of
  # pmx_update() follows in its signature -- confirm against pmx_update.
  pmx_update(ctr, pname, strat.color=NULL, strat.facet=NULL, color.scales=NULL,
             filter=NULL, trans=NULL, l_left_join(defaults_, list(...)), pmxgpar=NULL
  )
}
#' Create parameters for plot updating
#' @param ctr controller
#' @param pname plot name
#' @return result of \code{update_container_plots}, or nothing when the
#'   plot's defaults define no x/y labels (updating is then skipped)
#' @noRd
get_plot_param <- function(ctr, pname){
  # Capture the call's arguments unevaluated (language objects).
  params <- as.list(match.call(expand.dots = TRUE))[-1]
  # These two plots do not support the smoothing layer on update.
  if ((pname == "iwres_dens") || (pname == "pmx_vpc")) {
    params[["is.smooth"]] <- FALSE
  }
  # Overwrite the captured (unevaluated) ctr/pname with the actual values.
  params[["ctr"]] <- ctr
  params[["pname"]] <- pname
  params <- lang_to_expr(params)
  params$defaults_ <- ctr$config$plots[[toupper(pname)]]
  # Propagate the controller-level BLOQ settings unless the plot already
  # carries its own.
  if (!exists("bloq", params) && !is.null(ctr$bloq)) {
    params$defaults_[["bloq"]] <- ctr$bloq
  }
  # Check that x or y labels for updating exist (else plot updating will not work)
  if ((!is.null(params$defaults_$labels$x)) || (!is.null(params$defaults_$labels$y))){
    # Check if labels$x exists in added abbreviations; if not set default labels$x
    if ((!is.null(params$defaults_$labels$x)) &&
        (!(params$defaults_$labels$x %in% names(params$ctr$abbrev)))){
      params$ctr$abbrev[params$defaults_$labels$x] <- params$defaults_$labels$x
    }
    # check if labels$y exists in added abbreviations; if not set default labels$y
    if ((!is.null(params$defaults_$labels$y)) &&
        (!(params$defaults_$labels$y %in% names(params$ctr$abbrev)))){
      params$ctr$abbrev[params$defaults_$labels$y] <- params$defaults_$labels$y
    }
    do.call(update_container_plots, params)
  }
}
#' Create simulation object
#'
#' @param file \code{character} path to the simulation file
#' @param data \code{data.table} simulation data (takes precedence over
#'   \code{file} when both are supplied)
#' @param irun \code{character} name of the simulation column
#' @param idv \code{character} name of the ind. variable (default "TIME")
#' @return a \code{pmxSimClass} object (list with sim/idv/irun)
#' @export
#' @example inst/examples/vpc.R
pmx_sim <- function(
                    file,
                    data,
                    irun,
                    idv) {
  ID <- NULL
  if (missing(data)) data <- NULL
  if (missing(idv)) idv <- "TIME"
  sim <- NULL
  if (!missing(file) && file.exists(file)) sim <- pmx_fread(file)
  if (!is.null(data) && is.data.table(data)) sim <- data
  # FIX: previously `sim` was left undefined when neither a readable file
  # nor a data.table was supplied, producing an obscure
  # "object 'sim' not found" error instead of this message.
  if (is.null(sim)) {
    stop(
      "pmx_sim: please supply either an existing `file` or a data.table `data`",
      call. = FALSE
    )
  }
  if (is.data.table(sim)) {
    # Normalise the independent-variable and ID column names.
    if (tolower(idv) == "time") {
      idvn <- names(sim)[tolower(names(sim)) == "time"]
      setnames(sim, idvn, "TIME")
      idv <- "TIME"
    }
    id_col <- grep("^id$", names(sim), ignore.case = TRUE, value = TRUE)
    setnames(sim, id_col, "ID")
    obj <- list(
      sim = sim,
      idv = idv,
      irun = irun
    )
    structure(obj, class = c("pmxSimClass", "list"))
  }
}
# Validate that a required pmx() argument was supplied and is non-NULL;
# returns the value unchanged, otherwise stops with a message naming the
# argument (pmxname).
check_argument <- function(value, pmxname) {
  # Capture the caller's expression so the error can show it.
  call <- match.call()
  # FIX: use short-circuit ||. The old `any(missing(value) | is.null(value))`
  # evaluated both operands, so a truly missing argument crashed while
  # evaluating is.null(value) instead of reaching the intended message.
  if (missing(value) || is.null(value)) {
    # FIX: the old format string had one %s but two values, so pmxname
    # never appeared in the message.
    stop(
      sprintf(
        "Please set a %s argument (%s is missing or NULL)",
        pmxname, deparse(call$value)
      ),
      call. = FALSE
    )
  }
  value
}
#' Create a pmx object
#'
#' Create a pmx object from a data source
#' @param config Can be either :
#' The complete path for the configuration file, the name of configuration within the built-in
#' list of configurations, or a configuration object.
#' @param sys the system name can "mlx" (for Monolix 2016) or "mlx18" (for Monolix 2018/19 and later)
#' @param directory \code{character} modelling output directory.
#' @param input \code{character} complete path to the modelling input file
#' @param dv \code{character} the name of measurable variable used in the input modelling file
#' @param dvid \emph{[Optional]} \code{character} observation type parameter. This is mandatory
#' in case of multiple endpoint (PKPD).
#' @param cats \emph{[Optional]}\code{character} vector of categorical covariates
#' @param conts \emph{[Optional]}\code{character} vector of continuous covariates
#' @param occ \emph{[Optional]}\code{character} occasional covariate variable name
#' @param strats \emph{[Optional]}\code{character} extra stratification variables
#' @param settings \emph{[Optional]}\code{pmxSettingsClass} \code{\link{pmx_settings}}
#' shared between all plots
#' @param endpoint \code{pmxEndpointClass} or \code{integer} or \code{charcater} default to NULL
#' of the endpoint code. \code{\link{pmx_endpoint}}
#' @param sim \code{pmxSimClass} default to NULL. \code{\link{pmx_sim}} used for VPC, e.g.: sim = pmx_sim(file=vpc_file, irun="rep",idv="TIME")
#' @param bloq \code{pmxBLOQClass} default to NULL. \code{\link{pmx_bloq}} specify bloq, within controller: e.g. bloq=pmx_bloq(cens = "BLOQ_name", limit = "LIMIT_name")
#' @param sim_blq \code{logical} if TRUE uses sim_blq values for plotting. Only for Monolix 2018 and later.
#' @param id \emph{[Optional]} \code{character} the name of Indvidual variable used in the input modelling file
#' @param time \emph{[Optional]} \code{character} Time variable.
#' @return \code{pmxClass} controller object.
#' @export
#' @example inst/examples/controller.R
pmx <- function(config, sys = "mlx", directory, input, dv, dvid, cats = NULL, conts = NULL, occ = NULL, strats = NULL,
                settings = NULL, endpoint = NULL, sim = NULL, bloq = NULL,id=NULL,time=NULL, sim_blq = NULL) {
  # Mandatory arguments first; check_argument() gives a named error.
  directory <- check_argument(directory, "work_dir")
  input <- check_argument(input, "input")
  # Normalise optional arguments. (FIX: removed a dead
  # `ll <- list.files(directory)` whose result was never used.)
  if (missing(cats)) cats <- ""
  if (missing(sim)) sim <- NULL
  if (missing(endpoint)) {
    endpoint <- NULL
  }
  if (missing(config)) config <- "standing"
  assert_that(is_character_or_null(cats))
  if (missing(conts)) conts <- ""
  assert_that(is_character_or_null(conts))
  if (missing(occ)) occ <- ""
  assert_that(is_character_or_null(occ))
  if (missing(strats)) strats <- ""
  assert_that(is_character_or_null(strats))
  if (missing(sim_blq)) sim_blq <- FALSE
  if (missing(dv)) dv <- "DV"
  if (missing(dvid)) dvid <- "DVID"
  if (!inherits(config, "pmxConfig")) {
    # populationParameters.txt only exists in Monolix 2018+ outputs;
    # otherwise the directory must at least contain some .txt results.
    if ("populationParameters.txt" %in% list.files(directory)) sys <- "mlx18"
    else{
      is_mlx <- list.files(directory,pattern="txt$")
      if(length(is_mlx)==0){
        stop(
          sprintf(
            "%s is not valid directory results path: please set a valid directory argument",
            directory
          )
        )
      }
    }
    config <- load_config(config, sys)
  }
  # Fall back to default settings when none (or an invalid one) is given.
  if (missing(settings)) settings <- pmx_settings()
  if (!inherits(settings, "pmxSettingsClass")) {
    settings <- pmx_settings()
  }
  if (missing(bloq)) bloq <- NULL
  assert_that(inherits(bloq, "pmxBLOQClass") || is.null(bloq))
  pmxClass$new(directory, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq,id,time, sim_blq)
}
#' @rdname pmx
#' @details
#' \code{pmx_mlx} is a wrapper to mlx for the MONOLIX system ( \code{sys="mlx"})
#' @export
pmx_mlx <-
  function(config, directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq,id, time, sim_blq) {
    # Convenience wrapper around pmx() with the system fixed to "mlx"
    # (MONOLIX). Arguments are forwarded positionally and unevaluated, so
    # the missing() checks inside pmx() still see truly-missing arguments
    # (R's lazy evaluation preserves missingness across the call).
    pmx(config, "mlx", directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq,id,time, sim_blq)
  }
#' Create a controller from mlxtran file
#'
#' @param file_name \code{character} mlxtran file path.
#' @param call \code{logical} if TRUE the result is the parameters parsed
#' @param version \code{integer} Non-negative integer. Non-obligatory option, if you don't use a wildcard in the file_name.
#' Otherwise you MUST provide version and wildcard will be substituted with "version", which represents the mlxtran model version.
#' @param ... extra arguments passed to pmx_mlx.
#' @rdname pmx
#'
#' @export
#' @details
#'
#' \code{pmx_mlxtran} parses mlxtran file and guess \code{\link{pmx_mlx}} arguments. In case of
#' multi endpoint the first endpoint is selected. You can though set the endpoint through the same argument.
#' When you set \code{call=TRUE}, no controller is created; only the parameters parsed
#' from the mlxtran file are returned. This can be very helpful in case you would like to
#' customize the parameters (adding settings via pmx_settings, changing the default endpoint, ...).
pmx_mlxtran <- function(file_name, config = "standing", call = FALSE, endpoint, version = -1, ...) {
  # Parse an mlxtran project file, derive pmx_mlx() arguments from it, and
  # either return those parameters (call = TRUE) or create the controller.
  # Substituting * with version in file_name
  if (grepl("*", file_name, fixed = TRUE)) {
    assert_that(version>=0, msg = "Using wildcard in file_name assume providing non-negative version")
    file_name <- gsub("*", version, file_name, fixed = TRUE)
  }
  params <- parse_mlxtran(file_name)
  # Layer the user's explicit call arguments (minus file_name) on top of the
  # parameters guessed from the file; match.call() keeps them unevaluated.
  rr <- as.list(match.call()[-1])
  rr$file_name <- NULL
  params <- append(params, rr)
  if (!exists("config",params)) params$config <- config
  if (!missing(endpoint)) {
    # a user-supplied endpoint overrides the one guessed from the file
    params$endpoint <- NULL
    params$endpoint <- endpoint
  }
  if (call) {
    # return the parameter list only; no controller is created
    params$call <- NULL
    return(params)
  }
  params$call <- NULL
  # We don't need to pass version to pmx_mlx
  params$version <- NULL
  do.call(pmx_mlx, params)
}
formula_to_text <- function(form) {
  # Render a formula such as `~a + b` as the string "a and b" by joining
  # its right-hand terms; any non-formula input is passed through untouched.
  if (!is.formula(form)) {
    return(form)
  }
  term_strings <- as.character(as.list(form)[-1])
  paste(term_strings, collapse = " and ")
}
#' Create controller global settings
#' @param is.draft \code{logical} if FALSE any plot is without draft annotation
#' @param use.abbrev \code{logical} if FALSE use full description from abbreviation mapping for axis names
#' @param color.scales \code{list} list containing elements of scale_color_manual
#' @param use.labels \code{logical} if TRUE replace factor named by cats.labels
#' @param cats.labels \code{list} list of named vectors for each factor
#' @param use.titles \code{logical} FALSE to generate plots without titles
#' @param effects \code{list} list of effects levels and labels
#' @param ... extra parameter not used yet
#' @return pmxSettingsClass object
#' @example inst/examples/pmx-settings.R
#' @export
pmx_settings <-
  function(is.draft = TRUE, use.abbrev = TRUE, color.scales = NULL,
           cats.labels = NULL, use.labels = FALSE, use.titles = TRUE,
           effects = NULL,
           ...) {
    # Build the global settings object (pmxSettingsClass) shared by all the
    # plots of a controller. Extra arguments in `...` are attached to the
    # returned object as attributes.
    checkmate::assert_logical(x=is.draft, len=1, any.missing=FALSE)
    checkmate::assert_logical(x=use.abbrev, len=1, any.missing=FALSE)
    checkmate::assert_logical(x=use.labels, len=1, any.missing=FALSE)
    checkmate::assert_logical(x=use.titles, len=1, any.missing=FALSE)
    if (!missing(effects) && !is.null(effects)) {
      # `effects` must be a list carrying equally long levels and labels
      if (!is.list(effects)) {
        stop("effects should be a list")
      }
      if (!(exists("levels", effects) && exists("labels", effects))) {
        stop("effects should be a list that contains levels and labels")
      }
      if (length(effects$levels) != length(effects$labels)) {
        stop("effects should be a list that contains levels and labels have the same length")
      }
    }
    out <- list(
      is.draft = is.draft,
      use.abbrev = use.abbrev,
      color.scales = color.scales,
      use.labels = use.labels,
      cats.labels = cats.labels,
      use.titles = use.titles,
      effects = effects
    )
    if (use.labels) {
      # pre-build the ggplot2 facet labeller from the category labels
      out$labeller <- do.call("labeller", cats.labels)
    }
    structure(
      out, ...,
      class = "pmxSettingsClass"
    )
  }
#' Creates pmx endpoint object
#'
#' @param code \code{character} endpoint code : used to filter observations DVID==code.
#' @param label \code{character} endpoint label: used to set title and axis labels
#' @param unit \code{character} endpoint unit : used to set title and axis labels
#' @param file.code \code{character} endpoint file code : used to set predictions and finegrid \cr
#' files extensions in case using code parameter is not enough.
#' @param trans \code{list} Transformation parameter not used yet.
#' @export
#'
#' @example inst/examples/endpoint.R
#' @details
#' In case of multiple endpoints, pkpd case for example, we need to pass endpoint to the pmx call.
#' Internally , ggPMX will filter the observations data set to keep only rows satisfying \code{DVID==code}.
#' The \code{code} is also used to find the right predictions and/or finegrid files.
#' ggPMX uses the configuration file to find the path of the predictions file
#' (as in the single endpoint case) and then selects the right file using the code parameter. \cr
#' For example:
#' \itemize{
#' \item predictions\{code\}.txt for mlx16
#' \item predictions\{code\}.txt and y\{code\}_residual for mlx18
#' }
#'
#' For some tricky examples the code parameter is not enough to find the files. In that case the
#' \code{file.code} parameter is used to distinguish the endpoint files.
pmx_endpoint <-
  function(code,
           label = "",
           unit = "",
           file.code = code,
           trans = NULL) {
    # Describe a single model endpoint: `code` filters observations
    # (DVID == code) and, together with `file.code`, locates the matching
    # predictions/finegrid files. `label`/`unit` feed titles and axis labels.
    assert_that(is.character(code))
    assert_that(is.character(file.code))
    assert_that(is.character(unit))
    assert_that(is.character(label))
    assert_that(is_character_or_null(trans))
    structure(
      list(
        code = code,
        label = label,
        unit = unit,
        file.code = file.code,
        trans = trans
      ),
      class = "pmxEndpointClass"
    )
  }
#' Creates BLOQ object attributes
#'
#' @param cens \code{character} the censoring column name
#' @param limit \code{character} the limit column name (optional)
#' @param colour \code{character} the color of the geom
#' @param size \code{numeric} the size of the geom when using \code{geom_point()}
#' @param linewidth \code{numeric} the line width of the segment when using \code{geom_segment()}
#' @param alpha \code{numeric} the alpha of the geom
#' @param show \code{logical} if FALSE remove all censory observations
#' @param ... any other graphical parameter
#'
#' @export
#' @details
#' To define that a measurement is censored, the observation data set should include
#' a CENSORING column ( default to `CENS` ) and put 1 for lower limit or -1 for upper limit. \cr
#' Optionally, data set can contain have a limit column ( default to `LIMIT`) column to set the other limit.
pmx_bloq <-
  function(
           cens = "CENS",
           limit = "LIMIT",
           colour = "pink",
           size = 2,
           linewidth=1,
           alpha = 0.9,
           show = TRUE,
           ...) {
    # Collect BLOQ (below limit of quantification) display options into a
    # classed list. `cens`/`limit` name the censoring columns of the data
    # set; the remaining entries (plus anything in `...`) are graphical
    # parameters consumed by the BLOQ geoms.
    geom_opts <- list(
      cens = cens,
      limit = limit,
      show = show,
      colour = colour,
      size = size,
      linewidth = linewidth,
      alpha = alpha,
      ...
    )
    class(geom_opts) <- "pmxBLOQClass"
    geom_opts
  }
#' Create shrinkage parameter object
#' @param fun \code{list} shrinkage function can be \code{sd} or \code{var}
#' @param size \code{numeric} shrinkage text size
#' @param color \code{character} shrinkage text color
#' @param vjust \code{numeric} shrinkage position vertical adjustment
#' @param hjust \code{numeric} shrinkage position horizontal adjustment
#' @param ... any other parameter
#' @return \code{pmxShrinkClass} object (\code{list})
#' @export
pmx_shrink <- function(fun = c("var", "sd"),
                       size = 1,
                       color = "green",
                       vjust = 1.5,
                       hjust = 0.5,
                       ...) {
  # Shrinkage annotation options. `fun` names the statistic ("var" or
  # "sd"); when left at its two-element default, "var" is selected.
  checkmate::assert_character(x = fun, any.missing=FALSE)
  checkmate::assert_numeric(x = size, len = 1, any.missing=FALSE)
  checkmate::assert_character(x = color, len = 1, any.missing=FALSE)
  checkmate::assert_numeric(x = vjust, len = 1, any.missing=FALSE)
  checkmate::assert_numeric(x = hjust, len = 1, any.missing=FALSE)
  if (length(fun) > 1) {
    fun <- "var"
  }
  shrink_opts <- list(
    fun = fun,
    size = size,
    color = color,
    vjust = vjust,
    hjust = hjust,
    ...
  )
  class(shrink_opts) <- c("list", "pmxShrinkClass")
  shrink_opts
}
#' Performs checks of names in shrink list
#'
#' @param shrink_list \code{list} list of shrink arguments
#' @export
check_shrink <- function(shrink_list) {
  # Validate that `shrink_list` carries all the expected option names.
  # checkmate::checkNames does not throw: it returns TRUE on success or an
  # error string, and that result is handed back to the caller.
  expected <- c("fun", "size", "color", "vjust", "hjust")
  checkmate::checkNames(names(shrink_list), must.include = expected)
}
#' Create a new plot of the desired type
#'
#' @param ctr \code{pmxClass} controller object
#' @param ptype plot type can be:
##' \itemize{
##' \item{"IND"}{ Individual plot type: \code{\link{individual}} }
##' \item{"DIS"}{ Distribution plot type : \code{\link{distrib}}}
##' \item{"SCATTER"}{ Residual plot type :\code{\link{residual}}}
##' }
##' @param pname plot name, if missing it will be created using function aesthetics
##' @param filter optional filter which will be applied to plotting data
##' @param strat.facet \code{formula} define categorical stratification as formula
##' @param strat.color \code{character}
##' @param trans \code{list}{transformation operator}
##' @param color.scales \code{list} can be used with strat.color to set scale_color_manual
##' @param use.defaults \code{logical} if FALSE do not use defaults defined in yaml init files
#' @param ... other plot parameters to configure \code{\link{pmx_gpar}}.
#'
#' @family pmxclass
#' @return invisible ctr object
#' @export
set_plot <- function(
                     ctr,
                     ptype = c(
                       "IND", "DIS", "SCATTER", "ETA_PAIRS",
                       "ETA_COV", "PMX_QQ", "VPC", "PMX_DENS"
                     ),
                     pname,
                     use.defaults = TRUE,
                     filter = NULL,
                     strat.color = NULL,
                     strat.facet = NULL,
                     color.scales = NULL,
                     trans = NULL, ...) {
  # Create a plot of the requested type, register it on the controller
  # under `pname`, and return the controller invisibly.
  assert_that(is_pmxclass(ctr))
  ptype <- match.arg(ptype)
  assert_that(is_string_or_null(pname))
  assert_that(is_string_or_null(strat.color))
  assert_that(is_string_or_formula_or_null(strat.facet))
  params <- list(...)
  if (use.defaults) {
    # Merge yaml plot defaults under the user parameters; the DIS type
    # picks its default set from the requested sub-type (hist vs box).
    defaults_yaml <-
      file.path(system.file(package = "ggPMX"), "init", "defaults.yaml")
    defaults <- yaml.load_file(defaults_yaml)
    names(defaults) <- tolower(names(defaults))
    def <- if (tolower(ptype) %in% names(defaults)) {
      defaults[[tolower(ptype)]]
    } else {
      if (ptype == "DIS") {
        # NOTE(review): assumes a `type` entry was passed in `...` for DIS
        # plots; params$type would be NULL otherwise — confirm with callers.
        if (params$type == "hist") {
          defaults[["dis_hist"]]
        } else {
          defaults[["dis_box"]]
        }
      }
    }
    if (!is.null(def)) {
      # user-supplied parameters win over the yaml defaults
      params <- l_left_join(def, params)
      params$ptype <- NULL
    }
  }
  if (ptype == "VPC") {
    # VPC plots need the simulation's dv/idv column names
    params$dv <- ctr$sim$dv
    params$idv <- ctr$sim$idv
  }
  # Build the plot configuration. Random-effect plot types yield NULL
  # (and are silently skipped) when the model has no random effects.
  conf <-
    switch(ptype,
      IND = do.call(individual, params),
      DIS = if (ctr$has_re) do.call(distrib, params),
      SCATTER = do.call(residual, params),
      ETA_PAIRS = if (ctr$has_re) do.call(eta_pairs, params),
      ETA_COV = if (ctr$has_re) do.call(eta_cov, params),
      PMX_QQ = do.call(pmx_qq, params),
      PMX_DENS = do.call(pmx_dens, params),
      VPC = do.call(pmx_vpc, params)
    )
  # `filter` is captured unevaluated (non-standard evaluation) and turned
  # into a filtering closure applied to the plotting data.
  if (!is.null(substitute(filter))) {
    filter <- deparse(substitute(filter))
    filter <- local_filter(filter)
  }
  if (!is.null(conf)) {
    conf[["filter"]] <- filter
    conf[["trans"]] <- trans
    if (!is.null(strat.color)) conf[["strat.color"]] <- strat.color
    if (!is.null(strat.facet)) conf[["strat.facet"]] <- strat.facet
    if (!is.null(color.scales)) conf$gp[["color.scales"]] <- color.scales
    # remember the call parameters so the plot can be re-created later
    ctr[["config"]][["plots"]][[toupper(pname)]] <-
      c(ptype = ptype, list(...))
    ctr$add_plot(conf, pname)
  }
  invisible(ctr)
}
#' update or add a new abbreviation
#'
#' @param ctr \code{pmxClass} controller object
#' @param ... Options to set or add, with the form \code{name = value}.
#' @export
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' ctr %>% set_abbrev("new_param" = "new value")
#' ctr %>% get_abbrev("new_param")
#' }
set_abbrev <- function(ctr, ...) {
  # Add or update abbreviation entries on the controller, then touch every
  # registered plot so the refreshed abbreviations are picked up.
  assert_that(is_pmxclass(ctr))
  abbrev <- if (length(ctr$abbrev) > 0) {
    # merge the new entries into the existing table (new values win)
    l_left_join(ctr$abbrev, list(...))
  } else {
    unlist(list(...), recursive = FALSE)
  }
  class(abbrev) <- c("abbreviation", "list")
  ctr$abbrev <- abbrev
  # NOTE(review): get_plot_param() is presumably called for its side effect
  # of refreshing plot labels; the loop's value is discarded — confirm.
  for (plot_name in (ctr %>% plot_names())){
    get_plot_param(ctr, plot_name)
  }
}
#' S3 print abbreviation
#' @param x object of class configs
#' @param ... pass additional options (not used presently)
#' @return print abbreviation
#' @export
print.abbreviation <- function(x, ...) {
  # Print every abbreviation as "key : definition", one entry per line.
  assert_that(inherits(x, "abbreviation"))
  nms <- names(x)
  for (idx in seq_along(x)) {
    cat(sprintf("%s : %s \n", nms[idx], x[[idx]]))
  }
}
#' Get abbreviation definition by key
#'
#' @param param abbreviation term
#' @param ctr \code{pmxClass} controller
#'
#' @return \code{character} abbreviation definition
#' @export
get_abbrev <- function(ctr, param) {
  # With no `param`, return the whole abbreviation table; otherwise return
  # the stored definition, falling back to the key itself when unknown.
  keys <- ctr$abbrev
  if (missing(param)) {
    return(keys)
  }
  definition <- keys[[param]]
  if (is.null(definition)) param else definition
}
#' Get plot object
#'
#' @param ctr \code{pmxClass} controller object
#' @param nplot character the plot name
#' @param which_pages integer vector (can be length 1), set page number in case of multi pages plot, or character "all" to plot all pages.
#'
#' @family pmxclass
#' @return ggplot object
#' @export
#' @examples
#' \donttest{
#' library(ggPMX)
#' ctr <- theophylline()
#' p1 <- ctr %>% get_plot("iwres_ipred")
#' ## get all pages or some pages
#' p2 <- ctr %>% get_plot("individual")
#' ## returns one page of individual plot
#' p2 <- ctr %>% get_plot("individual", which_pages = 1)
#' p3 <- ctr %>% get_plot("individual", which_pages = c(1, 3))
#' ## get distribution plot
#' pdistri <- ctr %>% get_plot("eta_hist")
#' }
#'
get_plot <- function(ctr, nplot, which_pages = "all") {
  # Retrieve a rendered plot from the controller. `which_pages` is either
  # the string "all" or an integer vector of page numbers (paged plots are
  # stored as closures that take the requested pages).
  if (is.numeric(which_pages)) {
    which_pages <- as.integer(which_pages)
  }
  assert_that(is_pmxclass(ctr))
  assert_that(is_string(nplot))
  assert_that(is.integer(which_pages) || ((length(which_pages) == 1L) && (which_pages == "all")))
  nplot <- tolower(nplot)
  assert_that(is_valid_plot_name(nplot, plot_names(ctr)))
  stored <- ctr$get_plot(nplot)
  # "all" is passed to the page-generating closure as NULL
  if ((length(which_pages) == 1L) && which_pages == "all") {
    which_pages <- NULL
  }
  if (!is.function(stored)) {
    return(stored)
  }
  stored(which_pages)
}
#' Get plot names
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return list of plot names
#' @export
plot_names <- function(ctr) {
  # Names of every plot currently registered in the controller.
  assert_that(is_pmxclass(ctr))
  registered <- ctr$plots()
  registered
}
#' Get plots description
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return data.frame of plots
#' @export
plots <- function(ctr) {
  # Tabulate all registered plots: name, type and the function to call.
  existsF <- function(...) do.call("existsFunction", list(...))
  assert_that(is_pmxclass(ctr))
  x <- ctr$config
  # Map a plot name to its convenience wrapper pmx_plot_<name> when that
  # function is exported by ggPMX; otherwise display the generic call form.
  function_name <- function(nn) {
    fn <- sprintf("pmx_plot_%s", nn)
    if (!existsF(fn, where = asNamespace("ggPMX"))) {
      fn <- sprintf("pmx_plot('%s',...)", nn)
    }
    fn
  }
  if (exists("plots", x)) {
    pp <- x$plots
    names(pp) <- tolower(names(pp))
    # keep only the plots actually registered on the controller, in order
    pp <- pp[ctr$plots()]
    data.table(
      plot_name = names(pp),
      plot_type = sapply(pp, "[[", "ptype"),
      plot_function = sapply(names(pp), function_name)
    )
  }
}
#' Get the plot config by name
#'
#' @param ctr the controller object
#' @param pname the plot name
#'
#' @family pmxclass
#' @return the config object
#' @export
#'
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' ctr %>% set_plot("IND", pname = "indiv1")
#' ctr %>% get_plot_config("distr1")
#' }
get_plot_config <- function(ctr, pname) {
  # Stored configuration of plot `pname` (the controller method performs
  # the lower-cased lookup).
  assert_that(is_pmxclass(ctr))
  conf <- ctr$get_config(pname)
  conf
}
#' Get controller data set
#'
#' @param ctr the controller object
#' @param data_set the data set name
#'
#' @family pmxclass
#' @return a data.table of the named data set if available.
#' @export
get_data <- function(ctr, data_set = c(
                       "estimates", "predictions",
                       "eta", "finegrid", "input", "sim",
                       "individual"
                     )) {
  # Return a copy of the named controller data set (data.table::copy so the
  # caller cannot mutate controller state by reference).
  assert_that(is_pmxclass(ctr))
  ## match.arg() is intentionally NOT used here: set_data() may register
  ## custom data set names that are not part of the default choices.
  ## When `data_set` is left at its default the whole candidate vector is
  ## passed in; fall back to the first choice ("estimates") instead of
  ## feeding a length > 1 condition to if() (an error since R 4.2).
  if (length(data_set) > 1) data_set <- data_set[[1]]
  # "individual" is stored internally under the historical key "IND"
  if (data_set == "individual") data_set <- "IND"
  if (data_set == "input") {
    # the modelling input is kept apart from the derived data sets
    copy(ctr[["input"]])
  } else {
    copy(ctr[["data"]][[data_set]])
  }
}
#' Set a controller data set
#'
#' @param ctr the controller object
#' @param ... a named list parameters (see example)
#' @inheritParams base::eval
#' @family pmxclass
#' @details
#' This function can be used to set an existing data set or to create a new one. The basic
#' idea is to change the built-in data set (change the factor level names, change some rows
#' values or apply any other data set operation) and use the new data set using the dname
#' parameter of pmx_plot family functions.
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' dx <- ctr %>% get_data("eta")
#' dx <- dx[, EFFECT := factor(
#' EFFECT,
#' levels = c("ka", "V", "Cl"),
#' labels = c("Concentration", "Volume", "Clearance")
#' )]
#' ## update existing data set
#' ctr %>% set_data(eta = dx)
#' ## or create a new data set
#' ctr %>% set_data(eta_long = dx)
#' }
#' @export
set_data <- function(ctr, ..., envir=parent.frame()) {
  # Set (or create) one or more named controller data sets. Each argument
  # in `...` must be named: the name selects/creates the data set slot and
  # the (unevaluated) value is evaluated in `envir`.
  assert_that(is_pmxclass(ctr))
  params <- as.list(match.call(expand.dots = TRUE))[-c(1, 2)]
  # Unnamed `...` entries surface as "" (or a NULL names vector when none
  # are named). The original `if (!nzchar(names(params)))` test errors for
  # more than one argument (length > 1 condition, an error since R 4.2)
  # and misses the all-unnamed case, so check with is.null()/any().
  nms <- names(params)
  if (is.null(nms) || any(!nzchar(nms))) {
    stop("each data set should be well named")
  }
  invisible(Map(function(n, v) ctr$data[[n]] <- eval(v, envir=envir), names(params), params))
}
#' Get category covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_cats <- function(ctr) {
  # Names of the categorical covariates declared on the controller.
  assert_that(is_pmxclass(ctr))
  categorical <- ctr$cats
  categorical
}
#' Get extra stratification variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_strats <- function(ctr) {
  # Extra stratification variables declared on the controller.
  assert_that(is_pmxclass(ctr))
  stratifications <- ctr$strats
  stratifications
}
#' Get covariates variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_covariates <- function(ctr) {
  # Union of categorical and continuous covariates, empty entries dropped
  # ("" is the internal placeholder for "no covariates").
  assert_that(is_pmxclass(ctr))
  covariates <- unique(c(ctr$cats, ctr$conts))
  covariates[nzchar(covariates)]
}
#' Get continuous covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_conts <- function(ctr) {
  # Names of the continuous covariates declared on the controller.
  assert_that(is_pmxclass(ctr))
  continuous <- ctr$conts
  continuous
}
#' Get controller occasional covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_occ <- function(ctr) {
  # Occasional (inter-occasion) covariate name stored on the controller.
  assert_that(is_pmxclass(ctr))
  occasion <- ctr$occ
  occasion
}
# pmxSource (R6 Class) ------------------------------------------------------------
#' @importFrom R6 R6Class
# R6 controller class. Public fields hold the loaded data sets, input and
# configuration; most methods are thin shims delegating to the pmx_* helper
# functions defined below (keeping R6 bodies small and testable).
pmxClass <- R6::R6Class(
  "pmxClass",
  # Private methods ------------------------------------------------------------
  private = list(
    .data_path = "",
    .input_path = "",
    .covariates = NULL,
    .plots = list(),
    .plots_configs = list()
  ),
  # Public methods -------------------------------------------------------------
  public = list(
    data = NULL, config = NULL, input = NULL,
    input_file = NULL, dv = NULL, dvid = NULL, cats = NULL, conts = NULL, occ = NULL,
    strats = NULL, settings = NULL, has_re = FALSE, re = NULL,
    abbrev = list(), endpoint = NULL, warnings = list(),
    footnote = FALSE, save_dir = NULL,
    # FIFO of plot file names pending inclusion in the report, plus a
    # running counter used to build unique file names.
    report_queue = list(),
    report_n = 0,
    plot_file_name = "",
    sim = NULL,
    bloq = NULL,
    id = NULL,
    time = NULL,
    sim_blq = FALSE,
    initialize = function(data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time,sim_blq)
      pmx_initialize(self, private, data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq, id, time,sim_blq),
    print = function(data_path, config, ...)
      pmx_print(self, private, ...),
    # Register a plot for the report: bump the counter and append the
    # derived "<name>-<n>" file name to the queue.
    enqueue_plot = function(pname) {
      self$report_n <- self$report_n + 1
      pname_file <- paste0(pname, "-", self$report_n)
      self$plot_file_name <- pname_file
      self$report_queue <- c(self$report_queue, pname_file)
    },
    dequeue_plot = function() pmx_dequeue_plot(self),
    # Operations ---------------------------------------------------------------
    add_plot = function(x, pname)
      pmx_add_plot(self, private, x, pname),
    update_plot = function(pname, strat.facet = NULL, strat.color = NULL,
                           filter = NULL, trans = NULL,
                           ..., pmxgpar = NULL) {
      pmx_update_plot(
        self, private, pname,
        strat.color = strat.color, strat.facet = strat.facet,
        filter, trans, ..., pmxgpar = pmxgpar
      )
    },
    remove_plot = function(pname, ...)
      pmx_remove_plot(self, private, pname, ...),
    get_config = function(pname)
      pmx_get_config(self, private, pname),
    set_config = function(pname, new)
      pmx_set_config(self, private, pname, new),
    get_plot = function(pname)
      pmx_get_plot(self, private, pname),
    plots = function()
      pmx_plots(self, private),
    post_load = function()
      pmx_post_load(self, private)
  )
)
# Initialize a pmxClass controller: normalise arguments, read the input,
# load the source data sets, post-process random effects / sim_blq columns
# and finally create all configured plots.
pmx_initialize <- function(self, private, data_path, input, dv,
                           config, dvid, cats, conts, occ, strats,
                           settings, endpoint, sim, bloq, id, time, sim_blq) {
  # silence R CMD check notes about data.table NSE column names
  DVID <- ID <- NULL
  # BUG FIX: the original condition tested missing(data_path) twice; per
  # the error message a missing `config` must be rejected as well.
  if (missing(data_path) || missing(config)) {
    stop(
      "Expecting source path(directory ) and a config path",
      call. = FALSE
    )
  }
  # Normalise optional arguments (covariate vectors default to "").
  if (missing(dvid)) dvid <- NULL
  if (any(missing(occ) | is.null(occ) | is.na(occ))) occ <- ""
  if (any(missing(cats) | is.null(cats) | is.na(cats))) cats <- ""
  if (any(missing(conts) | is.null(conts) | is.na(conts))) conts <- ""
  if (any(missing(strats) | is.null(strats) | is.na(strats))) strats <- ""
  if (missing(settings)) settings <- NULL
  if (missing(bloq)) bloq <- NULL
  if (missing(id)) id <- NULL
  if (missing(time)) time <- NULL
  if (missing(sim_blq)) sim_blq <- FALSE
  private$.data_path <- data_path
  self$save_dir <- data_path
  if (is.character(input)) {
    private$.input_path <- input
  }
  self$config <- config
  self$dv <- dv
  self$dvid <- dvid
  self$cats <- cats
  self$conts <- conts
  self$occ <- toupper(occ)
  self$strats <- strats
  self$settings <- settings
  self$bloq <- bloq
  self$id <- id
  self$time <- time
  self$sim_blq <- sim_blq
  # A bare endpoint code (integer/character) is promoted to a full object.
  if (!is.null(endpoint) && is.atomic(endpoint)) {
    endpoint <- pmx_endpoint(code = as.character(endpoint))
  }
  self$endpoint <- endpoint
  # The observation input is either a file path or a data.frame.
  if (is.character(input) && file.exists(input)) {
    self$input_file <- input
    self$input <- read_input(input, self$dv, self$dvid, self$cats, self$conts, self$strats, self$occ, self$endpoint, self$id, self$time)
  } else {
    if (!inherits(input, "data.frame")) {
      stop("observation data should be either a file or a data.frame")
    }
    self$input <- setDT(input)
  }
  # Always add isobserv to address issue #235
  self$input$isobserv <- "accepted"
  self[["data"]] <- load_source(
    sys = config[["sys"]],
    private$.data_path,
    self[["config"]][["data"]],
    dvid = self[["dvid"]],
    endpoint = self[["endpoint"]],
    occ = self$occ,
    id = self$id
  )
  # Detect random effects from eta_<name>_<mode|mean> columns.
  if (!is.null(self$data[["eta"]])) {
    re <- grep("^eta_(.*)_(mode|mean)", names(self$data[["eta"]]), value = TRUE)
    if (length(re) > 0) {
      self$has_re <- TRUE
      self$re <- gsub("^eta_(.*)_(mode|mean)", "\\1", re)
      self$data[["eta"]] <-
        post_load_eta(
          self$data[["eta"]],
          self$input, self$sys, self$occ
        )
    }
  }
  self$post_load()
  # Replace some column names of sim_blq with ggPMX naming convention
  if(!is.null(self[["data"]][["sim_blq_y"]])){
    yname <- names(self[["data"]][["sim_blq_y"]])[grep("simBlq", names(self[["data"]][["sim_blq_y"]]))]
    yname <- gsub("mode|mean|simBlq|_", "", yname)
    # Some cases dv and xx_simBlq are not the same
    suppressWarnings(
      if(self[["dv"]] == yname) {
        self[["data"]][["sim_blq_y"]] <-
          self[["data"]][["sim_blq_y"]][,c("NPDE","IWRES", paste(dv)) := NULL]
        names(self[["data"]][["sim_blq_y"]]) <-
          gsub("mode|mean|simBlq|_","", names(self[["data"]][["sim_blq_y"]]))
        self[["data"]][["sim_blq_y"]][["DV"]] <-
          self[["data"]][["sim_blq_y"]][[paste(dv)]]
      } else {
        self[["data"]][["sim_blq_y"]] <-
          self[["data"]][["sim_blq_y"]][,c("NPDE","IWRES") := NULL]
        names(self[["data"]][["sim_blq_y"]]) <-
          gsub("mode|mean|simBlq|_","", names(self[["data"]][["sim_blq_y"]]))
        self[["data"]][["sim_blq_y"]][["DV"]] <-
          self[["data"]][["sim_blq_y"]][[yname]]
      }
    )
    #rename npde and iwRes to NPDE and IWRES
    place_vec <- which(
      names(self$data$sim_blq_y) == "npde" |
        names(self$data$sim_blq_y) == "iwRes"
    )
    names(self$data$sim_blq_y)[place_vec] <-
      toupper(names(self$data$sim_blq_y)[place_vec])
    # Needed same treatment for "sim_blq" as for "sim_blq_y"
    if(!is.null(self[["data"]][["sim_blq"]])){
      # In some cases xx and xx_simBlq are not the same
      suppressWarnings({
        for(cn in c("iwRes", "pwRes", "npde")) {
          if(paste0(cn, "_mode_simBlq") %in% colnames(self[["data"]][["sim_blq"]])) {
            self[["data"]][["sim_blq"]][[toupper(cn)]] <-
              self[["data"]][["sim_blq"]][[paste0(cn, "_mode_simBlq")]]
          }
        }
      })
    }
    # Needed same treatment for "sim_blq_npde_iwres" as for "sim_blq_y"
    if(!is.null(self[["data"]][["sim_blq_npde_iwres"]])){
      #rename npde and iwRes to NPDE and IWRES
      place_vec <- which(
        names(self$data$sim_blq_npde_iwres) == "npde" |
          names(self$data$sim_blq_npde_iwres) == "iwRes"
      )
      names(self$data$sim_blq_npde_iwres)[place_vec] <-
        toupper(names(self$data$sim_blq_npde_iwres)[place_vec])
    }
  } else if ((self$config$sys == "mlx18") && (self$sim_blq == TRUE)) {
    # give message if new version of monolix, otherwise sim_blq cannot be loaded anyway
    message("`sim_blq` dataset could not be generated, `sim_blq_npde_iwres` or `sim_blq_y` is missing")
  }
  # VPC simulation data: merge the simulation with the input covariates.
  if (!is.null(sim)) {
    dx <- sim[["sim"]]
    inn <- copy(self$input)[, self$dv := NULL]
    # check for unique keys in the observation variables
    if (sum(duplicated(inn[, c("ID", "TIME"), with = FALSE])) > 0) {
      warning(
        paste(
          " Different covariates for the same patient same time point\n",
          "--> Duplicated created in the vpc data set."
        ),
        call. = FALSE
      )
    }
    self$data[["sim"]] <- merge_dx_inn_by_id_time(dx, inn, config$sys)
    self$sim <- sim
  }
  # nlmixr and NONMEM configs carry their data sets directly on the config.
  if (config$sys == "nlmixr") {
    self$data$predictions <- input
    self$data$IND <- if (!is.null(config$finegrid)) config$finegrid else input
    self$data$eta <- config$eta
    self$data$omega <- config$omega
    self$has_re <- TRUE
  }
  if (config$sys == "nm") {
    self$data$predictions <- input
    self$data$IND <- if (!is.null(config$finegrid)) config$finegrid else input
    self$data$eta <- config$eta
    self$data$omega <- config$omega
    self$has_re <- TRUE
    self$bloq <- bloq
    self$data$estimates <- config$parameters
  } else if (config$sys == "nlmixr") {
    self$data$estimates <- config$parameters
  }
  ## abbrev
  keys_file <- file.path(
    system.file(package = "ggPMX"), "init", "abbrev.yaml"
  )
  self$abbrev <- set_abbrev(self, yaml.load_file(keys_file))
  ## create all plots
  for (nn in names(self$config$plots)) {
    x <- self$config$plots[[nn]]
    x$pname <- tolower(nn)
    x$use.defaults <- FALSE
    do.call(set_plot, c(ctr = self, x))
  }
}
#' @importFrom knitr kable
pmx_print <- function(self, private, ...) {
  # Print a PARAM/VALUE summary table of the controller followed by the
  # configuration summary. Backend of the S3 method print.pmxClass().
  cat("\npmx object:\n")
  # Helper: one (name, comma-joined values) row, or NULL when all values
  # are empty strings — NULL rows simply vanish in rbind() below.
  paste_col <- function(n, x) if (all(x != "")) c(n, paste(x, collapse = ","))
  ctr_table <-
    rbind(
      c(
        "working directory",
        basename(dirname(private$.data_path))
      ),
      c("Modelling input file", basename(private$.input_path)),
      c("dv", self$dv),
      c("dvid", self$dvid),
      paste_col("cats", self %>% get_cats()),
      paste_col("conts", self %>% get_conts()),
      paste_col("strats", self %>% get_strats())
    )
  colnames(ctr_table) <- c("PARAM", "VALUE")
  print(kable(ctr_table))
  print(self$config, ctr = self, plot_names = names(private$.plots))
}
pmx_transform <- function(x, dx, trans, direction) {
  # Apply the transformation `trans` (a function name, e.g. "log10") to the
  # relevant columns of data set `dx` for plot configuration `x`.
  # `direction` selects the axis whose columns are transformed ("x", "y" or
  # "xy"); it may also be encoded in `trans` itself as "<fun>_<direction>"
  # (e.g. "log10_xy"), in which case it overrides the argument.
  if (is.character(trans)) {
    params <- strsplit(trans, "_")[[1]]
    trans <- params[1]
    direction <- params[2]
  }
  # Per-plot-type column selectors: map the direction onto the data columns
  # that feed the corresponding axis.
  cols_res <- function(x) {
    # SCATTER: axis columns come from the config's aesthetics (aess)
    with(x, {
      switch(
        direction,
        x = aess$x,
        y = aess$y,
        xy = c(aess$x, aess$y)
      )
    })
  }
  cols_ind <- function(x) {
    # IND: time on x; predictions and observations on y
    switch(
      direction,
      x = "TIME",
      y = c("PRED", "IPRED", "DV"),
      xy = c("TIME", "PRED", "IPRED", "DV")
    )
  }
  cols_dis <- function(x) {
    # DIS: the distribution always plots the VALUE column
    switch(
      direction,
      x = c("VALUE"),
      y = c("VALUE"),
      xy = c("VALUE")
    )
  }
  cols_qq <- function(x) {
    # PMX_QQ: only the sample column (x) can be transformed
    switch(
      direction,
      x = x$x
    )
  }
  cols_eta_conts <- function(x) {
    # ETA_COV: only the eta VALUE column (y) can be transformed
    switch(
      direction,
      y = "VALUE"
    )
  }
  cols <- switch(
    x[["ptype"]],
    SCATTER = cols_res(x),
    IND = cols_ind(x),
    DIS = cols_dis(x),
    PMX_QQ = cols_qq(x),
    ETA_COV = cols_eta_conts(x)
  )
  # only transform columns actually present in the data
  cols <- intersect(cols, names(dx))
  if (length(cols) > 0) {
    fun <- match.fun(trans)
    # data.table in-place update of the selected columns
    dx[, (cols) := lapply(.SD, fun), .SDcols = (cols)]
  }
  dx
}
pmx_remove_plot <- function(self, private, pname, ...) {
  # Drop the plot and its configuration from the controller's private store.
  for (slot in c(".plots_configs", ".plots")) {
    private[[slot]][[pname]] <- NULL
  }
  invisible(self)
}
pmx_get_config <- function(self, private, pname) {
  # Plot configurations are stored under lower-case names.
  key <- tolower(pname)
  private$.plots_configs[[key]]
}
pmx_set_config <- function(self, private, pname, new) {
  # Store the new configuration under the lower-cased plot name.
  key <- tolower(pname)
  private$.plots_configs[[key]] <- new
}
pmx_dequeue_plot <- function(self) {
  # Pop the oldest plot file name from the report queue (FIFO). On an
  # empty queue, emit a diagnostic message and return (invisible) NULL.
  if (length(self$report_queue) == 0) {
    return(message("Warning: Chunk has plots that were not registered within ggPMX. Footnotes may be wrong."))
  }
  oldest <- self$report_queue[[1]]
  self$report_queue <- self$report_queue[-1]
  oldest
}
pmx_fig_process_init <- function(self) {
  # Reset the controller's report queue before processing report figures.
  # BUG FIX: the original assigned to *local* variables `report_queue` and
  # `report_n`, which were discarded on return, leaving the controller
  # state untouched. Assign to the controller's fields on `self` instead
  # (matching enqueue_plot(), which reads/writes self$report_*).
  self$report_queue <- list()
  self$report_n <- 0
}
pmx_fig_process_wrapup <- function(self) {
  # Sanity check at the end of report figure processing: every enqueued
  # plot must have been dequeued. is_empty_queue() is a package-level
  # assertion helper defined elsewhere in ggPMX.
  assert_that(is_empty_queue(self))
}
pmx_get_plot <- function(self, private, pname) {
  # Plots are stored under lower-case names in the private store.
  private$.plots[[tolower(pname)]]
}
pmx_plots <- function(self, private) {
  # Names of all plots registered in the private store.
  registered <- private$.plots
  names(registered)
}
pmx_post_load <- function(self, private) {
  # Run the package post-processing step on the raw data sets (merging with
  # the input, renames, etc.) and store both the result and any warnings
  # collected along the way.
  res <- post_load(
    self$data, self$input, self$config$sys,
    self$config$plots,
    occ = get_occ(self)
  )
  self$data <- res$data
  self$warnings <- res$warnings
}
#' Print pmxClass object
#'
#' @param x pmxClass object
#' @param ... additional arguments to pass to print
#'
#' @family pmxclass functions
#' @return print object to screen
#' @export
print.pmxClass <- function(x, ...) {
  # Delegate printing to the R6 object's own `print` method (a closure
  # bound to the instance), forwarding any extra arguments.
  printer <- x$print
  printer(...)
}
#' Creates a deep copy of the controller
#'
#' @param ctr \code{pmxClass} object
#' @param keep_globals \code{logical} if TRUE we keep the global parameters changed by pmx_settings
#' @param ... extra parameters passed to \code{pmx_settings}
#'
#' @return an object of \code{pmxClass}
#' @export
#' @details
#'
#' The controller is an `R6` object, it behaves like a reference object.
#' Some functions ( methods) can have a side effect on the controller and modify it internally.
#' Technically speaking we talk about chaining not piping here. However ,
#' using \code{pmx_copy} user can work on a copy of the controller.
#'
#' By default the copy does not keep global parameters set using pmx_settings.
#'
#' @examples
#' \donttest{
#' ctr <- theophylline()
#' cctr <- ctr %>% pmx_copy()
#' ## Any change in the ctr has no side effect in the ctr and vice versa
#' }
pmx_copy <- function(ctr, keep_globals = FALSE, ...) {
  # Deep-clone the R6 controller so changes to the copy do not leak back
  # into the original (R6 objects have reference semantics).
  assert_that(is_pmxclass(ctr))
  cctr <- ctr$clone()
  # Capture the caller's extra arguments; get_params_from_call() is an
  # NSE helper defined elsewhere in the package.
  params <- get_params_from_call()
  ## params <- list(...)
  if (!keep_globals) {
    # `nn` holds the pmx_settings() formal names (the trailing `...`
    # dropped). Settings passed again through `...` are merged over the
    # copied settings and re-applied via pmx_settings(); if none were
    # passed, the clone's settings are left as-is.
    nn <- rev(names(formals(pmx_settings)))[-1]
    eff_nn <- intersect(nn, names(params))
    settings <- l_left_join(ctr$settings, params[eff_nn])
    if (length(eff_nn) > 0) {
      cctr$settings <- do.call(pmx_settings, settings)
    }
  }
  cctr
}
# ---- stray character removed: boundary between two concatenated source files ----
# library() errors immediately on a missing package; require() only
# returns FALSE and would let the app fail later with a cryptic error.
library(shinydashboard)
library(shiny)

# Server for the risk-dashboard demo.
# * Renders three risk infoBoxes (Fire / Lightning / Flood) and one
#   insurance-quote infoBox.
# * Keeps a reactive colour for risk1 that flips to green on click.
# * An observer mirrors input$control_label / input$control_num into a
#   large set of demo inputs (text, numeric, sliders, dates, checkboxes,
#   radio buttons, selects and the active tab).
shinyServer(function(input, output, clientData, session) {

  # Risk info boxes (all filled). The pound signs below were mis-encoded
  # mojibake ("ยฃ") in the original source and have been repaired.
  output$risk1 <- renderInfoBox({
    infoBox(
      "Fire",
      "£30000",
      icon = icon("fire", "fa-2x"),
      color = "red",
      width = NULL, fill = TRUE
    )
  })

  output$risk2 <- renderInfoBox({
    infoBox(
      "Lightning",
      "£10000",
      icon = icon("bolt", "fa-2x"),
      color = "red",
      width = NULL, fill = TRUE
    )
  })

  output$risk3 <- renderInfoBox({
    infoBox(
      "Flood",
      "£8000",
      icon = shiny::icon("tint", "fa-2x"),
      color = "red",
      width = NULL, fill = TRUE
    )
  })

  # Quote box, styled differently (blue) from the risk boxes.
  output$quote1 <- renderInfoBox({
    infoBox(
      "Allianz Home Insurance Quote",
      "£30",
      icon = shiny::icon("shield", "fa-2x"),
      color = "blue",
      width = NULL, fill = TRUE
    )
  })

  # Clicking risk1 stores a green colour.
  # NOTE(review): v$risk1col is never read by any render function in this
  # file -- confirm something else consumes it, otherwise this is dead code.
  v <- reactiveValues(risk1col = "red")
  observeEvent(input$risk1_click, {
    v$risk1col <- "green"
  })

  # Demo observer: pushes the two control inputs into every other input.
  observe({
    # Short aliases; both values are read many times below.
    c_label <- input$control_label
    c_num <- input$control_num

    # Text: change both the label and the value.
    updateTextInput(session, "inText",
      label = paste("New", c_label),
      value = paste("New text", c_num)
    )

    # Numeric: change only the value for the first input...
    updateNumericInput(session, "inNumber", value = c_num)
    # ...and label, value, min, max and step for the second.
    updateNumericInput(session, "inNumber2",
      label = paste("Number ", c_label),
      value = c_num, min = c_num - 10, max = c_num + 10, step = 5)

    # Slider: only label and value can be updated.
    updateSliderInput(session, "inSlider",
      label = paste("Slider", c_label),
      value = c_num)

    # Range slider: pass a length-2 vector.
    updateSliderInput(session, "inSlider2",
      value = c(c_num - 1, c_num + 1))
    # An NA leaves that end of the range unchanged.
    updateSliderInput(session, "inSlider3",
      value = c(NA, c_num + 2))

    # Date input: only label and value can be set.
    updateDateInput(session, "inDate",
      label = paste("Date", c_label),
      value = paste0("2013-04-", c_num))

    # Date range input: only label and the four date endpoints can be set.
    updateDateRangeInput(session, "inDateRange",
      label = paste("Date range", c_label),
      start = paste0("2013-01-", c_num),
      end = paste0("2013-12-", c_num),
      min = paste0("2001-01-", c_num),
      max = paste0("2030-12-", c_num)
    )

    # Checkbox.
    # NOTE(review): c_num %% 2 yields 0/1 rather than a logical;
    # `c_num %% 2 == 1` would be the explicit form.
    updateCheckboxInput(session, "inCheckbox", value = c_num %% 2)

    # Checkbox group: rebuild the options so names/values carry c_num.
    cb_options <- list()
    cb_options[[paste("option label", c_num, "A")]] <-
      paste0("option-", c_num, "-A")
    cb_options[[paste("option label", c_num, "B")]] <-
      paste0("option-", c_num, "-B")
    updateCheckboxGroupInput(session, "inCheckboxGroup",
      label = paste("checkboxgroup", c_label),
      choices = cb_options,
      selected = paste0("option-", c_num, "-A")
    )

    # Radio group: same pattern as the checkbox group.
    r_options <- list()
    r_options[[paste("option label", c_num, "A")]] <-
      paste0("option-", c_num, "-A")
    r_options[[paste("option label", c_num, "B")]] <-
      paste0("option-", c_num, "-B")
    updateRadioButtons(session, "inRadio",
      label = paste("Radio", c_label),
      choices = r_options,
      selected = paste0("option-", c_num, "-A")
    )

    # Select inputs: fresh choices for both; the second also gets a new
    # label and a different selected value.
    s_options <- list()
    s_options[[paste("option label", c_num, "A")]] <-
      paste0("option-", c_num, "-A")
    s_options[[paste("option label", c_num, "B")]] <-
      paste0("option-", c_num, "-B")
    updateSelectInput(session, "inSelect",
      choices = s_options,
      selected = paste0("option-", c_num, "-A")
    )
    updateSelectInput(session, "inSelect2",
      label = paste("Select label", c_label),
      choices = s_options,
      selected = paste0("option-", c_num, "-B")
    )

    # Tabset: alternate between the two panels. The tabsetPanel must
    # have been created with an `id` argument for this to work.
    if (c_num %% 2) {
      updateTabsetPanel(session, "inTabset", selected = "panel2")
    } else {
      updateTabsetPanel(session, "inTabset", selected = "panel1")
    }
  })
})
|
/test folder/server.R
|
no_license
|
AMChierici/safer
|
R
| false
| false
| 6,135
|
r
|
# library() errors immediately on a missing package; require() only
# returns FALSE and would let the app fail later with a cryptic error.
library(shinydashboard)
library(shiny)

# Server for the risk-dashboard demo.
# * Renders three risk infoBoxes (Fire / Lightning / Flood) and one
#   insurance-quote infoBox.
# * Keeps a reactive colour for risk1 that flips to green on click.
# * An observer mirrors input$control_label / input$control_num into a
#   large set of demo inputs (text, numeric, sliders, dates, checkboxes,
#   radio buttons, selects and the active tab).
shinyServer(function(input, output, clientData, session) {

  # Risk info boxes (all filled). The pound signs below were mis-encoded
  # mojibake ("ยฃ") in the original source and have been repaired.
  output$risk1 <- renderInfoBox({
    infoBox(
      "Fire",
      "£30000",
      icon = icon("fire", "fa-2x"),
      color = "red",
      width = NULL, fill = TRUE
    )
  })

  output$risk2 <- renderInfoBox({
    infoBox(
      "Lightning",
      "£10000",
      icon = icon("bolt", "fa-2x"),
      color = "red",
      width = NULL, fill = TRUE
    )
  })

  output$risk3 <- renderInfoBox({
    infoBox(
      "Flood",
      "£8000",
      icon = shiny::icon("tint", "fa-2x"),
      color = "red",
      width = NULL, fill = TRUE
    )
  })

  # Quote box, styled differently (blue) from the risk boxes.
  output$quote1 <- renderInfoBox({
    infoBox(
      "Allianz Home Insurance Quote",
      "£30",
      icon = shiny::icon("shield", "fa-2x"),
      color = "blue",
      width = NULL, fill = TRUE
    )
  })

  # Clicking risk1 stores a green colour.
  # NOTE(review): v$risk1col is never read by any render function in this
  # file -- confirm something else consumes it, otherwise this is dead code.
  v <- reactiveValues(risk1col = "red")
  observeEvent(input$risk1_click, {
    v$risk1col <- "green"
  })

  # Demo observer: pushes the two control inputs into every other input.
  observe({
    # Short aliases; both values are read many times below.
    c_label <- input$control_label
    c_num <- input$control_num

    # Text: change both the label and the value.
    updateTextInput(session, "inText",
      label = paste("New", c_label),
      value = paste("New text", c_num)
    )

    # Numeric: change only the value for the first input...
    updateNumericInput(session, "inNumber", value = c_num)
    # ...and label, value, min, max and step for the second.
    updateNumericInput(session, "inNumber2",
      label = paste("Number ", c_label),
      value = c_num, min = c_num - 10, max = c_num + 10, step = 5)

    # Slider: only label and value can be updated.
    updateSliderInput(session, "inSlider",
      label = paste("Slider", c_label),
      value = c_num)

    # Range slider: pass a length-2 vector.
    updateSliderInput(session, "inSlider2",
      value = c(c_num - 1, c_num + 1))
    # An NA leaves that end of the range unchanged.
    updateSliderInput(session, "inSlider3",
      value = c(NA, c_num + 2))

    # Date input: only label and value can be set.
    updateDateInput(session, "inDate",
      label = paste("Date", c_label),
      value = paste0("2013-04-", c_num))

    # Date range input: only label and the four date endpoints can be set.
    updateDateRangeInput(session, "inDateRange",
      label = paste("Date range", c_label),
      start = paste0("2013-01-", c_num),
      end = paste0("2013-12-", c_num),
      min = paste0("2001-01-", c_num),
      max = paste0("2030-12-", c_num)
    )

    # Checkbox.
    # NOTE(review): c_num %% 2 yields 0/1 rather than a logical;
    # `c_num %% 2 == 1` would be the explicit form.
    updateCheckboxInput(session, "inCheckbox", value = c_num %% 2)

    # Checkbox group: rebuild the options so names/values carry c_num.
    cb_options <- list()
    cb_options[[paste("option label", c_num, "A")]] <-
      paste0("option-", c_num, "-A")
    cb_options[[paste("option label", c_num, "B")]] <-
      paste0("option-", c_num, "-B")
    updateCheckboxGroupInput(session, "inCheckboxGroup",
      label = paste("checkboxgroup", c_label),
      choices = cb_options,
      selected = paste0("option-", c_num, "-A")
    )

    # Radio group: same pattern as the checkbox group.
    r_options <- list()
    r_options[[paste("option label", c_num, "A")]] <-
      paste0("option-", c_num, "-A")
    r_options[[paste("option label", c_num, "B")]] <-
      paste0("option-", c_num, "-B")
    updateRadioButtons(session, "inRadio",
      label = paste("Radio", c_label),
      choices = r_options,
      selected = paste0("option-", c_num, "-A")
    )

    # Select inputs: fresh choices for both; the second also gets a new
    # label and a different selected value.
    s_options <- list()
    s_options[[paste("option label", c_num, "A")]] <-
      paste0("option-", c_num, "-A")
    s_options[[paste("option label", c_num, "B")]] <-
      paste0("option-", c_num, "-B")
    updateSelectInput(session, "inSelect",
      choices = s_options,
      selected = paste0("option-", c_num, "-A")
    )
    updateSelectInput(session, "inSelect2",
      label = paste("Select label", c_label),
      choices = s_options,
      selected = paste0("option-", c_num, "-B")
    )

    # Tabset: alternate between the two panels. The tabsetPanel must
    # have been created with an `id` argument for this to work.
    if (c_num %% 2) {
      updateTabsetPanel(session, "inTabset", selected = "panel2")
    } else {
      updateTabsetPanel(session, "inTabset", selected = "panel1")
    }
  })
})
|
library(shiny)

# UI for the Snake Draft Assistant.
# The projection/ADP CSV is read once at load time to populate the two
# selectize player pickers (choices = df$player_team).
# NOTE(review): hard-coded local path; the commented-out line is the
# shiny-server deployment path -- consider making this configurable.
df <- read.csv("/home/john/stats_corner/2016/shiny_apps/snake_draft/v2_5/faa_projection_data.csv",
#df <- read.csv("/srv/shiny-server/stats-corner/2016/snake-assistant/faa_projection_data.csv",
               stringsAsFactors = FALSE,
               header = TRUE)

fluidPage(
  headerPanel('Snake Draft Assistant 2.6'),
  sidebarLayout(position = "right",
    sidebarPanel(
      # Upcoming pick numbers are computed server-side.
      wellPanel(
        h4("Your Next Picks are:"),
        textOutput("next_pick"),
        textOutput("next_pick1"),
        textOutput("next_pick2")
      ),
      # Running list of every drafted player, regardless of team.
      wellPanel(
        h4("Drafted Players"),
        selectizeInput("drafted_players", label = "Enter Players as they get Drafted", multiple = TRUE, choices = df$player_team)
      )
    ),
    mainPanel(
      tabsetPanel(
        # Draft and roster configuration.
        tabPanel("Quick Start",
                 p("1. Enter your draft and team parameters below."),
                 p("2. As all players, not just yours, get drafted enter their names into the 'Drafted Players' window on the right."),
                 p("3. As you draft players, enter them on the 'Your Team' tab."),
                 column(6,
                        h4("Draft Parameters"),
                        numericInput("first_pick", label = h6("Round 1 Pick #"), value = 1),
                        numericInput("league_teams", label = h6("How Many Teams in League?"), value = 10),
                        selectInput("scoring_format", label = h6("Scoring Format"), choices = c("Standard", "PPR"), selected = "Standard"),
                        selectInput("extra_pos", label = h6("Additional Positions"), choices = c("FLEX", "OP"), selected = "FLEX")
                 ),
                 column(6,
                        h4("Team Parameters"),
                        numericInput("num_qb", label = "# QB", value = 1),
                        numericInput("num_rb", label = "# RB", value = 2),
                        numericInput("num_wr", label = "# WR", value = 3),
                        numericInput("num_te", label = "# TE", value = 1),
                        numericInput("num_flex", label = "# FLEX", value = 1),
                        numericInput("num_op", label = "# OP", value = 0),
                        numericInput("num_k", label = "# K", value = 1),
                        numericInput("num_dst", label = "# DST", value = 1)
                 )
        ),
        # NOTE(review): "Recomendations" is misspelled, but a tab's title
        # doubles as its value; left unchanged in case server code selects
        # this tab by name.
        tabPanel("Recomendations",
                 h4("Value Added (Per Game) and Dropoffs of Best Available (BA) Now and Next Time (BANT)"),
                 radioButtons("one_or_two", label = h4("Recommend Based on One Pick From Now or Two?"),
                              choices = list("One" = 1, "Two" = 2),
                              selected = 1,
                              # was `T`; TRUE cannot be reassigned, T can.
                              inline = TRUE),
                 dataTableOutput("rec_table")
        ),
        tabPanel("Your Team",
                 column(6,
                        h5("Your Team"),
                        selectizeInput("your_team", label = "Enter Players YOU Drafted", multiple = TRUE, choices = df$player_team)
                 ),
                 column(6,
                        h5("Weekly Expected Points From Starting Lineup"),
                        dataTableOutput("optimized_lineup"))
        ),
        tabPanel("About",
                 a("Projection and ADP data downloaded from Fantasy Football Analytics",
                   href="http://fantasyfootballanalytics.net/"),
                 p("Data last updated on September 4, 2016"),
                 p("Questions? Email me: StatsCorner@gmail.com"),
                 p(""),
                 p("App Updated on 2017-04-06, version 2.6")
        )
      )
    )
  )
)
|
/analysis/2016/shiny_apps/snake_draft/v2_5/ui.R
|
no_license
|
johnckane/fantasy-football
|
R
| false
| false
| 3,792
|
r
|
library(shiny)

# UI for the Snake Draft Assistant.
# The projection/ADP CSV is read once at load time to populate the two
# selectize player pickers (choices = df$player_team).
# NOTE(review): hard-coded local path; the commented-out line is the
# shiny-server deployment path -- consider making this configurable.
df <- read.csv("/home/john/stats_corner/2016/shiny_apps/snake_draft/v2_5/faa_projection_data.csv",
#df <- read.csv("/srv/shiny-server/stats-corner/2016/snake-assistant/faa_projection_data.csv",
               stringsAsFactors = FALSE,
               header = TRUE)

fluidPage(
  headerPanel('Snake Draft Assistant 2.6'),
  sidebarLayout(position = "right",
    sidebarPanel(
      # Upcoming pick numbers are computed server-side.
      wellPanel(
        h4("Your Next Picks are:"),
        textOutput("next_pick"),
        textOutput("next_pick1"),
        textOutput("next_pick2")
      ),
      # Running list of every drafted player, regardless of team.
      wellPanel(
        h4("Drafted Players"),
        selectizeInput("drafted_players", label = "Enter Players as they get Drafted", multiple = TRUE, choices = df$player_team)
      )
    ),
    mainPanel(
      tabsetPanel(
        # Draft and roster configuration.
        tabPanel("Quick Start",
                 p("1. Enter your draft and team parameters below."),
                 p("2. As all players, not just yours, get drafted enter their names into the 'Drafted Players' window on the right."),
                 p("3. As you draft players, enter them on the 'Your Team' tab."),
                 column(6,
                        h4("Draft Parameters"),
                        numericInput("first_pick", label = h6("Round 1 Pick #"), value = 1),
                        numericInput("league_teams", label = h6("How Many Teams in League?"), value = 10),
                        selectInput("scoring_format", label = h6("Scoring Format"), choices = c("Standard", "PPR"), selected = "Standard"),
                        selectInput("extra_pos", label = h6("Additional Positions"), choices = c("FLEX", "OP"), selected = "FLEX")
                 ),
                 column(6,
                        h4("Team Parameters"),
                        numericInput("num_qb", label = "# QB", value = 1),
                        numericInput("num_rb", label = "# RB", value = 2),
                        numericInput("num_wr", label = "# WR", value = 3),
                        numericInput("num_te", label = "# TE", value = 1),
                        numericInput("num_flex", label = "# FLEX", value = 1),
                        numericInput("num_op", label = "# OP", value = 0),
                        numericInput("num_k", label = "# K", value = 1),
                        numericInput("num_dst", label = "# DST", value = 1)
                 )
        ),
        # NOTE(review): "Recomendations" is misspelled, but a tab's title
        # doubles as its value; left unchanged in case server code selects
        # this tab by name.
        tabPanel("Recomendations",
                 h4("Value Added (Per Game) and Dropoffs of Best Available (BA) Now and Next Time (BANT)"),
                 radioButtons("one_or_two", label = h4("Recommend Based on One Pick From Now or Two?"),
                              choices = list("One" = 1, "Two" = 2),
                              selected = 1,
                              # was `T`; TRUE cannot be reassigned, T can.
                              inline = TRUE),
                 dataTableOutput("rec_table")
        ),
        tabPanel("Your Team",
                 column(6,
                        h5("Your Team"),
                        selectizeInput("your_team", label = "Enter Players YOU Drafted", multiple = TRUE, choices = df$player_team)
                 ),
                 column(6,
                        h5("Weekly Expected Points From Starting Lineup"),
                        dataTableOutput("optimized_lineup"))
        ),
        tabPanel("About",
                 a("Projection and ADP data downloaded from Fantasy Football Analytics",
                   href="http://fantasyfootballanalytics.net/"),
                 p("Data last updated on September 4, 2016"),
                 p("Questions? Email me: StatsCorner@gmail.com"),
                 p(""),
                 p("App Updated on 2017-04-06, version 2.6")
        )
      )
    )
  )
)
|
# Unit tests for the internal input validator .check_record_id().
# Contract exercised below: stop() on a missing, NULL, non-numeric, or
# length > 1 record_id; warn on a non-integer numeric; stay silent for a
# single integer (e.g. 2424L).
library(chemspiderapi)
context("check_record_id")
test_that("check_record_id() fails if no record_id is provided.", {
  expect_error(
    .check_record_id()
  )
})
test_that("check_record_id() fails if a NULL record_id is provided.", {
  expect_error(
    .check_record_id(record_id = NULL)
  )
})
test_that("check_record_id() fails if a record_id is not a numeric vector.", {
  expect_error(
    .check_record_id(record_id = "record_id")
  )
})
# NOTE(review): the values below are character, so the error may come from
# the type check rather than the length check; c(123L, 456L) would isolate
# the multiplicity behaviour.
test_that("check_record_id() fails if multiple record_ids are provided.", {
  expect_error(
    .check_record_id(record_id = c("123", "456"))
  )
})
test_that("check_record_id() issues a warning when a non-integer record_id is provided.", {
  expect_warning(
    .check_record_id(record_id = 2424)
  )
})
test_that("check_record_id() remains silent when the correct record_id is provided.", {
  expect_silent(
    .check_record_id(record_id = 2424L)
  )
})
|
/tests/testthat/test-check_record_id.R
|
permissive
|
RaoulWolf/chemspiderapi
|
R
| false
| false
| 910
|
r
|
# Unit tests for the internal input validator .check_record_id().
# Contract exercised below: stop() on a missing, NULL, non-numeric, or
# length > 1 record_id; warn on a non-integer numeric; stay silent for a
# single integer (e.g. 2424L).
library(chemspiderapi)
context("check_record_id")
test_that("check_record_id() fails if no record_id is provided.", {
  expect_error(
    .check_record_id()
  )
})
test_that("check_record_id() fails if a NULL record_id is provided.", {
  expect_error(
    .check_record_id(record_id = NULL)
  )
})
test_that("check_record_id() fails if a record_id is not a numeric vector.", {
  expect_error(
    .check_record_id(record_id = "record_id")
  )
})
# NOTE(review): the values below are character, so the error may come from
# the type check rather than the length check; c(123L, 456L) would isolate
# the multiplicity behaviour.
test_that("check_record_id() fails if multiple record_ids are provided.", {
  expect_error(
    .check_record_id(record_id = c("123", "456"))
  )
})
test_that("check_record_id() issues a warning when a non-integer record_id is provided.", {
  expect_warning(
    .check_record_id(record_id = 2424)
  )
})
test_that("check_record_id() remains silent when the correct record_id is provided.", {
  expect_silent(
    .check_record_id(record_id = 2424L)
  )
})
|
# An electronic instrument has a mean lifetime of 6 years with a standard
# deviation of 2 years. Treating the lifetime X as a normal random variable,
# find the warranty length t such that no more than 15% of instruments fail
# before the warranty expires, i.e. the t with P(X < t) = 0.15.
mu <- 6      # mean lifetime (years)
sigma <- 2   # standard deviation (years)

# Plot the lifetime density with a vertical dashed line at the mean.
# (Accented characters in the Spanish plot labels were mojibake in the
# original source and have been re-encoded.)
x <- seq(0, 12, 0.0001)
y <- dnorm(x, mu, sigma)
plot(x, y, type = 'l', lwd = 2, main = 'Tiempo de vida en años de ins. elect.', ylab = 'f(x)', xlab = 'años de vida')
lines(mu, dnorm(mu, mu, sigma), type = 'h', lty = 5, col = 'red')

# The warranty length is the 15th percentile of the lifetime distribution.
q15 <- qnorm(0.15, mu, sigma)
# FIX: the original label read "P( X < 15 ) = ", but q15 is a quantile
# (a time in years), not a probability.
print(paste('t such that P( X < t ) = 0.15:', q15))
|
/tp4_p7.r
|
no_license
|
MarianoSaez/TP4-ESTAD_2020
|
R
| false
| false
| 718
|
r
|
# La vida media de un instrumento electrรณnico es de 6 aรฑos con una desviaciรณn estรกndar de 2
# aรฑos. Si la vida de un instrumento tal puede tratarse como una variable normal y si el
# instrumento estรก garantizado, ยฟdurante cuรกnto tiempo debiera ser vรกlida la garantรญa para que no
# mรกs del 15% de los instrumentos fallen antes de la expiraciรณn de la รฉsta?
mu = 6
sigma = 2
# Grafico
x = seq(0, 12, 0.0001)
y = dnorm(x, mu, sigma)
plot(x, y, type='l', lwd=2, main='Tiempo de vida en aรฑos de ins. elect.', ylab='f(x)', xlab='aรฑos de vida')
lines(mu, dnorm(mu, mu, sigma),type='h', lty=5, col='red')
# Debo calcular el cuantil para X < 15
q15 = qnorm(0.15, mu, sigma)
print(paste('P( X < 15 ) = ', q15))
|
# Convert Beverton-Holt stock-recruitment parameters between mean- and
# median-based parameterisations under lognormal recruitment error.
#
# Args:
#   R0:       unfished recruitment.
#   h:        steepness (the formulas divide by h - 0.2).
#   phi:      unfished spawning biomass per recruit.
#   sigmaR:   recruitment error SD on the log scale.
#   mean2med: TRUE applies the mean-to-median factor exp(-sigmaR^2 / 2);
#             otherwise the median-to-mean factor exp(sigmaR^2 / 2).
#
# Returns:
#   A list with the bias-corrected unfished spawning biomass (S0BC),
#   unfished recruitment (R0BC) and steepness (hBC).
convertSRparms <- function(R0, h, phi, sigmaR, mean2med){
  bias <- ifelse(mean2med == TRUE, exp(-0.5 * sigmaR^2), exp(0.5 * sigmaR^2))
  s0_adj <- (bias * 0.8 * R0 * h * phi - 0.2 * phi * R0 * (1 - h)) / (h - 0.2)
  r0_adj <- s0_adj / phi
  # Recruitment at 20% of the corrected unfished spawning biomass.
  rec_num <- bias * 0.8 * R0 * h * 0.2 * s0_adj
  rec_den <- 0.2 * phi * R0 * (1 - h) + (h - 0.2) * 0.2 * s0_adj
  list(S0BC = s0_adj, R0BC = r0_adj, hBC = (rec_num / rec_den) / r0_adj)
}
# Grid of recruitment-error SDs (sigmaR) and steepness values (h) over which
# to map the effect of ignoring lognormal bias correction.
R0 <- 1000000
phi <- 0.01025625
sigmaR <- seq(0.01, 2, by = 0.01)
h <- seq(0.21, 1, by = 0.01)

# R0diff: relative difference in R0 (%); hdiff: absolute difference in h.
R0diff <- hdiff <- matrix(NA, nrow = length(sigmaR), ncol = length(h))
for (i in seq_along(sigmaR)) {
  for (j in seq_along(h)) {
    # Convert once per grid cell (the original called convertSRparms twice).
    conv <- convertSRparms(R0 = R0, h = h[j], phi = phi,
                           sigmaR = sigmaR[i], mean2med = FALSE)
    R0diff[i, j] <- (conv$R0BC - R0) / R0 * 100
    hdiff[i, j] <- conv$hBC - h[j]
  }
}

#### With Color ####
jpeg(file="C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0.jpg", width=170, height=100, units="mm", res=1200)
filled.contour(sigmaR, h, R0diff,
               # Spelled out: `color` relied on partial matching of
               # filled.contour's `color.palette` argument.
               color.palette = terrain.colors,
               xlab="SigmaR", ylab="Median h",
               plot.axes = {
                 axis(1); axis(2);
                 contour(sigmaR, h, R0diff,
                         col = "black",
                         add = TRUE,
                         levels = c(10, 50, 100, 500, 1000, 5000),
                         method="edge")
               },
               key.title = {
                 par(cex.main=0.8);
                 title(main="Relative Diff\nin R0 (%)")
               })
legend("topleft", "A)", bty="n", box.col = "")
dev.off()

jpeg(file="C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_h.jpg", width=150, height=100, units="mm", res=1200)
filled.contour(sigmaR, h, hdiff,
               #color.palette = terrain.colors,
               # FIX: the original passed `color = white`, referencing an
               # undefined object `white` (a run-time error). filled.contour
               # expects a palette *function*, so build an all-white one.
               color.palette = function(n) rep("white", n),
               xlab="SigmaR", ylab="Median h",
               plot.axes = {
                 axis(1); axis(2);
                 contour(sigmaR, h, hdiff,
                         col = "black",
                         add = TRUE,
                         levels = c(0.02, 0.1, 0.2, 0.3, 0.4),
                         method="edge")
               },
               key.title = {
                 par(cex.main=0.8);
                 title(main="Diff in h")
               })
legend("topleft", "B)", bty="n", box.col = "")
dev.off()

#### Without Color ####
# Contour-only two-panel version of the same figures.
jpeg(file="C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0andh.jpg", width=200, height=100, units="mm", res=1200)
par(mfrow=c(1,2))
plot(NA,xlim=range(sigmaR),
     ylim=range(h),xlab="SigmaR",ylab="Median-unbiased h",
     frame=FALSE,axes=FALSE,xaxs="i",yaxs="i", main="Relative Diff in R0 (%)")
contour(sigmaR, h, R0diff,
        col = "black",
        add = TRUE,
        levels = c(10, 50, 100, 500, 1000, 5000),
        method="edge")
axis(1); axis(2, las=2);
legend("topleft", "A)", bty="n")
box()
plot(NA,xlim=range(sigmaR),
     ylim=range(h),xlab="SigmaR",ylab="Median-unbiased h",
     frame=FALSE,axes=FALSE,xaxs="i",yaxs="i", main="Diff in h")
contour(sigmaR, h, hdiff,
        col = "black",
        add = TRUE,
        levels = c(0.02, 0.1, 0.2, 0.3, 0.4),
        method="edge")
axis(1); axis(2, las=2);
legend("topleft", "B)", bty="n")
box()
dev.off()

#install.packages("jpeg")
library(jpeg) # for reading the JPEGs written above

# Combine the two single-panel JPEGs into one stacked figure and save as PDF.
par(mar=rep(0,4)) # no margins
# Lay out the two plots in a single column (top/bottom).
layout(matrix(1:2, ncol=1, byrow=TRUE))
img <- readJPEG("C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0.jpg", native = FALSE)
plot(NA,xlim=0:1,ylim=0:1,xaxt="n",yaxt="n",bty="n")
rasterImage(img,0,0,1.05,1.1)
img <- readJPEG("C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_h.jpg", native = FALSE)
plot(NA,xlim=0:1,ylim=0:1,xaxt="n",yaxt="n",bty="n")
rasterImage(img,0,0,1.05,1.1)
# Write the combined layout to PDF.
dev.print(pdf, "C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0_h.pdf")
|
/code/figures/difference in h.R
|
no_license
|
Bai-Li-NOAA/Model_Comparison_Paper
|
R
| false
| false
| 4,029
|
r
|
# Convert Beverton-Holt stock-recruitment parameters between mean- and
# median-based parameterisations under lognormal recruitment error.
#
# Args:
#   R0:       unfished recruitment.
#   h:        steepness (the formulas divide by h - 0.2).
#   phi:      unfished spawning biomass per recruit.
#   sigmaR:   recruitment error SD on the log scale.
#   mean2med: TRUE applies the mean-to-median factor exp(-sigmaR^2 / 2);
#             otherwise the median-to-mean factor exp(sigmaR^2 / 2).
#
# Returns:
#   A list with the bias-corrected unfished spawning biomass (S0BC),
#   unfished recruitment (R0BC) and steepness (hBC).
convertSRparms <- function(R0, h, phi, sigmaR, mean2med){
  bias <- ifelse(mean2med == TRUE, exp(-0.5 * sigmaR^2), exp(0.5 * sigmaR^2))
  s0_adj <- (bias * 0.8 * R0 * h * phi - 0.2 * phi * R0 * (1 - h)) / (h - 0.2)
  r0_adj <- s0_adj / phi
  # Recruitment at 20% of the corrected unfished spawning biomass.
  rec_num <- bias * 0.8 * R0 * h * 0.2 * s0_adj
  rec_den <- 0.2 * phi * R0 * (1 - h) + (h - 0.2) * 0.2 * s0_adj
  list(S0BC = s0_adj, R0BC = r0_adj, hBC = (rec_num / rec_den) / r0_adj)
}
# Grid of recruitment-error SDs (sigmaR) and steepness values (h) over which
# to map the effect of ignoring lognormal bias correction.
R0 <- 1000000
phi <- 0.01025625
sigmaR <- seq(0.01, 2, by = 0.01)
h <- seq(0.21, 1, by = 0.01)

# R0diff: relative difference in R0 (%); hdiff: absolute difference in h.
R0diff <- hdiff <- matrix(NA, nrow = length(sigmaR), ncol = length(h))
for (i in seq_along(sigmaR)) {
  for (j in seq_along(h)) {
    # Convert once per grid cell (the original called convertSRparms twice).
    conv <- convertSRparms(R0 = R0, h = h[j], phi = phi,
                           sigmaR = sigmaR[i], mean2med = FALSE)
    R0diff[i, j] <- (conv$R0BC - R0) / R0 * 100
    hdiff[i, j] <- conv$hBC - h[j]
  }
}

#### With Color ####
jpeg(file="C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0.jpg", width=170, height=100, units="mm", res=1200)
filled.contour(sigmaR, h, R0diff,
               # Spelled out: `color` relied on partial matching of
               # filled.contour's `color.palette` argument.
               color.palette = terrain.colors,
               xlab="SigmaR", ylab="Median h",
               plot.axes = {
                 axis(1); axis(2);
                 contour(sigmaR, h, R0diff,
                         col = "black",
                         add = TRUE,
                         levels = c(10, 50, 100, 500, 1000, 5000),
                         method="edge")
               },
               key.title = {
                 par(cex.main=0.8);
                 title(main="Relative Diff\nin R0 (%)")
               })
legend("topleft", "A)", bty="n", box.col = "")
dev.off()

jpeg(file="C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_h.jpg", width=150, height=100, units="mm", res=1200)
filled.contour(sigmaR, h, hdiff,
               #color.palette = terrain.colors,
               # FIX: the original passed `color = white`, referencing an
               # undefined object `white` (a run-time error). filled.contour
               # expects a palette *function*, so build an all-white one.
               color.palette = function(n) rep("white", n),
               xlab="SigmaR", ylab="Median h",
               plot.axes = {
                 axis(1); axis(2);
                 contour(sigmaR, h, hdiff,
                         col = "black",
                         add = TRUE,
                         levels = c(0.02, 0.1, 0.2, 0.3, 0.4),
                         method="edge")
               },
               key.title = {
                 par(cex.main=0.8);
                 title(main="Diff in h")
               })
legend("topleft", "B)", bty="n", box.col = "")
dev.off()

#### Without Color ####
# Contour-only two-panel version of the same figures.
jpeg(file="C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0andh.jpg", width=200, height=100, units="mm", res=1200)
par(mfrow=c(1,2))
plot(NA,xlim=range(sigmaR),
     ylim=range(h),xlab="SigmaR",ylab="Median-unbiased h",
     frame=FALSE,axes=FALSE,xaxs="i",yaxs="i", main="Relative Diff in R0 (%)")
contour(sigmaR, h, R0diff,
        col = "black",
        add = TRUE,
        levels = c(10, 50, 100, 500, 1000, 5000),
        method="edge")
axis(1); axis(2, las=2);
legend("topleft", "A)", bty="n")
box()
plot(NA,xlim=range(sigmaR),
     ylim=range(h),xlab="SigmaR",ylab="Median-unbiased h",
     frame=FALSE,axes=FALSE,xaxs="i",yaxs="i", main="Diff in h")
contour(sigmaR, h, hdiff,
        col = "black",
        add = TRUE,
        levels = c(0.02, 0.1, 0.2, 0.3, 0.4),
        method="edge")
axis(1); axis(2, las=2);
legend("topleft", "B)", bty="n")
box()
dev.off()

#install.packages("jpeg")
library(jpeg) # for reading the JPEGs written above

# Combine the two single-panel JPEGs into one stacked figure and save as PDF.
par(mar=rep(0,4)) # no margins
# Lay out the two plots in a single column (top/bottom).
layout(matrix(1:2, ncol=1, byrow=TRUE))
img <- readJPEG("C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0.jpg", native = FALSE)
plot(NA,xlim=0:1,ylim=0:1,xaxt="n",yaxt="n",bty="n")
rasterImage(img,0,0,1.05,1.1)
img <- readJPEG("C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_h.jpg", native = FALSE)
plot(NA,xlim=0:1,ylim=0:1,xaxt="n",yaxt="n",bty="n")
rasterImage(img,0,0,1.05,1.1)
# Write the combined layout to PDF.
dev.print(pdf, "C:/Users/bai.li/Desktop/mcp_results_r/cases/manuscript_figures/diff_in_R0_h.pdf")
|
# Format the Leroux 2014 Newfoundland diet database into the shared
# interaction schema and resolve scientific names with taxize.

# Set the working directory.
# NOTE(review): hard-coded absolute path; prefer relative paths or here::here().
setwd("/home/benjamin/Documents/credits_recherche/data/leroux_2014")

# Libraries.
library(readxl)
library(taxize)

# Load the data.
interaction_df <- read_excel("NL_FullDB_scinames.xlsx")

# FIX: these NA counts previously ran *before* interaction_df was loaded
# and would error in a clean session; moved after read_excel().
sum(is.na(interaction_df$`Focal species`))
sum(is.na(interaction_df$`Food item`))

interaction_df$`Focal species` <- tolower(interaction_df$`Focal species`)
interaction_df$`Food item` <- tolower(interaction_df$`Food item`)

# Keep only unique predator/prey pairs.
interaction_df <- interaction_df[!duplicated(interaction_df[, c("Focal species", "Food item")]), ]

# Add the missing columns so the table matches the full dataset's schema,
# then reorder and rename.
interaction_df$pred_common <- rep(NA, nrow(interaction_df))
interaction_df$prey_common <- rep(NA, nrow(interaction_df))
interaction_df$interaction <- 1
interaction_df$type_web <- rep("extract", nrow(interaction_df))
interaction_df <- interaction_df[, c(5, 1, 6, 2, 7, 8, 3, 4)]
colnames(interaction_df) <- c("pred_common", "pred_scientific", "prey_common", "prey_scientific", "interaction", "type_web", "location", "source")

# Taxonomy correction -- manual fix first.
interaction_df$pred_scientific[interaction_df$pred_scientific == "grey wolf"] <- "canis lupus"

pred_scientific_taxo <- unique(as.character(interaction_df$pred_scientific))
prey_scientific_taxo <- unique(as.character(interaction_df$prey_scientific))

# Resolve predator names (TRUE spelled out; `T` can be reassigned).
pred_scientific_taxo_resolved <- gnr_resolve(pred_scientific_taxo, best_match_only = TRUE, canonical = TRUE)
which(pred_scientific_taxo_resolved$matched_name2 != pred_scientific_taxo_resolved$submitted_name)
pred_scientific_taxo_resolved$submitted_name <- tolower(pred_scientific_taxo_resolved$submitted_name)

# Resolve prey names; names gnr_resolve could not match are appended and
# filled in by hand below.
prey_scientific_taxo_resolved <- gnr_resolve(prey_scientific_taxo, best_match_only = TRUE, canonical = TRUE)
dropped_mat <- as.data.frame(matrix(nrow = length(attr(prey_scientific_taxo_resolved, "not_known")), ncol = 5))
dropped_mat[, 1] <- attr(prey_scientific_taxo_resolved, "not_known")
colnames(dropped_mat) <- colnames(pred_scientific_taxo_resolved)
prey_scientific_taxo_resolved <- rbind(prey_scientific_taxo_resolved, dropped_mat)
# NOTE(review): the hard-coded row indices 456:464 assume a fixed number of
# resolved rows -- verify after any upstream data change.
prey_scientific_taxo_resolved[456, 5] <- "Reithrodontomys"
prey_scientific_taxo_resolved[457, 5] <- "Gastropoda"
prey_scientific_taxo_resolved[458, 5] <- "Algae"
prey_scientific_taxo_resolved[459, 5] <- "Garbage"
prey_scientific_taxo_resolved[460, 5] <- "Alle alle"
prey_scientific_taxo_resolved[461, 5] <- "Forbs"
prey_scientific_taxo_resolved[462, 5] <- "Eleocharis"
prey_scientific_taxo_resolved[463, 5] <- "eufernoia stygica"
prey_scientific_taxo_resolved[464, 5] <- "Plantae"
prey_scientific_taxo_resolved[c(456:464), 2] <- prey_scientific_taxo_resolved[c(456:464), 1]
which(prey_scientific_taxo_resolved$matched_name2 != prey_scientific_taxo_resolved$submitted_name)
prey_taxo_subset_resolved <- prey_scientific_taxo_resolved[c(which(prey_scientific_taxo_resolved$matched_name2 != prey_scientific_taxo_resolved$submitted_name)), ]
prey_scientific_taxo_resolved$matched_name2[prey_scientific_taxo_resolved$matched_name2 == "Human"] <- "Human food waste"
prey_scientific_taxo_resolved$matched_name2[prey_scientific_taxo_resolved$matched_name2 == "Felis"] <- "Felis catus"
prey_scientific_taxo_resolved$submitted_name <- tolower(prey_scientific_taxo_resolved$submitted_name)

# Map the resolved names back onto the interaction table.
interaction_df$pred_scientific <- pred_scientific_taxo_resolved$matched_name2[match(interaction_df$pred_scientific, pred_scientific_taxo_resolved$submitted_name)]
interaction_df$prey_scientific <- prey_scientific_taxo_resolved$matched_name2[match(interaction_df$prey_scientific, prey_scientific_taxo_resolved$submitted_name)]

# Check whether any species failed to resolve.
sum(is.na(interaction_df$pred_scientific))
sum(is.na(interaction_df$prey_scientific))

# Write back to CSV for a final manual clean.
write.csv(interaction_df, "leroux_2014.csv", row.names = FALSE)

# Clear the workspace (kept from the original; avoid when sourcing this
# script from an interactive session).
rm(list = ls())
|
/R/Formatting_scripts_and_functions/leroux2014_formatting.R
|
no_license
|
BenMerSci/credits-recherche
|
R
| false
| false
| 3,799
|
r
|
# Format the Leroux 2014 Newfoundland diet database into the shared
# interaction schema and resolve scientific names with taxize.

# Set the working directory.
# NOTE(review): hard-coded absolute path; prefer relative paths or here::here().
setwd("/home/benjamin/Documents/credits_recherche/data/leroux_2014")

# Libraries.
library(readxl)
library(taxize)

# Load the data.
interaction_df <- read_excel("NL_FullDB_scinames.xlsx")

# FIX: these NA counts previously ran *before* interaction_df was loaded
# and would error in a clean session; moved after read_excel().
sum(is.na(interaction_df$`Focal species`))
sum(is.na(interaction_df$`Food item`))

interaction_df$`Focal species` <- tolower(interaction_df$`Focal species`)
interaction_df$`Food item` <- tolower(interaction_df$`Food item`)

# Keep only unique predator/prey pairs.
interaction_df <- interaction_df[!duplicated(interaction_df[, c("Focal species", "Food item")]), ]

# Add the missing columns so the table matches the full dataset's schema,
# then reorder and rename.
interaction_df$pred_common <- rep(NA, nrow(interaction_df))
interaction_df$prey_common <- rep(NA, nrow(interaction_df))
interaction_df$interaction <- 1
interaction_df$type_web <- rep("extract", nrow(interaction_df))
interaction_df <- interaction_df[, c(5, 1, 6, 2, 7, 8, 3, 4)]
colnames(interaction_df) <- c("pred_common", "pred_scientific", "prey_common", "prey_scientific", "interaction", "type_web", "location", "source")

# Taxonomy correction -- manual fix first.
interaction_df$pred_scientific[interaction_df$pred_scientific == "grey wolf"] <- "canis lupus"

pred_scientific_taxo <- unique(as.character(interaction_df$pred_scientific))
prey_scientific_taxo <- unique(as.character(interaction_df$prey_scientific))

# Resolve predator names (TRUE spelled out; `T` can be reassigned).
pred_scientific_taxo_resolved <- gnr_resolve(pred_scientific_taxo, best_match_only = TRUE, canonical = TRUE)
which(pred_scientific_taxo_resolved$matched_name2 != pred_scientific_taxo_resolved$submitted_name)
pred_scientific_taxo_resolved$submitted_name <- tolower(pred_scientific_taxo_resolved$submitted_name)

# Resolve prey names; names gnr_resolve could not match are appended and
# filled in by hand below.
prey_scientific_taxo_resolved <- gnr_resolve(prey_scientific_taxo, best_match_only = TRUE, canonical = TRUE)
dropped_mat <- as.data.frame(matrix(nrow = length(attr(prey_scientific_taxo_resolved, "not_known")), ncol = 5))
dropped_mat[, 1] <- attr(prey_scientific_taxo_resolved, "not_known")
colnames(dropped_mat) <- colnames(pred_scientific_taxo_resolved)
prey_scientific_taxo_resolved <- rbind(prey_scientific_taxo_resolved, dropped_mat)
# NOTE(review): the hard-coded row indices 456:464 assume a fixed number of
# resolved rows -- verify after any upstream data change.
prey_scientific_taxo_resolved[456, 5] <- "Reithrodontomys"
prey_scientific_taxo_resolved[457, 5] <- "Gastropoda"
prey_scientific_taxo_resolved[458, 5] <- "Algae"
prey_scientific_taxo_resolved[459, 5] <- "Garbage"
prey_scientific_taxo_resolved[460, 5] <- "Alle alle"
prey_scientific_taxo_resolved[461, 5] <- "Forbs"
prey_scientific_taxo_resolved[462, 5] <- "Eleocharis"
prey_scientific_taxo_resolved[463, 5] <- "eufernoia stygica"
prey_scientific_taxo_resolved[464, 5] <- "Plantae"
prey_scientific_taxo_resolved[c(456:464), 2] <- prey_scientific_taxo_resolved[c(456:464), 1]
which(prey_scientific_taxo_resolved$matched_name2 != prey_scientific_taxo_resolved$submitted_name)
prey_taxo_subset_resolved <- prey_scientific_taxo_resolved[c(which(prey_scientific_taxo_resolved$matched_name2 != prey_scientific_taxo_resolved$submitted_name)), ]
prey_scientific_taxo_resolved$matched_name2[prey_scientific_taxo_resolved$matched_name2 == "Human"] <- "Human food waste"
prey_scientific_taxo_resolved$matched_name2[prey_scientific_taxo_resolved$matched_name2 == "Felis"] <- "Felis catus"
prey_scientific_taxo_resolved$submitted_name <- tolower(prey_scientific_taxo_resolved$submitted_name)

# Map the resolved names back onto the interaction table.
interaction_df$pred_scientific <- pred_scientific_taxo_resolved$matched_name2[match(interaction_df$pred_scientific, pred_scientific_taxo_resolved$submitted_name)]
interaction_df$prey_scientific <- prey_scientific_taxo_resolved$matched_name2[match(interaction_df$prey_scientific, prey_scientific_taxo_resolved$submitted_name)]

# Check whether any species failed to resolve.
sum(is.na(interaction_df$pred_scientific))
sum(is.na(interaction_df$prey_scientific))

# Write back to CSV for a final manual clean.
write.csv(interaction_df, "leroux_2014.csv", row.names = FALSE)

# Clear the workspace (kept from the original; avoid when sourcing this
# script from an interactive session).
rm(list = ls())
|
# Package load hook for inspectdf.
#
# Registers every column name the package references via non-standard
# evaluation (dplyr/ggplot2 pipelines) with utils::globalVariables(), so
# that R CMD check does not emit "no visible binding for global variable"
# notes. Returns invisibly; has no other effect.
.onLoad <- function(libname = find.package("inspectdf"), pkgname = "inspectdf"){
  # Names used via NSE throughout the package source.
  nse_names <- c(
    ".", "prop.x", "prop.y", "col_1", "col_2", "correlation", "prop",
    "X1", "X2", "pair", "lower", "upper", "value", "col_name", "V1",
    "n_levels", "levels.x", "levels.y", "diff_1_2", "diff_2_1",
    "p_value", "n", "type", "mid", "hist.x", "hist.y", "n.x",
    "col_name2", "n.y", "df_input", "count", "col_type", "corr",
    "is_sig", "index", "data_frame", "pcnt", "level_key", "dfi",
    "colval", "fisher_p", "cname", "first_num", "new_level_key", "zs",
    "dfn", "cnt", "col_names", "white_labs", "black_labs", "cls",
    "alpha", "jsd", "significant", "bar_width", "corr_1", "corr_2",
    "data_frame_n", "col_vec", "size", "df_cat_fact", "ymax", "se",
    "col_name1", "prop_z", "data", "pcnt_nna", "bytes", "md_cor",
    "md_pcnt", "nna", "breaks"
  )
  # globalVariables() only exists from R 2.15.1 onwards; guard for older R.
  if (getRversion() >= "2.15.1") {
    utils::globalVariables(nse_names)
  }
  invisible()
}
|
/inspectdf/R/zzz.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 1,351
|
r
|
# Package load hook for inspectdf.
#
# Registers every column name the package references via non-standard
# evaluation (dplyr/ggplot2 pipelines) with utils::globalVariables(), so
# that R CMD check does not emit "no visible binding for global variable"
# notes. Returns invisibly; has no other effect.
.onLoad <- function(libname = find.package("inspectdf"), pkgname = "inspectdf"){
  # Names used via NSE throughout the package source.
  nse_names <- c(
    ".", "prop.x", "prop.y", "col_1", "col_2", "correlation", "prop",
    "X1", "X2", "pair", "lower", "upper", "value", "col_name", "V1",
    "n_levels", "levels.x", "levels.y", "diff_1_2", "diff_2_1",
    "p_value", "n", "type", "mid", "hist.x", "hist.y", "n.x",
    "col_name2", "n.y", "df_input", "count", "col_type", "corr",
    "is_sig", "index", "data_frame", "pcnt", "level_key", "dfi",
    "colval", "fisher_p", "cname", "first_num", "new_level_key", "zs",
    "dfn", "cnt", "col_names", "white_labs", "black_labs", "cls",
    "alpha", "jsd", "significant", "bar_width", "corr_1", "corr_2",
    "data_frame_n", "col_vec", "size", "df_cat_fact", "ymax", "se",
    "col_name1", "prop_z", "data", "pcnt_nna", "bytes", "md_cor",
    "md_pcnt", "nna", "breaks"
  )
  # globalVariables() only exists from R 2.15.1 onwards; guard for older R.
  if (getRversion() >= "2.15.1") {
    utils::globalVariables(nse_names)
  }
  invisible()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Viz3_plot.R
\name{Viz3_plot}
\alias{Viz3_plot}
\title{Plots a treemap with size = to average number of connexions & color = connexion duration with respect to a given analysis axis}
\usage{
Viz3_plot(a_set, analysis_axis = "None", hierarchy_view)
}
\arguments{
\item{a_set}{a dataframe}

\item{analysis_axis}{a defined string of characters (default is "None")}

\item{hierarchy_view}{a boolean}
}
\value{
a treemap object
}
\description{
Plots a treemap with size = to average number of connexions & color = connexion duration with respect to a given analysis axis
}
\examples{
\dontrun{
A MODIFIER
start<-lubridate::ymd("2016-09-27")
end<-lubridate::ymd("2016-09-30")
duration_min<- 10
duration_max<- 5000
districts<-c(1,2,5,8,"All")
cat_sites<-c("Mairie","Bibliothรจque")
sites<-c("All")
countries<-c("All")
devices<-c("smartphone","tablet")
analysis_axis<-"category_device"
hierarchy_view<-FALSE
a_set <- Viz1_Filter(start, end, duration_min, duration_max, districts, cat_sites, sites, countries, devices) \%>\%
number_connexions(start,end,analysis_axis)
Viz3_plot(a_set, analysis_axis, hierarchy_view)
}
}
|
/man/Viz3_plot.Rd
|
no_license
|
SachaIZADI/wifiparis
|
R
| false
| true
| 1,183
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Viz3_plot.R
\name{Viz3_plot}
\alias{Viz3_plot}
\title{Plots a treemap with size = to average number of connexions & color = connexion duration with respect to a given analysis axis}
\usage{
Viz3_plot(a_set, analysis_axis = "None", hierarchy_view)
}
\arguments{
\item{a_set}{a dataframe}

\item{analysis_axis}{a defined string of characters (default is "None")}

\item{hierarchy_view}{a boolean}
}
\value{
a treemap object
}
\description{
Plots a treemap with size = to average number of connexions & color = connexion duration with respect to a given analysis axis
}
\examples{
\dontrun{
A MODIFIER
start<-lubridate::ymd("2016-09-27")
end<-lubridate::ymd("2016-09-30")
duration_min<- 10
duration_max<- 5000
districts<-c(1,2,5,8,"All")
cat_sites<-c("Mairie","Bibliothรจque")
sites<-c("All")
countries<-c("All")
devices<-c("smartphone","tablet")
analysis_axis<-"category_device"
hierarchy_view<-FALSE
a_set <- Viz1_Filter(start, end, duration_min, duration_max, districts, cat_sites, sites, countries, devices) \%>\%
number_connexions(start,end,analysis_axis)
Viz3_plot(a_set, analysis_axis, hierarchy_view)
}
}
|
# Script Notes from MEAP Book on ML
# Support Vector Machines
# February 23, 2020
#
# Support Vector Machine algorithms are good for classification prediction problems.
# They use several tricks/features to help identify/describe a hyperplane (a line in a 2-dim problem)
# that separates classes of observations.
# The optimal hyperplane has several attributes:
# 1. The space between the two classes is maximized (as measured by the edge observations)
# 2. If there is no 'pure' hyperplane solution, it finds the optimal case with the least 'cost' (ie transgressing
# observations.)
# 3. It can use kernels (think balls under a blanket) to create distortions in the shape but make it easier to describe
# 'pure' class regions.
# SVM is not good on categorical variables; is good on continuous ones.
# I. Set Out Key Capabilities, Load & Explore Data -------------------------------
library(mlr)
library(tidyverse)
library(dplyr)
# NOTE(review): loading plyr after dplyr masks several dplyr verbs
# (e.g. summarise/mutate); load plyr before dplyr, or drop it, if possible.
library(plyr)
# load spam dataset ... last column is called 'type' and is our target variable, indicating whether spam or not.
data(spam, package = 'kernlab')
spamtib <- as_tibble(spam)
# it appears most of these variables are giving share counts as % of total characters in email on presence of variables
# here is code to get the scoop on descriptions
?kernlab::spam
# II. Define Task, Learner, Model, Hyperparameter Selection ---------------
spamtask <- makeClassifTask(data = spamtib, target = 'type')
SVM <- makeLearner('classif.svm')
# many options on choosing hyperparameters.
# In general, most important ones are
# i) the kernel
# ii) the cost
# iii) the degree (used by the polynomial kernel only)
# iv) gamma
# BUG FIX: the valid e1071/mlr kernel name is 'polynomial', not 'polynomials';
# the misspelled value makes tuneParams() fail whenever that kernel is drawn.
kernels <- c('polynomial','radial','sigmoid')
svmParamSpace <- makeParamSet(
  makeDiscreteParam('kernel', values = kernels),
  makeIntegerParam('degree', lower = 1, upper = 3),
  makeNumericParam('cost', lower = 0.1, upper = 10),
  makeNumericParam('gamma', lower = 0.1, upper= 10))
# this creates an enormous number of possibilities of the optimal hyperparameter.
# rather than going through each one explicitly, will do a random search of n possibilities
# then take the best of those and use it.
randomsearch <- makeTuneControlRandom(maxit = 20)
CVForTuning <- makeResampleDesc('Holdout', split = 2/3)
# Run the random search in parallel across all available cores so the tuning
# doesn't take forever.
library(parallelMap)
library(parallel)
parallelStartSocket(cpus = detectCores())
tunedsvmPars <- tuneParams('classif.svm', task = spamtask, resampling = CVForTuning,
                           par.set = svmParamSpace,
                           control = randomsearch)
parallelStop()
# III. Train the model with the determined hyperparameters ---------------
tunedSvm <- setHyperPars(makeLearner("classif.svm"),
                         par.vals = tunedsvmPars$x)
tunedSvmModel <- train(tunedSvm, spamtask)
|
/Script Notes on SVM Algorithms from ML Book 20200223.R
|
no_license
|
boxcarrovers/M2A2-R-Scripts
|
R
| false
| false
| 3,188
|
r
|
# Script Notes from MEAP Book on ML
# Support Vector Machines
# February 23, 2020
#
# Support Vector Machine algorithms are good for classification prediction problems.
# They use several tricks/features to help identify/describe a hyperplane (a line in a 2-dim problem)
# that separates classes of observations.
# The optimal hyperplane has several attributes:
# 1. The space between the two classes is maximized (as measured by the edge observations)
# 2. If there is no 'pure' hyperplane solution, it finds the optimal case with the least 'cost' (ie transgressing
# observations.)
# 3. It can use kernels (think balls under a blanket) to create distortions in the shape but make it easier to describe
# 'pure' class regions.
# SVM is not good on categorical variables; is good on continuous ones.
# I. Set Out Key Capabilities, Load & Explore Data -------------------------------
library(mlr)
library(tidyverse)
library(dplyr)
# NOTE(review): loading plyr after dplyr masks several dplyr verbs
# (e.g. summarise/mutate); load plyr before dplyr, or drop it, if possible.
library(plyr)
# load spam dataset ... last column is called 'type' and is our target variable, indicating whether spam or not.
data(spam, package = 'kernlab')
spamtib <- as_tibble(spam)
# it appears most of these variables are giving share counts as % of total characters in email on presence of variables
# here is code to get the scoop on descriptions
?kernlab::spam
# II. Define Task, Learner, Model, Hyperparameter Selection ---------------
spamtask <- makeClassifTask(data = spamtib, target = 'type')
SVM <- makeLearner('classif.svm')
# many options on choosing hyperparameters.
# In general, most important ones are
# i) the kernel
# ii) the cost
# iii) the degree (used by the polynomial kernel only)
# iv) gamma
# BUG FIX: the valid e1071/mlr kernel name is 'polynomial', not 'polynomials';
# the misspelled value makes tuneParams() fail whenever that kernel is drawn.
kernels <- c('polynomial','radial','sigmoid')
svmParamSpace <- makeParamSet(
  makeDiscreteParam('kernel', values = kernels),
  makeIntegerParam('degree', lower = 1, upper = 3),
  makeNumericParam('cost', lower = 0.1, upper = 10),
  makeNumericParam('gamma', lower = 0.1, upper= 10))
# this creates an enormous number of possibilities of the optimal hyperparameter.
# rather than going through each one explicitly, will do a random search of n possibilities
# then take the best of those and use it.
randomsearch <- makeTuneControlRandom(maxit = 20)
CVForTuning <- makeResampleDesc('Holdout', split = 2/3)
# Run the random search in parallel across all available cores so the tuning
# doesn't take forever.
library(parallelMap)
library(parallel)
parallelStartSocket(cpus = detectCores())
tunedsvmPars <- tuneParams('classif.svm', task = spamtask, resampling = CVForTuning,
                           par.set = svmParamSpace,
                           control = randomsearch)
parallelStop()
# III. Train the model with the determined hyperparameters ---------------
tunedSvm <- setHyperPars(makeLearner("classif.svm"),
                         par.vals = tunedsvmPars$x)
tunedSvmModel <- train(tunedSvm, spamtask)
|
# cmsaf.divc: divide every timestep of a NetCDF variable by a constant.
#
# Arguments:
#   var     - name of the variable inside `infile` to be divided
#   const   - constant divisor (default 1); const == 0 raises an error
#   infile  - path of the input NetCDF file
#   outfile - path of the NetCDF file that will be created
#
# The lon/lat/time structure of the input (plus time_bnds, when present) is
# recreated in the output file and var/const is written for every timestep.
# Relies on the ncdf4 package (nc_open, ncvar_get, ncvar_put, ...) and on the
# CM SAF helpers checkfile() and get_time(), defined elsewhere in the package.
cmsaf.divc <-
function(var,const=1,infile,outfile){
  # Guard against division by zero before touching any file.
  if (const==0){
    stop("Division by zero not allowed!")
  }
  start.time <- Sys.time()
  # check filename
  filecheck <- checkfile(infile,outfile)
  if (filecheck[[1]]){
    infile <- filecheck[[2]]
    outfile <- filecheck[[3]]
    # define standard names of variables and dimensions
    t_name <- "time"
    t_standard_name = "time"
    t_units = "undefined"
    t_calendar = "undefined"
    nb2_units = "1"
    lat_name = "latitude"
    lat_standard_name = "latitude"
    lat_long_name = "latitude"
    lat_units = "degrees_north"
    lat_axis = "Y"
    lon_name = "longitude"
    lon_standard_name = "longitude"
    lon_long_name = "longitude"
    lon_units = "degrees_east"
    lon_axis = "X"
    v_standard_name = "undefined"
    v_long_name = "undefined"
    v_units = "undefined"
    v__FillValue = "undefined"
    v_missing_value = "undefined"
    info = "Created with the CM SAF R toolbox."
    var_prec="double"
    # Attribute names to copy from the input variable; v_att_list holds the
    # local variable names they are written into via assign() below.
    att_list <- c("standard_name","long_name","units","_FillValue","missing_value","calendar")
    v_att_list <- c("v_standard_name","v_long_name","v_units","v__FillValue","v_missing_value","v_calendar")
    # get file information
    cat("get file information", "\n")
    id <- nc_open(infile)
    # get information about dimensions
    dimnames <- names(id$dim)
    # check standard_names of dimensions (override the default lon/lat/time
    # dimension names with whatever the file actually uses)
    for (i in 1:length(dimnames)){
      sn <- ncatt_get(id,dimnames[i],"standard_name")
      if (length(sn)>0){
        sn <- sn$value
        if (sn=="longitude")(lon_name <- dimnames[i])
        if (sn=="latitude")(lat_name <- dimnames[i])
        if (sn=="time")(t_name <- dimnames[i])
      }
    }
    # Pick up the time units and calendar attributes, if present.
    # NOTE(review): the loop scans every dimension's attribute list but always
    # queries t_name, so later iterations just repeat the same lookup.
    for (i in 1:length(dimnames)){
      if (t_name %in% dimnames){
        attnames <- names(id$dim[[i]])
        if ("units" %in% attnames){
          t_units <- ncatt_get(id,t_name,"units")$value}
        if ("calendar" %in% attnames){
          t_calendar <- ncatt_get(id,t_name,"calendar")$value}
      }
    }
    # get information about variables
    varnames <- names(id$var)
    if (var %in% varnames){
      # Copy the six standard attributes of the target variable when present.
      for (i in 1:6){
        att_dum <- ncatt_get(id,var,att_list[i])
        if (att_dum$hasatt){
          assign(v_att_list[i],att_dum$value)}
      }
      # get details of file
      lon <- ncvar_get(id,lon_name)
      lat <- ncvar_get(id,lat_name)
      time1 <- ncvar_get(id,t_name)
      if ("time_bnds" %in% varnames){
        tbnds1 <- ncvar_get(id,"time_bnds",collapse_degen=FALSE)
      }
    }else{
      nc_close(id)
      # NOTE(review): cat() returns NULL, so the stop() message itself is
      # empty; the listing is only printed to the console.
      stop(cat(paste("Variable ",var," not found! File contains: ",varnames,sep="")),"\n")}
    # Fall back to the other attribute when only one of _FillValue /
    # missing_value is defined in the input.
    if (v__FillValue == "undefined"){
      v__FillValue = v_missing_value}
    if (v_missing_value == "undefined"){
      v_missing_value = v__FillValue}
    nc_close(id)
    # extract time information (split "YYYY-MM-DD" strings into parts;
    # mon/years are computed but not used further in this function)
    date.time <- as.Date(get_time(t_units,time1))
    a <- as.character(date.time)
    b <- strsplit(a,"-")
    d <- unlist(b)
    dummy <- length(d)
    dum <- seq(2,dummy,3)
    mon <- as.integer(d[dum])
    dum <- seq(1,dummy,3)
    years <- as.integer(d[dum])
    # Placeholder slab (a single all-missing timestep) used to initialise the
    # output variable before the real data is written in the loop below.
    target <- array(NA,dim=c(length(lon),length(lat),1))
    time_bnds <- array(NA, dim=c(2,1))
    # create netcdf
    cat("create netcdf", "\n")
    target[is.na(target)] <- v_missing_value
    nb2 <- c(0,1)
    x <- ncdim_def(name="lon",units=lon_units,vals=lon)
    y <- ncdim_def(name="lat",units=lat_units,vals=lat)
    t <- ncdim_def(name="time",units=t_units,vals=0,unlim=TRUE)
    if ("time_bnds" %in% varnames){
      tb <- ncdim_def(name="nb2",units="1",vals=nb2)
    }
    var1 <- ncvar_def(name=var,units=v_units,dim=list(x,y,t),prec=var_prec)
    if ("time_bnds" %in% varnames){
      # Output file WITH a time_bnds variable mirroring the input.
      var2 <- ncvar_def(name="time_bnds",units="1",dim=list(tb,t),prec="double")
      vars <- list(var1,var2)
      ncnew <- nc_create(outfile,vars)
      ncvar_put(ncnew,var1,target)
      ncvar_put(ncnew,var2,time_bnds)
      ncatt_put(ncnew,var,"standard_name",v_standard_name,prec="text")
      ncatt_put(ncnew,var,"long_name",v_long_name,prec="text")
      ncatt_put(ncnew,var,"_FillValue",v__FillValue,prec=var_prec)
      ncatt_put(ncnew,var,"missing_value",v_missing_value,prec=var_prec)
      ncatt_put(ncnew,"time","standard_name",t_standard_name,prec="text")
      ncatt_put(ncnew,"time","calendar",t_calendar,prec="text")
      ncatt_put(ncnew,"time","bounds","time_bnds",prec="text")
      ncatt_put(ncnew,"lon","standard_name",lon_standard_name,prec="text")
      ncatt_put(ncnew,"lon","long_name",lon_long_name,prec="text")
      ncatt_put(ncnew,"lon","axis",lon_axis,prec="text")
      ncatt_put(ncnew,"lat","standard_name",lat_standard_name,prec="text")
      ncatt_put(ncnew,"lat","long_name",lat_long_name,prec="text")
      ncatt_put(ncnew,"lat","axis",lat_axis,prec="text")
      # varid 0 = global attribute.
      ncatt_put(ncnew,0,"Info",info,prec="text")
    } else {
      # Output file WITHOUT time bounds.
      vars <- list(var1)
      ncnew <- nc_create(outfile,vars)
      ncvar_put(ncnew,var1,target)
      ncatt_put(ncnew,var,"standard_name",v_standard_name,prec="text")
      ncatt_put(ncnew,var,"long_name",v_long_name,prec="text")
      ncatt_put(ncnew,var,"_FillValue",v__FillValue,prec=var_prec)
      ncatt_put(ncnew,var,"missing_value",v_missing_value,prec=var_prec)
      ncatt_put(ncnew,"time","standard_name",t_standard_name,prec="text")
      ncatt_put(ncnew,"time","calendar",t_calendar,prec="text")
      ncatt_put(ncnew,"lon","standard_name",lon_standard_name,prec="text")
      ncatt_put(ncnew,"lon","long_name",lon_long_name,prec="text")
      ncatt_put(ncnew,"lon","axis",lon_axis,prec="text")
      ncatt_put(ncnew,"lat","standard_name",lat_standard_name,prec="text")
      ncatt_put(ncnew,"lat","long_name",lat_long_name,prec="text")
      ncatt_put(ncnew,"lat","axis",lat_axis,prec="text")
      ncatt_put(ncnew,0,"Info",info,prec="text")
    }
    # divide each timestep by the constant (comment fixed: the code divides,
    # it does not multiply) and write it to the output file
    id <- nc_open(infile)
    count <- 1
    for (i in 1:length(time1)){
      dum_dat <- ncvar_get(id,var,start=c(1,1,i),count=c(-1,-1,1))
      cat("\r","divide timestep by constant ",i," of ",length(time1),sep="")
      dum_dat <- dum_dat/const
      dum_dat[is.na(dum_dat)] <- v_missing_value
      ncvar_put(ncnew,var1,dum_dat,start=c(1,1,count),count=c(-1,-1,1))
      ncvar_put(ncnew,t,time1[i], start=count, count=1)
      if ("time_bnds" %in% varnames){
        ncvar_put(ncnew,var2,tbnds1[,i],start=c(1,count),count=c(-1,1))
      }
      count <- count+1
    }
    nc_close(id)
    nc_close(ncnew)
    end.time <- Sys.time()
    cat("\n","processing time: ",round(as.numeric(end.time-start.time,units="secs"),digits=2)," s",sep="", "\n")
  } # endif filecheck
}
|
/cmsaf/R/cmsaf.divc.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,518
|
r
|
# cmsaf.divc: divide every timestep of a NetCDF variable by a constant.
#
# Arguments:
#   var     - name of the variable inside `infile` to be divided
#   const   - constant divisor (default 1); const == 0 raises an error
#   infile  - path of the input NetCDF file
#   outfile - path of the NetCDF file that will be created
#
# The lon/lat/time structure of the input (plus time_bnds, when present) is
# recreated in the output file and var/const is written for every timestep.
# Relies on the ncdf4 package (nc_open, ncvar_get, ncvar_put, ...) and on the
# CM SAF helpers checkfile() and get_time(), defined elsewhere in the package.
cmsaf.divc <-
function(var,const=1,infile,outfile){
  # Guard against division by zero before touching any file.
  if (const==0){
    stop("Division by zero not allowed!")
  }
  start.time <- Sys.time()
  # check filename
  filecheck <- checkfile(infile,outfile)
  if (filecheck[[1]]){
    infile <- filecheck[[2]]
    outfile <- filecheck[[3]]
    # define standard names of variables and dimensions
    t_name <- "time"
    t_standard_name = "time"
    t_units = "undefined"
    t_calendar = "undefined"
    nb2_units = "1"
    lat_name = "latitude"
    lat_standard_name = "latitude"
    lat_long_name = "latitude"
    lat_units = "degrees_north"
    lat_axis = "Y"
    lon_name = "longitude"
    lon_standard_name = "longitude"
    lon_long_name = "longitude"
    lon_units = "degrees_east"
    lon_axis = "X"
    v_standard_name = "undefined"
    v_long_name = "undefined"
    v_units = "undefined"
    v__FillValue = "undefined"
    v_missing_value = "undefined"
    info = "Created with the CM SAF R toolbox."
    var_prec="double"
    # Attribute names to copy from the input variable; v_att_list holds the
    # local variable names they are written into via assign() below.
    att_list <- c("standard_name","long_name","units","_FillValue","missing_value","calendar")
    v_att_list <- c("v_standard_name","v_long_name","v_units","v__FillValue","v_missing_value","v_calendar")
    # get file information
    cat("get file information", "\n")
    id <- nc_open(infile)
    # get information about dimensions
    dimnames <- names(id$dim)
    # check standard_names of dimensions (override the default lon/lat/time
    # dimension names with whatever the file actually uses)
    for (i in 1:length(dimnames)){
      sn <- ncatt_get(id,dimnames[i],"standard_name")
      if (length(sn)>0){
        sn <- sn$value
        if (sn=="longitude")(lon_name <- dimnames[i])
        if (sn=="latitude")(lat_name <- dimnames[i])
        if (sn=="time")(t_name <- dimnames[i])
      }
    }
    # Pick up the time units and calendar attributes, if present.
    # NOTE(review): the loop scans every dimension's attribute list but always
    # queries t_name, so later iterations just repeat the same lookup.
    for (i in 1:length(dimnames)){
      if (t_name %in% dimnames){
        attnames <- names(id$dim[[i]])
        if ("units" %in% attnames){
          t_units <- ncatt_get(id,t_name,"units")$value}
        if ("calendar" %in% attnames){
          t_calendar <- ncatt_get(id,t_name,"calendar")$value}
      }
    }
    # get information about variables
    varnames <- names(id$var)
    if (var %in% varnames){
      # Copy the six standard attributes of the target variable when present.
      for (i in 1:6){
        att_dum <- ncatt_get(id,var,att_list[i])
        if (att_dum$hasatt){
          assign(v_att_list[i],att_dum$value)}
      }
      # get details of file
      lon <- ncvar_get(id,lon_name)
      lat <- ncvar_get(id,lat_name)
      time1 <- ncvar_get(id,t_name)
      if ("time_bnds" %in% varnames){
        tbnds1 <- ncvar_get(id,"time_bnds",collapse_degen=FALSE)
      }
    }else{
      nc_close(id)
      # NOTE(review): cat() returns NULL, so the stop() message itself is
      # empty; the listing is only printed to the console.
      stop(cat(paste("Variable ",var," not found! File contains: ",varnames,sep="")),"\n")}
    # Fall back to the other attribute when only one of _FillValue /
    # missing_value is defined in the input.
    if (v__FillValue == "undefined"){
      v__FillValue = v_missing_value}
    if (v_missing_value == "undefined"){
      v_missing_value = v__FillValue}
    nc_close(id)
    # extract time information (split "YYYY-MM-DD" strings into parts;
    # mon/years are computed but not used further in this function)
    date.time <- as.Date(get_time(t_units,time1))
    a <- as.character(date.time)
    b <- strsplit(a,"-")
    d <- unlist(b)
    dummy <- length(d)
    dum <- seq(2,dummy,3)
    mon <- as.integer(d[dum])
    dum <- seq(1,dummy,3)
    years <- as.integer(d[dum])
    # Placeholder slab (a single all-missing timestep) used to initialise the
    # output variable before the real data is written in the loop below.
    target <- array(NA,dim=c(length(lon),length(lat),1))
    time_bnds <- array(NA, dim=c(2,1))
    # create netcdf
    cat("create netcdf", "\n")
    target[is.na(target)] <- v_missing_value
    nb2 <- c(0,1)
    x <- ncdim_def(name="lon",units=lon_units,vals=lon)
    y <- ncdim_def(name="lat",units=lat_units,vals=lat)
    t <- ncdim_def(name="time",units=t_units,vals=0,unlim=TRUE)
    if ("time_bnds" %in% varnames){
      tb <- ncdim_def(name="nb2",units="1",vals=nb2)
    }
    var1 <- ncvar_def(name=var,units=v_units,dim=list(x,y,t),prec=var_prec)
    if ("time_bnds" %in% varnames){
      # Output file WITH a time_bnds variable mirroring the input.
      var2 <- ncvar_def(name="time_bnds",units="1",dim=list(tb,t),prec="double")
      vars <- list(var1,var2)
      ncnew <- nc_create(outfile,vars)
      ncvar_put(ncnew,var1,target)
      ncvar_put(ncnew,var2,time_bnds)
      ncatt_put(ncnew,var,"standard_name",v_standard_name,prec="text")
      ncatt_put(ncnew,var,"long_name",v_long_name,prec="text")
      ncatt_put(ncnew,var,"_FillValue",v__FillValue,prec=var_prec)
      ncatt_put(ncnew,var,"missing_value",v_missing_value,prec=var_prec)
      ncatt_put(ncnew,"time","standard_name",t_standard_name,prec="text")
      ncatt_put(ncnew,"time","calendar",t_calendar,prec="text")
      ncatt_put(ncnew,"time","bounds","time_bnds",prec="text")
      ncatt_put(ncnew,"lon","standard_name",lon_standard_name,prec="text")
      ncatt_put(ncnew,"lon","long_name",lon_long_name,prec="text")
      ncatt_put(ncnew,"lon","axis",lon_axis,prec="text")
      ncatt_put(ncnew,"lat","standard_name",lat_standard_name,prec="text")
      ncatt_put(ncnew,"lat","long_name",lat_long_name,prec="text")
      ncatt_put(ncnew,"lat","axis",lat_axis,prec="text")
      # varid 0 = global attribute.
      ncatt_put(ncnew,0,"Info",info,prec="text")
    } else {
      # Output file WITHOUT time bounds.
      vars <- list(var1)
      ncnew <- nc_create(outfile,vars)
      ncvar_put(ncnew,var1,target)
      ncatt_put(ncnew,var,"standard_name",v_standard_name,prec="text")
      ncatt_put(ncnew,var,"long_name",v_long_name,prec="text")
      ncatt_put(ncnew,var,"_FillValue",v__FillValue,prec=var_prec)
      ncatt_put(ncnew,var,"missing_value",v_missing_value,prec=var_prec)
      ncatt_put(ncnew,"time","standard_name",t_standard_name,prec="text")
      ncatt_put(ncnew,"time","calendar",t_calendar,prec="text")
      ncatt_put(ncnew,"lon","standard_name",lon_standard_name,prec="text")
      ncatt_put(ncnew,"lon","long_name",lon_long_name,prec="text")
      ncatt_put(ncnew,"lon","axis",lon_axis,prec="text")
      ncatt_put(ncnew,"lat","standard_name",lat_standard_name,prec="text")
      ncatt_put(ncnew,"lat","long_name",lat_long_name,prec="text")
      ncatt_put(ncnew,"lat","axis",lat_axis,prec="text")
      ncatt_put(ncnew,0,"Info",info,prec="text")
    }
    # divide each timestep by the constant (comment fixed: the code divides,
    # it does not multiply) and write it to the output file
    id <- nc_open(infile)
    count <- 1
    for (i in 1:length(time1)){
      dum_dat <- ncvar_get(id,var,start=c(1,1,i),count=c(-1,-1,1))
      cat("\r","divide timestep by constant ",i," of ",length(time1),sep="")
      dum_dat <- dum_dat/const
      dum_dat[is.na(dum_dat)] <- v_missing_value
      ncvar_put(ncnew,var1,dum_dat,start=c(1,1,count),count=c(-1,-1,1))
      ncvar_put(ncnew,t,time1[i], start=count, count=1)
      if ("time_bnds" %in% varnames){
        ncvar_put(ncnew,var2,tbnds1[,i],start=c(1,count),count=c(-1,1))
      }
      count <- count+1
    }
    nc_close(id)
    nc_close(ncnew)
    end.time <- Sys.time()
    cat("\n","processing time: ",round(as.numeric(end.time-start.time,units="secs"),digits=2)," s",sep="", "\n")
  } # endif filecheck
}
|
\name{gmQO}
\alias{gmQO}
\title{
Quasi-Optimization
}
\description{
Function for deriving a Markov generator matrix estimate based on the quasi-optimization procedure of Kreinin and Sidelnikova, 2001
}
\usage{
gmQO(tmrel, te, logmethod = "Eigen")
}
\arguments{
\item{tmrel}{
matrix of relative transition frequencies
}
\item{te}{
time elapsed in transition process
}
\item{logmethod}{
method for computation of matrix logarithm, by default eigendecomposition is chosen (see \code{?logm} from \code{expm} package for more information)
}
}
\details{
From the set of possible Markov generator matrices, the one is chosen which is closest to a matrix logarithm based candidate solution in terms of sum of squared deviations.
}
\value{
generator matrix estimate
}
\references{
E. Kreinin and M. Sidelnikova: Regularization Algorithms for Transition Matrices. Algo Research Quarterly 4(1):23-40, 2001
}
\author{
Marius Pfeuffer
}
\examples{
data(tm_abs)
## Derive matrix of relative transition frequencies
tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1))
## Derive quasi optimization generator matrix estimate
gmqo=gmQO(tm_rel,1)
gmqo
}
|
/man/gmQO.Rd
|
no_license
|
cran/ctmcd
|
R
| false
| false
| 1,205
|
rd
|
\name{gmQO}
\alias{gmQO}
\title{
Quasi-Optimization
}
\description{
Function for deriving a Markov generator matrix estimate based on the quasi-optimization procedure of Kreinin and Sidelnikova, 2001
}
\usage{
gmQO(tmrel, te, logmethod = "Eigen")
}
\arguments{
\item{tmrel}{
matrix of relative transition frequencies
}
\item{te}{
time elapsed in transition process
}
\item{logmethod}{
method for computation of matrix logarithm, by default eigendecomposition is chosen (see \code{?logm} from \code{expm} package for more information)
}
}
\details{
From the set of possible Markov generator matrices, the one is chosen which is closest to a matrix logarithm based candidate solution in terms of sum of squared deviations.
}
\value{
generator matrix estimate
}
\references{
E. Kreinin and M. Sidelnikova: Regularization Algorithms for Transition Matrices. Algo Research Quarterly 4(1):23-40, 2001
}
\author{
Marius Pfeuffer
}
\examples{
data(tm_abs)
## Derive matrix of relative transition frequencies
tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1))
## Derive quasi optimization generator matrix estimate
gmqo=gmQO(tm_rel,1)
gmqo
}
|
# Extracted example for agRee::agree.sddm.
library(agRee)
### Name: agree.sddm
### Title: Mean Normalized Smallest Detectable Difference
### Aliases: agree.sddm
### Keywords: htest
### ** Examples
# petVT: example dataset shipped with the agRee package (PET data, per the
# name) -- see ?petVT for details.
data(petVT)
# Mean normalized smallest detectable difference for the cerebellum readings.
agree.sddm(petVT$cerebellum)
|
/data/genthat_extracted_code/agRee/examples/agree.sddm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 206
|
r
|
# Extracted example for agRee::agree.sddm.
library(agRee)
### Name: agree.sddm
### Title: Mean Normalized Smallest Detectable Difference
### Aliases: agree.sddm
### Keywords: htest
### ** Examples
# petVT: example dataset shipped with the agRee package (PET data, per the
# name) -- see ?petVT for details.
data(petVT)
# Mean normalized smallest detectable difference for the cerebellum readings.
agree.sddm(petVT$cerebellum)
|
#################FINDING USEFUL COEFFICIENTS
library(tidyverse)
library(glmnet)
library(Metrics)
#SIMPLE GLM MODEL#
# NOTE(review): trainData/testData are expected to exist before this section
# runs (they are not created in this script) -- confirm upstream.
model <- glm(pstr ~ ageyear+SEX+higheduc+think_will_hospitalized_cv+fam_wage_loss_cv+fam_exp1_cv+fam_actions_cv___5+fam_actions_cv___6, data = trainData)
predictionTest <- predict(model, testData)
testSubmission1 <- data.frame(test_id = testData$test_id, pstr = predictionTest)
# Impute missing predictions with the mean prediction (use TRUE, not T).
testSubmission1$pstr[is.na(testSubmission1$pstr)] <- mean(testSubmission1$pstr, na.rm = TRUE)
write.csv(testSubmission1,"C:\\Users\\burto\\Desktop\\DataScienceCompetition\\testSubmission.csv", row.names = FALSE)
####FROM KAGGLE NOTEBOOK
list.files(path = "../input")
train <- read.csv("../input/stressdata2/train.csv")
test <- read.csv("../input/stressdata2/test.csv")
# Split data for testing purposes (50/50 holdout).
# NOTE(review): no set.seed(), so the split is not reproducible across runs.
trainSplitSort <- sort(sample(nrow(train), nrow(train)*.5))
trainSplit <- train[trainSplitSort,]
testSplit <- train[-trainSplitSort,]
# Effectiveness testing for variables: lasso (alpha = 1) over all predictors.
y <- train$pstr
x <- train[,3:83]
x$pstr <- NULL
x$SEX <- as.numeric(as.factor(x$SEX))
# Target-encode higheduc with the per-level mean of pstr.
pstr_table <- train %>% group_by(higheduc) %>% summarise(mnpstr = mean(pstr))
x_join <- right_join(x, pstr_table, by = "higheduc")
x_join$higheduc <- NULL
x_join[is.na(x_join)] <- 0
x_join <- as.matrix(x_join)
model_glmn <- glmnet::glmnet(x_join, y, alpha = 1)
# Count / inspect the coefficients that survive at penalty s = 0.15.
sum(coef(model_glmn, s = 0.15) != 0)
coef(model_glmn, s = .15)
# DATA SPLIT FOR TESTING PURPOSES
# NOTE(review): this re-draws the same kind of 50/50 split as above and
# overwrites trainSplit/testSplit; kept for parity with the original notebook.
trainSplitSort <- sort(sample(nrow(train), nrow(train)*.5))
trainSplit <- train[trainSplitSort,]
testSplit <- train[-trainSplitSort,]
############################
#######GBM MODEL############
library(gbm)
# Gradient boosting on the three selected predictors; gbm() defaults to
# n.trees = 100, matching the value passed to predict() below.
gbmModel <- gbm(formula = pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = train)
gbmPrediction <- predict(gbmModel, test, n.trees = 100)
gbmModelComp <- data.frame(test_id = test$test_id, pstr = gbmPrediction)
# Impute missing predictions with the mean prediction.
gbmModelComp$pstr[is.na(gbmModelComp$pstr)] <- mean(gbmModelComp$pstr, na.rm = TRUE)
#rmse(testSplit$pstr, gbmModelComp$pstr)
write.csv(gbmModelComp,"gbmModelCompSub.csv", row.names = FALSE)
############################
#RANDOM FOREST model with updated variables
# Install only when missing -- an unconditional install.packages() in a
# script re-downloads the package on every run.
if (!requireNamespace("randomForest", quietly = TRUE)) install.packages("randomForest")
library(randomForest)
rfModel <- randomForest(pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv,
                        data = trainSplit, ntree = 100, mtry = 2,
                        importance = TRUE, na.action = na.exclude)
# BUG FIX: the original predicted on trainSplit and then computed RMSE against
# testSplit$pstr, i.e. it compared predictions for one set of rows with the
# targets of a different set. Predict on the held-out split instead.
rfPrediction <- predict(rfModel, testSplit)
rfModelComp <- data.frame(test_id = testSplit$train_id, pstr = rfPrediction)
rfModelComp$pstr[is.na(rfModelComp$pstr)] <- mean(rfModelComp$pstr, na.rm = TRUE)
library(Metrics)
# Held-out RMSE for the random forest model.
rmse(testSplit$pstr, rfModelComp$pstr)
#submission for random forest model with updated variables
rfSub <- predict(rfModel, testData)
rfModelCompSub <- data.frame(test_id = testData$test_id, pstr = rfSub)
rfModelCompSub$pstr[is.na(rfModelCompSub$pstr)] <- mean(rfModelCompSub$pstr, na.rm = TRUE)
write.csv(rfModelCompSub,"C:\\Users\\burto\\Desktop\\DataScienceCompetition\\rfModelUpdatedVariables.csv", row.names = FALSE)
#############################################
###############MODEL STACKING################
# Split data for stacking: base models are fit on train1, the meta-model is
# fit on train2 using out-of-sample base predictions.
index <- sample(nrow(train), nrow(train)/2, replace = FALSE)
train1 <- train[index,]
# BUG FIX: the original set train2 <- train[index,], i.e. the SAME rows as
# train1, so the meta-model was stacked on in-sample predictions (leakage).
train2 <- train[-index,]
#base model
model_glm <- glm(pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = train1)
train2$glm_prediction <- predict(model_glm, train2)
test$glm_prediction <- predict(model_glm, test)
#random forest model
library(randomForest)
model_rf <- randomForest(pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv,
                         data = train1, ntree = 100, mtry = 2,
                         importance = TRUE, na.action = na.exclude)
train2$rf_prediction <- predict(model_rf, train2)
test$rf_prediction <- predict(model_rf, test)
#gbm model
model_gbm <- gbm(formula = pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv,
                 data = train1, n.trees = 100)
train2$gbm_prediction <- predict(model_gbm, train2, n.trees = 100)
test$gbm_prediction <- predict(model_gbm, test, n.trees = 100)
plot(train2$glm_prediction, train2$rf_prediction)
# Meta-model: GLM on the base predictions interacted with the raw features.
model_stack <- glm(pstr ~ hincome*glm_prediction+child_social_media_time_cv*glm_prediction
                   +physical_activities_hr_cv*glm_prediction + hincome*gbm_prediction+child_social_media_time_cv*gbm_prediction
                   +physical_activities_hr_cv*gbm_prediction + hincome*rf_prediction+child_social_media_time_cv*rf_prediction
                   +physical_activities_hr_cv*rf_prediction, data = train2)
stack_predict <- predict(model_stack, test)
stackComp <- data.frame(test_id = test$test_id, pstr = stack_predict)
stackComp$pstr[is.na(stackComp$pstr)] <- mean(stackComp$pstr, na.rm = TRUE)
write.csv(stackComp,"stackModel.csv", row.names = FALSE)
#####################################
###########KERAS ATTEMPT#############
#KERAS ATTEMPT
library(keras)
library(tensorflow)
# NOTE(review): imager is loaded but nothing below uses it -- confirm it is
# needed elsewhere in the notebook.
library(imager)
# create the model: a small fully-connected regression network
# (two hidden ReLU layers with dropout, one linear output unit)
model <- keras_model_sequential()
model %>%
  # assumes the feature matrix has 80 columns (input_shape) -- TODO confirm
  # this matches ncol(x_join) built earlier in the script
  layer_dense(units = 100, activation = 'relu', input_shape = c(80)) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 50, activation = 'relu') %>%
  layer_dropout(rate = 0.4) %>%
  # single linear unit: regression output for pstr
  layer_dense(units = 1, activation = 'linear')
summary(model)
# compile model and specify optimization (MSE loss + Adam; MSE also tracked as metric)
model %>% compile(
  loss = 'MeanSquaredError',
  optimizer = optimizer_adam(),
  metrics = c('MeanSquaredError')
)
# fit model: 30 epochs, mini-batches of 128, 20% of rows held out for validation
history <- model %>% fit(
  x_join, y,
  epochs = 30, batch_size = 128,
  validation_split = 0.2
)
plot(history)
|
/kaggleSubmissions.R
|
no_license
|
Jeffrey-NB/stress-prediction
|
R
| false
| false
| 5,775
|
r
|
#################FINDING USEFUL COEFFICIENTS
library(tidyverse)
library(glmnet)
library(Metrics)
#SIMPLE GLM MODEL#
# NOTE(review): trainData/testData are expected to exist before this section
# runs (they are not created in this script) -- confirm upstream.
model <- glm(pstr ~ ageyear+SEX+higheduc+think_will_hospitalized_cv+fam_wage_loss_cv+fam_exp1_cv+fam_actions_cv___5+fam_actions_cv___6, data = trainData)
predictionTest <- predict(model, testData)
testSubmission1 <- data.frame(test_id = testData$test_id, pstr = predictionTest)
# Impute missing predictions with the mean prediction (use TRUE, not T).
testSubmission1$pstr[is.na(testSubmission1$pstr)] <- mean(testSubmission1$pstr, na.rm = TRUE)
write.csv(testSubmission1,"C:\\Users\\burto\\Desktop\\DataScienceCompetition\\testSubmission.csv", row.names = FALSE)
####FROM KAGGLE NOTEBOOK
list.files(path = "../input")
train <- read.csv("../input/stressdata2/train.csv")
test <- read.csv("../input/stressdata2/test.csv")
# Split data for testing purposes (50/50 holdout).
# NOTE(review): no set.seed(), so the split is not reproducible across runs.
trainSplitSort <- sort(sample(nrow(train), nrow(train)*.5))
trainSplit <- train[trainSplitSort,]
testSplit <- train[-trainSplitSort,]
# Effectiveness testing for variables: lasso (alpha = 1) over all predictors.
y <- train$pstr
x <- train[,3:83]
x$pstr <- NULL
x$SEX <- as.numeric(as.factor(x$SEX))
# Target-encode higheduc with the per-level mean of pstr.
pstr_table <- train %>% group_by(higheduc) %>% summarise(mnpstr = mean(pstr))
x_join <- right_join(x, pstr_table, by = "higheduc")
x_join$higheduc <- NULL
x_join[is.na(x_join)] <- 0
x_join <- as.matrix(x_join)
model_glmn <- glmnet::glmnet(x_join, y, alpha = 1)
# Count / inspect the coefficients that survive at penalty s = 0.15.
sum(coef(model_glmn, s = 0.15) != 0)
coef(model_glmn, s = .15)
# DATA SPLIT FOR TESTING PURPOSES
# NOTE(review): this re-draws the same kind of 50/50 split as above and
# overwrites trainSplit/testSplit; kept for parity with the original notebook.
trainSplitSort <- sort(sample(nrow(train), nrow(train)*.5))
trainSplit <- train[trainSplitSort,]
testSplit <- train[-trainSplitSort,]
############################
#######GBM MODEL############
library(gbm)
#testing portion for local cross validation
gbmModel = gbm(formula = pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = train)
gbmPrediction = predict(gbmModel, test, n.trees = 100)
gbmModelComp <- data.frame(test_id = test$test_id, pstr = gbmPrediction)
gbmModelComp$pstr[is.na(gbmModelComp$pstr)] = mean(gbmModelComp$pstr, na.rm = T)
#rmse(testSplit$pstr, gbmModelComp$pstr)
write.csv(gbmModelComp,"gbmModelCompSub.csv", row.names = FALSE)
############################
#RANDOM FOREST model with updated variables
# NOTE(review): install.packages() inside a script re-downloads on every run;
# consider guarding with requireNamespace(). Left as-is here.
install.packages("randomForest")
library(randomForest)
# Fit a random forest (100 trees, 2 candidate vars per split) on the local
# training split; rows with NA predictors are excluded via na.exclude.
rfModel = randomForest(pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = trainSplit, ntree =100,mtry=2,importance = TRUE, na.action = na.exclude)
# NOTE(review): predictions are made on trainSplit (the data the model was
# fit on) but the rmse() below compares them against testSplit$pstr -- the
# rows do not correspond. Presumably this should be predict(rfModel,
# testSplit); confirm the intended evaluation.
rfPrediction = predict(rfModel, trainSplit)
# NOTE(review): `trainSplit$train_id` -- the train data appears to carry
# test_id elsewhere in this script, so this is likely NULL here; verify.
rfModelComp <-data.frame(test_id = trainSplit$train_id, pstr = rfPrediction)
# Impute missing predictions with the mean so there are no gaps.
rfModelComp$pstr[is.na(rfModelComp$pstr)] = mean(rfModelComp$pstr, na.rm = T)
library(Metrics)
#testing rmse for rfModel
rmse(testSplit$pstr, rfModelComp$pstr)
#submission for random forest model with updated variables (probaly worse than before)
# NOTE(review): `testData` is not defined in this script -- confirm it exists.
rfSub = predict(rfModel, testData)
rfModelCompSub <-data.frame(test_id = testData$test_id, pstr = rfSub)
rfModelCompSub$pstr[is.na(rfModelCompSub$pstr)] = mean(rfModelCompSub$pstr, na.rm = T)
write.csv(rfModelCompSub,"C:\\Users\\burto\\Desktop\\DataScienceCompetition\\rfModelUpdatedVariables.csv", row.names = FALSE)
#############################################
###############MODEL STACKING################
# Split the data into two disjoint halves for stacking: base models are fit
# on train1 and produce held-out predictions on train2, which the stacking
# model is then trained on.
index = sample(nrow(train), nrow(train)/2, FALSE)
train1 = train[index,]
# BUG FIX: train2 previously reused `index`, making it identical to train1,
# so the stacker would have been trained on in-sample base-model predictions.
train2 = train[-index,]
#base model
model_glm = glm(pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = train1)
train2$glm_prediction = predict(model_glm, train2)
test$glm_prediction = predict(model_glm, test)
#random forest model
library(randomForest)
model_rf = randomForest(pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = train1, ntree =100,mtry=2,importance = TRUE, na.action = na.exclude)
train2$rf_prediction = predict(model_rf, train2)
test$rf_prediction = predict(model_rf, test)
#gbm model
model_gbm = gbm(formula = pstr ~ hincome+child_social_media_time_cv+physical_activities_hr_cv, data = train1, n.trees = 100)
train2$gbm_prediction = predict(model_gbm, train2, n.trees = 100)
test$gbm_prediction = predict(model_gbm, test, n.trees = 100)
plot(train2$glm_prediction, train2$rf_prediction)
#stack time
model_stack = glm(pstr~ hincome*glm_prediction+child_social_media_time_cv*glm_prediction
+physical_activities_hr_cv*glm_prediction + hincome*gbm_prediction+child_social_media_time_cv*gbm_prediction
+physical_activities_hr_cv*gbm_prediction + hincome*rf_prediction+child_social_media_time_cv*rf_prediction
+physical_activities_hr_cv*rf_prediction, data = train2)
stack_predict = predict(model_stack, test)
stackComp <- data.frame(test_id = test$test_id, pstr = stack_predict)
stackComp$pstr[is.na(stackComp$pstr)] = mean(stackComp$pstr, na.rm = T)
write.csv(stackComp,"stackModel.csv", row.names = FALSE)
#####################################
###########KERAS ATTEMPT#############
#KERAS ATTEMPT
# Feed-forward regression network on the 80-column numeric matrix `x_join`
# built earlier in this script.
library(keras)
library(tensorflow)
# NOTE(review): imager does not appear to be used in this section -- confirm.
library(imager)
# create the model
model <- keras_model_sequential()
# Two hidden relu layers (100 -> 50 units) with 40% dropout after each, and
# a single linear output unit for regression.
model %>%
layer_dense(units = 100, activation = 'relu', input_shape = c(80)) %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units = 50, activation = 'relu') %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units = 1, activation = 'linear')
summary(model)
# compile model and specify optimization: MSE loss and metric, Adam optimizer
model %>% compile(
loss = 'MeanSquaredError',
optimizer = optimizer_adam(),
metrics = c('MeanSquaredError')
)
# fit model for 30 epochs, holding out 20% of the rows for validation
history <- model %>% fit(
x_join, y,
epochs = 30, batch_size = 128,
validation_split = 0.2
)
# learning curves: loss/metric per epoch for train and validation
plot(history)
|
mean(Car$speed1)
mfv(Car$type4)
sd(Car$pollution3)
sd(Car$pollution5)
|
/tarea1_999003157.R
|
no_license
|
FAMM999003157/FAMM999003157
|
R
| false
| false
| 73
|
r
|
mean(Car$speed1)
mfv(Car$type4)
sd(Car$pollution3)
sd(Car$pollution5)
|
\name{MOL.passage}
\alias{MOL.passage}
\title{Approximate the First Passage Time Density of a Two-Barrier Problem for Time-Homogeneous Scalar Diffusions.}
\description{
For scalar diffusions with drift \code{mu=function(X){}} and diffusion \code{sig=function(X){}}, moving in relation to lower and upper bounds \code{limits[1]} and \code{limits[2]} respectively, \code{MOL.passage()} approximates a solution to the partial differential equation (PDE) that governs the evolution of the survival distribution of the first passage time density via the method of lines (MOL).
}
\usage{
MOL.passage(Xs, t, limits, N, delt, mu, sig, desc = 1)
}
\arguments{
\item{Xs}{Starting/Initial value for the diffusion process (see note \bold{[1]}).}
\item{t}{Value (>0) giving the terminal point for the transition horizon (the final time at which to evaluate the transition density).}
\item{limits}{Limits for the spatial nodes of the lattice. These limits now represent the limits in the spatial domain (see note \bold{[2]}).}
\item{N}{The number of nodes in the spatial domain at which to evaluate the transitional density. Increase \code{N} for more accurate approximations (see note \bold{[3]} and warning \bold{[2]}).}
\item{delt}{Step size for the time domain of the lattice (see note \bold{[4]}).}
\item{mu}{Optional (if drift and diffusion coefficients are already defined) arguments giving the drift and diffusion coefficients as text expressions (See \bold{Interface} below).}
\item{sig}{Optional (if drift and diffusion coefficients are already defined) arguments giving the drift and diffusion coefficients as text expressions (See \bold{Interface} below).}
\item{desc}{The type of discretization used (see note \bold{[5]}). }
}
\value{
\item{surface}{A matrix giving the approximate survival probability over time for all starting values contained in the discretization of the interval enclosed by \code{limits}.}
\item{density}{A vector containing the approximate first passage time density for trajectories starting at \code{Xs} (see note \bold{[i]}).}
\item{time}{A vector of time nodes at which the approximation was evaluated.}
}
\references{
Hamdi, S., Schiesser, W. E. and Griffiths, G. W. 2007 Method of lines. \emph{Scholarpedia}, \bold{2(7)}:2859.
Revision #124335.
Feagin, T. 2007 A tenth-order Runge-Kutta method with error estimate. \emph{In Proceedings of the IAENG
Conf. on Scientific Computing}.
}
\author{
Etienne A.D. Pienaar \email{etiennead@gmail.com}
}
\section{Interface}{
\code{MOL.passage} uses a function-input interface whereby the drift and diffusion components of the time-homogeneous stochastic differential equation (SDE):
\bold{\deqn{dX_t = mu(X_t)dt+sigma(X_t)dW_t,}}
are defined as R-functions in the current workspace. That is by defining the drift and diffusion components
\code{mu=function(X){some expression i.t.o. X}}
\code{sig=function(X){some expression i.t.o. X}}
further analysis may be conducted by calling the function \code{MOL.passage()}.
}
\note{
\bold{Note [1]}: If the initial value \code{Xs} does not fall on one of the lattice nodes, then the first passage time density is calculated by linearly interpolating between approximations at the two nearest lattice nodes.
\bold{Note [2]}: Note that enough nodes, \code{N}, are needed in order to generate a sufficiently accurate approximation, especially when \code{limits[1]} and \code{limits[2]} are far apart.
\bold{Note [3]}: Increasing \code{N} will likely require smaller \code{delt}, thus increasing computation time. For some problems, slight increases in \code{N} will require much smaller \code{delt}.
\bold{Note [4]}: \code{delt} is used as the step size for a 10(8)-th order Runge-Kutta method for solving the resulting system of ODEs. Note again the inter-play between \code{N} and \code{delt} (see note \bold{[3]}).
\bold{Note [5]}: When one of the limits is sufficiently far away to not be accessible within the provided time-horizon, instabilities may occur and an alternative discretization may be required in order to ensure smooth operation. Possible values are \code{desc = 1} (close limits), \code{desc = 2} (\code{limits[1]} is inaccessible) and \code{desc = 3} (\code{limits[2]} is inaccessible).
}
\section{Warnings}{
\bold{Warning [1]:}
}
\seealso{\code{\link{MOL.density}}, \code{\link{BiMOL.density}}.}
\examples{
\donttest{
#===============================================================================
# For detailed notes and examples on how to use the MOL.passage() function, see
# the following vignette:
RShowDoc('Part_3_First_Passage_Times',type='html','DiffusionRimp')
#===============================================================================
}
}
|
/man/MOL.passage.Rd
|
no_license
|
cran/DiffusionRimp
|
R
| false
| false
| 4,823
|
rd
|
\name{MOL.passage}
\alias{MOL.passage}
\title{Approximate the First Passage Time Density of a Two-Barrier Problem for Time-Homogeneous Scalar Diffusions.}
\description{
For scalar diffusions with drift \code{mu=function(X){}} and diffusion \code{sig=function(X){}}, moving in relation to lower and upper bounds \code{limits[1]} and \code{limits[2]} respectively, \code{MOL.passage()} approximates a solution to the partial differential equation (PDE) that governs the evolution of the survival distribution of the first passage time density via the method of lines (MOL).
}
\usage{
MOL.passage(Xs, t, limits, N, delt, mu, sig, desc = 1)
}
\arguments{
\item{Xs}{Starting/Initial value for the diffusion process (see note \bold{[1]}).}
\item{t}{Value (>0) giving the terminal point for the transition horizon (the final time at which to evaluate the transition density).}
\item{limits}{Limits for the spatial nodes of the lattice. These limits now represent the limits in the spatial domain (see note \bold{[2]}).}
\item{N}{The number of nodes in the spatial domain at which to evaluate the transitional density. Increase \code{N} for more accurate approximations (see note \bold{[3]} and warning \bold{[2]}).}
\item{delt}{Step size for the time domain of the lattice (see note \bold{[4]}).}
\item{mu}{Optional (if drift and diffusion coefficients are already defined) arguments giving the drift and diffusion coefficients as text expressions (See \bold{Interface} below).}
\item{sig}{Optional (if drift and diffusion coefficients are already defined) arguments giving the drift and diffusion coefficients as text expressions (See \bold{Interface} below).}
\item{desc}{The type of discretization used (see note \bold{[5]}). }
}
\value{
\item{surface}{A matrix giving the approximate survival probability over time for all starting values contained in the discretization of the interval enclosed by \code{limits}.}
\item{density}{A vector containing the approximate first passage time density for trajectories starting at \code{Xs} (see note \bold{[i]}).}
\item{time}{A vector of time nodes at which the approximation was evaluated.}
}
\references{
Hamdi, S., Schiesser, W. E. and Griffiths, G. W. 2007 Method of lines. \emph{Scholarpedia}, \bold{2(7)}:2859.
Revision #124335.
Feagin, T. 2007 A tenth-order Runge-Kutta method with error estimate. \emph{In Proceedings of the IAENG
Conf. on Scientific Computing}.
}
\author{
Etienne A.D. Pienaar \email{etiennead@gmail.com}
}
\section{Interface}{
\code{MOL.passage} uses a function-input interface whereby the drift and diffusion components of the time-homogeneous stochastic differential equation (SDE):
\bold{\deqn{dX_t = mu(X_t)dt+sigma(X_t)dW_t,}}
are defined as R-functions in the current workspace. That is by defining the drift and diffusion components
\code{mu=function(X){some expression i.t.o. X}}
\code{sig=function(X){some expression i.t.o. X}}
further analysis may be conducted by calling the function \code{MOL.passage()}.
}
\note{
\bold{Note [1]}: If the initial value \code{Xs} does not fall on one of the lattice nodes, then the first passage time density is calculated by linearly interpolating between approximations at the two nearest lattice nodes.
\bold{Note [2]}: Note that enough nodes, \code{N}, are needed in order to generate a sufficiently accurate approximation, especially when \code{limits[1]} and \code{limits[2]} are far apart.
\bold{Note [3]}: Increasing \code{N} will likely require smaller \code{delt}, thus increasing computation time. For some problems, slight increases in \code{N} will require much smaller \code{delt}.
\bold{Note [4]}: \code{delt} is used as the step size for a 10(8)-th order Runge-Kutta method for solving the resulting system of ODEs. Note again the inter-play between \code{N} and \code{delt} (see note \bold{[3]}).
\bold{Note [5]}: When one of the limits is sufficiently far away to not be accessible within the provided time-horizon, instabilities may occur and an alternative discretization may be required in order to ensure smooth operation. Possible values are \code{desc = 1} (close limits), \code{desc = 2} (\code{limits[1]} is inaccessible) and \code{desc = 3} (\code{limits[2]} is inaccessible).
}
\section{Warnings}{
\bold{Warning [1]:}
}
\seealso{\code{\link{MOL.density}}, \code{\link{BiMOL.density}}.}
\examples{
\donttest{
#===============================================================================
# For detailed notes and examples on how to use the MOL.passage() function, see
# the following vignette:
RShowDoc('Part_3_First_Passage_Times',type='html','DiffusionRimp')
#===============================================================================
}
}
|
context('test corpus.R')
test_that("test show.corpus", {
testcorpus <- corpus(c('The'))
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 1 document.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox')
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox'),
docvars=data.frame(list(test=1:4))
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents and 1 docvar.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox'),
docvars=data.frame(list(test=1:4, test2=1:4))
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents and 2 docvars.')
)
})
test_that("test c.corpus", {
concat.corpus <- c(data_corpus_inaugural, data_corpus_inaugural, data_corpus_inaugural)
expected_docvars <-rbind(docvars(data_corpus_inaugural), docvars(data_corpus_inaugural), docvars(data_corpus_inaugural))
rownames(expected_docvars) <- make.unique(rep(rownames(docvars(data_corpus_inaugural)), 3), sep='')
expect_equal(
docvars(concat.corpus),
expected_docvars
)
expect_is(
docvars(concat.corpus),
'data.frame'
)
expected_texts <- c(texts(data_corpus_inaugural), texts(data_corpus_inaugural), texts(data_corpus_inaugural))
names(expected_texts) <- make.unique(rep(names(texts(data_corpus_inaugural)), 3), sep='')
expect_equal(
texts(concat.corpus),
expected_texts
)
expect_is(
texts(concat.corpus),
'character'
)
expect_true(
grepl('Concatenation by c.corpus', metacorpus(concat.corpus)$source)
)
})
test_that("test corpus constructors works for kwic", {
kwiccorpus <- corpus(kwic(data_corpus_inaugural, "christmas"))
expect_that(kwiccorpus, is_a("corpus"))
expect_equal(names(docvars(kwiccorpus)),
c("docname", "from", "to", "keyword", "context"))
})
test_that("test corpus constructors works for character", {
expect_that(corpus(data_char_ukimmig2010), is_a("corpus"))
})
test_that("test corpus constructors works for data.frame", {
mydf <- data.frame(letter_factor = factor(rep(letters[1:3], each = 2)),
some_ints = 1L:6L,
some_text = paste0("This is text number ", 1:6, "."),
some_logical = rep(c(TRUE, FALSE), 3),
stringsAsFactors = FALSE,
row.names = paste0("fromDf_", 1:6))
mycorp <- corpus(mydf, text_field = "some_text",
metacorpus = list(source = "From a data.frame called mydf."))
expect_equal(docnames(mycorp),
paste("fromDf", 1:6, sep = "_"))
expect_equal(mycorp[["letter_factor"]][3,1],
factor("b", levels = c("a", "b", "c")))
mydf2 <- mydf
names(mydf2)[3] <- "text"
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf2))
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf, text_field = 3))
expect_error(corpus(mydf, text_field = "some_ints"),
"text_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = c(1,3)),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = c("some_text", "letter_factor")),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = 0),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = -1),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "notfound"),
"column name notfound not found")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "some_ints"),
"docid_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c(1,3)),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c("some_text", "letter_factor")),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = 0),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = -1),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "notfound"),
"column name notfound not found")
})
test_that("test corpus constructor works for tm objects", {
skip_if_not_installed("tm")
require(tm)
# VCorpus
data(crude, package = "tm") # load in a tm example VCorpus
mytmCorpus <- corpus(crude)
expect_equal(substring(texts(mytmCorpus)[1], 1, 21),
c("127" = "Diamond Shamrock Corp"))
data(acq, package = "tm")
mytmCorpus2 <- corpus(acq)
expect_equal(dim(docvars(mytmCorpus2)), c(50,12))
# SimpleCorpus
txt <- system.file("texts", "txt", package = "tm")
mytmCorpus3 <- SimpleCorpus(DirSource(txt, encoding = "UTF-8"),
control = list(language = "lat"))
qcorpus3 <- corpus(mytmCorpus3)
expect_equal(content(mytmCorpus3), texts(qcorpus3))
expect_equal(unclass(meta(mytmCorpus3, type = "corpus")[1]),
metacorpus(qcorpus3)[names(meta(mytmCorpus3, type = "corpus"))])
# any other type
mytmCorpus4 <- mytmCorpus3
class(mytmCorpus4)[1] <- "OtherCorpus"
expect_error(
corpus(mytmCorpus4),
"Cannot construct a corpus from this tm OtherCorpus object"
)
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for VCorpus with one document (#445)", {
skip_if_not_installed("tm")
require(tm)
tmCorpus_length1 <- VCorpus(VectorSource(data_corpus_inaugural[2]))
expect_silent(qcorpus <- corpus(tmCorpus_length1))
expect_equivalent(texts(qcorpus)[1], data_corpus_inaugural[2])
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for complex VCorpus (#849)", {
skip_if_not_installed("tm")
load("../data/corpora/complex_Corpus.RData")
qc <- corpus(complex_Corpus)
expect_equal(
head(docnames(qc), 3),
c("41113_201309.1", "41113_201309.2", "41113_201309.3")
)
expect_equal(
tail(docnames(qc), 3),
c("41223_201309.2553", "41223_201309.2554", "41223_201309.2555")
)
expect_output(
print(qc),
"Corpus consisting of 8,230 documents and 16 docvars\\."
)
})
test_that("corpus_subset works", {
txt <- c(doc1 = "This is a sample text.\nIt has three lines.\nThe third line.",
doc2 = "one\ntwo\tpart two\nthree\nfour.",
doc3 = "A single sentence.",
doc4 = "A sentence with \"escaped quotes\".")
dv <- data.frame(varnumeric = 10:13, varfactor = factor(c("A", "B", "A", "B")), varchar = letters[1:4])
data_corpus_test <- corpus(txt, docvars = dv, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test, varfactor == "B")), 2)
expect_equal(docnames(corpus_subset(data_corpus_test, varfactor == "B")), c("doc2", "doc4"))
data_corpus_test_nodv <- corpus(txt, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), 1)
expect_equal(docnames(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), c("doc2"))
})
test_that("summary method works for corpus", {
expect_output(summary(print(data_corpus_irishbudget2010)), regexp = "^Corpus consisting of 14 documents")
})
test_that("corpus works for texts with duplicate filenames", {
txt <- c(one = "Text one.", two = "text two", one = "second first text")
cor <- corpus(txt)
expect_equal(docnames(cor), c("one", "two", "one.1"))
})
test_that("create a corpus on a corpus", {
expect_identical(
data_corpus_irishbudget2010,
corpus(data_corpus_irishbudget2010)
)
tmpcorp <- data_corpus_irishbudget2010
docnames(tmpcorp) <- paste0("d", seq_len(ndoc(tmpcorp)))
expect_identical(
tmpcorp,
corpus(data_corpus_irishbudget2010, docnames = paste0("d", seq_len(ndoc(tmpcorp))))
)
expect_identical(
corpus(data_corpus_irishbudget2010, compress = TRUE),
corpus(texts(data_corpus_irishbudget2010),
docvars = docvars(data_corpus_irishbudget2010),
metacorpus = metacorpus(data_corpus_irishbudget2010),
compress = TRUE)
)
})
test_that("summary.corpus with verbose prints warning", {
expect_warning(
summary(data_corpus_irishbudget2010, verbose = FALSE),
"verbose argument is defunct"
)
})
test_that("head, tail.corpus work as expected", {
crp <- corpus_subset(data_corpus_inaugural, Year < 2018)
expect_equal(
docnames(head(crp, 3)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(head(crp, -55)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(tail(crp, 3)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
expect_equal(
docnames(tail(crp, -55)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
})
test_that("internal documents fn works", {
mydfm <- dfm(corpus_subset(data_corpus_inaugural, Year < 1800))
expect_is(quanteda:::documents.dfm(mydfm), "data.frame")
expect_equal(
dim(quanteda:::documents.dfm(mydfm)),
c(3, 3)
)
})
|
/tests/testthat/test-corpus.R
|
no_license
|
leighd2008/quanteda
|
R
| false
| false
| 10,124
|
r
|
context('test corpus.R')
test_that("test show.corpus", {
testcorpus <- corpus(c('The'))
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 1 document.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox')
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox'),
docvars=data.frame(list(test=1:4))
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents and 1 docvar.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox'),
docvars=data.frame(list(test=1:4, test2=1:4))
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents and 2 docvars.')
)
})
test_that("test c.corpus", {
concat.corpus <- c(data_corpus_inaugural, data_corpus_inaugural, data_corpus_inaugural)
expected_docvars <-rbind(docvars(data_corpus_inaugural), docvars(data_corpus_inaugural), docvars(data_corpus_inaugural))
rownames(expected_docvars) <- make.unique(rep(rownames(docvars(data_corpus_inaugural)), 3), sep='')
expect_equal(
docvars(concat.corpus),
expected_docvars
)
expect_is(
docvars(concat.corpus),
'data.frame'
)
expected_texts <- c(texts(data_corpus_inaugural), texts(data_corpus_inaugural), texts(data_corpus_inaugural))
names(expected_texts) <- make.unique(rep(names(texts(data_corpus_inaugural)), 3), sep='')
expect_equal(
texts(concat.corpus),
expected_texts
)
expect_is(
texts(concat.corpus),
'character'
)
expect_true(
grepl('Concatenation by c.corpus', metacorpus(concat.corpus)$source)
)
})
test_that("test corpus constructors works for kwic", {
kwiccorpus <- corpus(kwic(data_corpus_inaugural, "christmas"))
expect_that(kwiccorpus, is_a("corpus"))
expect_equal(names(docvars(kwiccorpus)),
c("docname", "from", "to", "keyword", "context"))
})
test_that("test corpus constructors works for character", {
expect_that(corpus(data_char_ukimmig2010), is_a("corpus"))
})
test_that("test corpus constructors works for data.frame", {
mydf <- data.frame(letter_factor = factor(rep(letters[1:3], each = 2)),
some_ints = 1L:6L,
some_text = paste0("This is text number ", 1:6, "."),
some_logical = rep(c(TRUE, FALSE), 3),
stringsAsFactors = FALSE,
row.names = paste0("fromDf_", 1:6))
mycorp <- corpus(mydf, text_field = "some_text",
metacorpus = list(source = "From a data.frame called mydf."))
expect_equal(docnames(mycorp),
paste("fromDf", 1:6, sep = "_"))
expect_equal(mycorp[["letter_factor"]][3,1],
factor("b", levels = c("a", "b", "c")))
mydf2 <- mydf
names(mydf2)[3] <- "text"
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf2))
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf, text_field = 3))
expect_error(corpus(mydf, text_field = "some_ints"),
"text_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = c(1,3)),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = c("some_text", "letter_factor")),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = 0),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = -1),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "notfound"),
"column name notfound not found")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "some_ints"),
"docid_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c(1,3)),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c("some_text", "letter_factor")),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = 0),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = -1),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "notfound"),
"column name notfound not found")
})
test_that("test corpus constructor works for tm objects", {
skip_if_not_installed("tm")
require(tm)
# VCorpus
data(crude, package = "tm") # load in a tm example VCorpus
mytmCorpus <- corpus(crude)
expect_equal(substring(texts(mytmCorpus)[1], 1, 21),
c("127" = "Diamond Shamrock Corp"))
data(acq, package = "tm")
mytmCorpus2 <- corpus(acq)
expect_equal(dim(docvars(mytmCorpus2)), c(50,12))
# SimpleCorpus
txt <- system.file("texts", "txt", package = "tm")
mytmCorpus3 <- SimpleCorpus(DirSource(txt, encoding = "UTF-8"),
control = list(language = "lat"))
qcorpus3 <- corpus(mytmCorpus3)
expect_equal(content(mytmCorpus3), texts(qcorpus3))
expect_equal(unclass(meta(mytmCorpus3, type = "corpus")[1]),
metacorpus(qcorpus3)[names(meta(mytmCorpus3, type = "corpus"))])
# any other type
mytmCorpus4 <- mytmCorpus3
class(mytmCorpus4)[1] <- "OtherCorpus"
expect_error(
corpus(mytmCorpus4),
"Cannot construct a corpus from this tm OtherCorpus object"
)
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for VCorpus with one document (#445)", {
skip_if_not_installed("tm")
require(tm)
tmCorpus_length1 <- VCorpus(VectorSource(data_corpus_inaugural[2]))
expect_silent(qcorpus <- corpus(tmCorpus_length1))
expect_equivalent(texts(qcorpus)[1], data_corpus_inaugural[2])
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for complex VCorpus (#849)", {
skip_if_not_installed("tm")
load("../data/corpora/complex_Corpus.RData")
qc <- corpus(complex_Corpus)
expect_equal(
head(docnames(qc), 3),
c("41113_201309.1", "41113_201309.2", "41113_201309.3")
)
expect_equal(
tail(docnames(qc), 3),
c("41223_201309.2553", "41223_201309.2554", "41223_201309.2555")
)
expect_output(
print(qc),
"Corpus consisting of 8,230 documents and 16 docvars\\."
)
})
test_that("corpus_subset works", {
txt <- c(doc1 = "This is a sample text.\nIt has three lines.\nThe third line.",
doc2 = "one\ntwo\tpart two\nthree\nfour.",
doc3 = "A single sentence.",
doc4 = "A sentence with \"escaped quotes\".")
dv <- data.frame(varnumeric = 10:13, varfactor = factor(c("A", "B", "A", "B")), varchar = letters[1:4])
data_corpus_test <- corpus(txt, docvars = dv, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test, varfactor == "B")), 2)
expect_equal(docnames(corpus_subset(data_corpus_test, varfactor == "B")), c("doc2", "doc4"))
data_corpus_test_nodv <- corpus(txt, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), 1)
expect_equal(docnames(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), c("doc2"))
})
test_that("summary method works for corpus", {
expect_output(summary(print(data_corpus_irishbudget2010)), regexp = "^Corpus consisting of 14 documents")
})
test_that("corpus works for texts with duplicate filenames", {
txt <- c(one = "Text one.", two = "text two", one = "second first text")
cor <- corpus(txt)
expect_equal(docnames(cor), c("one", "two", "one.1"))
})
test_that("create a corpus on a corpus", {
expect_identical(
data_corpus_irishbudget2010,
corpus(data_corpus_irishbudget2010)
)
tmpcorp <- data_corpus_irishbudget2010
docnames(tmpcorp) <- paste0("d", seq_len(ndoc(tmpcorp)))
expect_identical(
tmpcorp,
corpus(data_corpus_irishbudget2010, docnames = paste0("d", seq_len(ndoc(tmpcorp))))
)
expect_identical(
corpus(data_corpus_irishbudget2010, compress = TRUE),
corpus(texts(data_corpus_irishbudget2010),
docvars = docvars(data_corpus_irishbudget2010),
metacorpus = metacorpus(data_corpus_irishbudget2010),
compress = TRUE)
)
})
test_that("summary.corpus with verbose prints warning", {
expect_warning(
summary(data_corpus_irishbudget2010, verbose = FALSE),
"verbose argument is defunct"
)
})
test_that("head, tail.corpus work as expected", {
crp <- corpus_subset(data_corpus_inaugural, Year < 2018)
expect_equal(
docnames(head(crp, 3)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(head(crp, -55)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(tail(crp, 3)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
expect_equal(
docnames(tail(crp, -55)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
})
test_that("internal documents fn works", {
mydfm <- dfm(corpus_subset(data_corpus_inaugural, Year < 1800))
expect_is(quanteda:::documents.dfm(mydfm), "data.frame")
expect_equal(
dim(quanteda:::documents.dfm(mydfm)),
c(3, 3)
)
})
|
#' Get one or multiple keys from database by chunks
#'
#' This function reads all keys that qualify for the like condition
#' specified in the arguments of this function.
#'
#' @param ... chunks of class character that should be used to narrow
#' down selected keys
#' @param conn a postgreSQL connection object
#' @param relation character name of the table to query, defaults to
#' "timeseries_main"
#' @return a character matrix of matching ts_key values
#' @author Matthias Bannert
#' @export
#' @seealso \code{\link{boots2db}} and \code{\link{bootsKey}}
getKeysFromDb <- function(..., conn = con,
                          relation = "timeseries_main"){
  like <- unlist(list(...))
  stopifnot(is.character(like))
  # wrap every chunk in '%...%' so each one acts as a substring pattern
  like <- addTag(like, "'%", "%'")
  # chain the patterns: ... WHERE ts_key LIKE p1 AND ts_key LIKE p2 ...
  like <- paste(like, collapse = " AND ts_key LIKE ")
  # NOTE(review): the statement is assembled by string concatenation, so key
  # chunks are interpolated unescaped -- acceptable for trusted input, but
  # prefer parameterized queries if the chunks can come from users.
  statement <- paste0("SELECT ts_key FROM ", relation,
                      " WHERE ts_key LIKE ", like)
  res <- dbGetQuery(conn, statement)
  # return a matrix for backward compatibility with existing callers
  res <- as.matrix(res)
  res
}
# Reference class holding a set of time-series keys plus an optional row
# selection. `keys` is a data.frame with an mi_key column and, optionally,
# a vintage_key column; `selection` stores selected row numbers.
result_set <- setRefClass("result_set",
                          fields = list(keys = "data.frame",
                                        selection = "numeric"),
                          methods = list(
                            add_series = function(mi_keys, vintage_keys = list(),
                                                  meta_env_name = "meta"){
                              # Populate `keys` from a character vector of key
                              # names; vintage_keys is a named list mapping
                              # vintage name -> row number in the new frame.
                              # sanity check of argument
                              if(!is.character(mi_keys)){
                                stop("mi_keys must be a vector
of character representation of the keys you want to add.")
                              }
                              # every series must have meta information
                              # registered in the environment named by
                              # meta_env_name
                              if(!all(mi_keys %in% ls(envir = get(meta_env_name)))){
                                stop("Not all time series have meta information.")
                              }
                              df <- data.frame(mi_key = mi_keys, stringsAsFactors = FALSE)
                              if(length(vintage_keys) == 0){
                                # BUG FIX: an empty vintage list previously
                                # fell through to the out-of-bounds error and
                                # `keys` was never set; it is a valid input.
                                keys <<- df
                              } else if(all(unlist(vintage_keys) %in% seq_len(nrow(df)))){
                                # attach vintage names at the indicated rows
                                df[unlist(vintage_keys), "vintage_key"] <- names(vintage_keys)
                                keys <<- df
                              } else {
                                stop("result set cannot be generated. One of the vintage keys
is out of bounds.")
                              }
                            },
                            add_selection = function(rows){
                              # Store row numbers of `keys` as the selection.
                              if(!is.numeric(rows)){
                                stop("selection has to be row number in the result_set data.frame")
                              }
                              # check whether selection fits
                              if(!all(rows %in% seq_len(nrow(keys)))){
                                stop("selection not in range.")
                              }
                              selection <<- rows
                            },
                            # default show method shows only selected values
                            show = function(){
                              if(length(selection) == 0){
                                sel <- seq_len(nrow(keys))
                              } else {
                                sel <- selection
                              }
                              methods::show(keys[sel, ])
                            }
                          )
)
# --- Interactive scratchpad: exercising the result_set class --------------
# NOTE(review): these are exploratory REPL lines, not a runnable script;
# several lines error or use objects created further down (tset is used
# before it is assigned by the result_set() line below).
# four quarterly dummy series (rnorm(1:100) draws 100 values)
ts1 <- ts(rnorm(1:100),start=c(1982,1),frequency=4)
ts2 <- ts(rnorm(1:100),start=c(1985,1),frequency=4)
ts3 <- ts(rnorm(1:100),start=c(1985,1),frequency=4)
ts4 <- ts(rnorm(1:100),start=c(1985,1),frequency=4)
# toy vintage map: name -> row number
xx <- list(run1=1,run3=4)
unlist(xx)
# NOTE(review): tset does not exist yet -- errors if run top-down
tset$keys[unlist(xx),"vintage_key"] <- names(xx)
nrow(tset$keys)
# NOTE(review): ifelse() without arguments always errors
ifelse()
# NOTE(review): T/F instead of TRUE/FALSE; values are coerced to character
matrix(c("ts1","ts2",T,F),ncol=2)
tset <- result_set()
tset$add_series(c("ts1","ts2","ts3","ts4"),list(run3123=3,run3=3))
tset$add_selection(c(1,2,4))
tset$selection
c(1,2,3) %in% 1:nrow(tset$keys)
tset$selection
# NOTE(review): prefer library() over require() for hard dependencies
require(miro)
# presumably registers meta information for each series (miro helper) --
# TODO confirm against the miro package
add_mi(ts1)
add_mi(ts2)
add_mi(ts3)
add_mi(ts4)
# manual re-check of the bounds condition used inside add_series
length(list(run3123=2,run3=45)) != 0 & all(unlist(list(run3123=2,run3=45)) %in% 1:nrow(tset$keys))
all(c("ts1","ts2","ts3","ts4") %in% ls(envir=meta))
# Reference class holding the keys returned by a database query plus a
# numeric selection of rows. populate() fills keys from the database,
# select() marks rows, show()/showAll() print them.
resultKeys <- setRefClass(
  "resultKeys",
  fields = list(keys = "matrix",
                selected = "numeric"),
  methods = list(
    populate = function(...) {
      'Gets initial set of Keys from
       data base.
      '
      keys <<- getKeysFromDb(...)
      selected <<- as.numeric()
    },
    select = function(...) {
      # selection must stay within the number of available keys
      mx <- nrow(keys)
      sel <- sort(unlist(list(...)))
      stopifnot(range(sel)[2] <= mx)
      selected <<- sel
    },
    show = function() {
      # BUGFIX: the condition read length(numeric) -- the length of the
      # base function object `numeric`, which is always 1 -- so the
      # "no selection" branch was unreachable. Test the field instead.
      if (length(selected) > 0) {
        cat("Keys of selected time series, object of class",
            classLabel(class(.self)),": \n")
        methods::show(keys[selected])
        cat("To see all keys from this query call showAll.")
      } else {
        cat("Keys of selected time series, object of class",
            classLabel(class(.self)),": \n")
        methods::show(keys)
        cat("No selections made yet.")
      }
    },
    showAll = function() {
      cat("Keys of selected time series, object of class",
          classLabel(class(.self)),": \n")
      methods::show(keys)
      cat("The following rows are currently selected: \n")
      methods::show(selected)
    }
    # definitely need a save-selection method here.
  ))
is.resultKeys <- function(x) inherits(x,"resultKeys")
# --- Interactive scratchpad: querying keys and fetching series ------------
test <- resultKeys()
# populate from the DB: keys matching both "DLU1" and "NEG"
test$populate("DLU1","NEG")
test$select(1,2)
test$showAll()
# NOTE(review): "pastetest" looks like a paste typo -- presumably
# test$keys[test$selected] was intended; errors as written
pastetest$keys[test$selected]
# extract the ts_index slot of a boots object
getIndex <- function(Obj) Obj@ts_index
# fetch the first two selected series; t2a duplicates t2 (apparently to
# test index comparison -- TODO confirm intent)
t1 <- db2boots(tskey=test$keys[1])
t2 <- db2boots(tskey=test$keys[2])
t2a <- db2boots(tskey=test$keys[2])
t3 <- list(t1,t2,t2a)
t5 <- lapply(t3,getIndex)
min(t5[[3]])
# NOTE(review): identical() compares only its first two arguments; the
# third positional argument is interpreted as the num.eq option --
# probably not what was intended here
identical(t5[[1]],t5[[2]],t5[[3]][1])
all(t5)
# Reference class bundling fetched boots objects (bootsList) with a
# data.frame representation (set) for display and export.
resultSet <- setRefClass(
  "resultSet",
  fields = list(bootsList = "list",
                set = "data.frame"),
  methods = list(
    queryByKeys = function(x, ...) {
      # x must be a populated resultKeys object; fetch every selected
      # key from the database and keep both representations
      stopifnot(is.resultKeys(x))
      li <- lapply(x$keys[x$selected], db2boots)
      names(li) <- x$keys[x$selected]
      bootsList <<- li
      set <<- bootsList2df(li)
    },
    show = function() {
      methods::show(set)
      cat("Only showing data.frame representation. Access object$bootsList for all information.")
    },
    export2STATA = function(f = "stata_out.dta") {
      # refuse to write an empty data set
      stopifnot(nrow(set) > 0)
      foreign::write.dta(set, file = f)
      # BUGFIX: previously printed the literal string "f" instead of
      # the actual file name
      cat("File written to ", file.path(getwd(), f), "\n")
    }
    # should also be able to generate result
    # sets without database...
  )
)
# --- Interactive scratchpad: resultSet usage ------------------------------
# NOTE(review): lines are out of order -- getMeta/export2STATA are called
# before `out` is created/populated; errors if run top-down.
getMeta(out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE,"en")$title
out <- resultSet()
# errors while `set` is still empty (stopifnot(nrow(set) > 0))
out$export2STATA()
out$queryByKeys(test)
# plot the raw data part (.Data slot) of one fetched series
plot(out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE@.Data,type="l")
# Convert a named list of boots objects into a data.frame with a Date
# column (the shared ts_index, as character) and one column per series.
# Returns NULL (with a warning) when the series' indices do not match.
bootsList2df <- function(li) {
  indices <- lapply(li, getIndex)
  nm <- names(li)
  data <- as.data.frame(lapply(li, getDataPart))
  colnames(data) <- nm
  # all series must share exactly the same time index; a single-element
  # list is trivially consistent (compareList() requires length > 1,
  # so it must not be called in that case)
  if (length(li) == 1 || compareList(indices)) {
    d <- indices[[1]]
    data.frame(Date = as.character(d),
               data, stringsAsFactors = FALSE)
  } else {
    # BUGFIX: was cat() with an invisible NULL return; warn explicitly
    # and return NULL so callers can test the result
    warning("Time series of different length are not supported at the moment.")
    NULL
  }
}
# TRUE when every element of li is identical() to the first element.
# li must contain at least two elements.
compareList <- function(li) {
  stopifnot(length(li) > 1)
  # compare each remaining element against the first (dropped unused
  # local `l`; vapply guarantees a logical result)
  same <- vapply(li[-1], identical, logical(1), y = li[[1]])
  all(same)
}
# --- Interactive scratchpad: comparing indices / building data.frames -----
# NOTE(review): test_false is never defined in this file -- errors if run
compareList(test_false)
# indices of a raw and a seasonally adjusted (X12, D11) variant
a <- out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE@ts_index
b <- out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE_X12_D11@ts_index
identical(a,b)
class(a)
class(as.character(a))
test
bootsList2df(out$bootsList)
getIndex(out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE)
# column-bind the raw data parts of the fetched series
t4 <- lapply(t3,getDataPart)
as.data.frame(t4)
|
/experiments_with_old_definitions.R
|
no_license
|
mbannert/miro
|
R
| false
| false
| 9,345
|
r
|
#' Get one or multiple keys from database by chunks
#'
#' This function reads all keys that qualify for the LIKE condition(s)
#' built from the arguments of this function.
#'
#' @param ... chunks of class character that should be used to narrow
#' down the selected keys
#' @param conn a postgreSQL connection object; defaults to the global
#' \code{con} -- NOTE(review): prefer passing the connection explicitly
#' over relying on a global object
#' @param relation name of the database relation (table) to query
#' @return a character matrix with a single column, ts_key, holding the
#' matching keys
#' @author Matthias Bannert
#' @export
#' @seealso \code{\link{boots2db}} and \code{\link{bootsKey}}
getKeysFromDb <- function(..., conn = con,
                          relation = "timeseries_main") {
  like <- unlist(list(...))
  stopifnot(is.character(like))
  # wrap every chunk in quoted SQL wildcards: chunk -> '%chunk%'
  like <- addTag(like, "'%", "%'")
  like <- paste(like, collapse = " AND ts_key LIKE ")
  # WARNING(review): the statement is built by string concatenation and
  # the chunks are interpolated unescaped -- do not pass untrusted input
  # (SQL injection); prefer a parameterized query.
  statement <- paste0("SELECT ts_key FROM ", relation,
                      " WHERE ts_key LIKE ", like)
  res <- dbGetQuery(conn, statement)
  as.matrix(res)
}
# Reference class representing a result set of time series keys.
# Fields:
#   keys      -- data.frame with column mi_key and, when vintages are
#                supplied, an additional vintage_key column
#   selection -- numeric row indices into keys marking the user selection
result_set <- setRefClass(
  "result_set",
  fields = list(keys = "data.frame",
                selection = "numeric"),
  methods = list(
    add_series = function(mi_keys, vintage_keys = list(),
                          meta_env_name = "meta") {
      # sanity check of argument
      if (!is.character(mi_keys)) {
        stop("mi_keys must be a vector
of character representation of the keys you want to add.")
      }
      # every series must have meta information in the environment
      # named by meta_env_name (resolved via get())
      if (!all(mi_keys %in% ls(envir = get(meta_env_name)))) {
        stop("Not all time series have meta information.")
      }
      df <- data.frame(mi_key = mi_keys, stringsAsFactors = FALSE)
      if (length(vintage_keys) == 0) {
        # BUGFIX: an empty vintage list (the default!) previously fell
        # through to the out-of-bounds error because the condition was
        # `length(...) != 0 & all(...)`; an empty list is valid and
        # simply stores the keys without a vintage_key column.
        keys <<- df
      } else if (all(unlist(vintage_keys) %in% seq_len(nrow(df)))) {
        # vintage_keys is a named list mapping vintage name -> row number
        df[unlist(vintage_keys), "vintage_key"] <- names(vintage_keys)
        keys <<- df
      } else {
        stop("result set cannot be generated. One of the vintage keys
is out of bounds.")
      }
    },
    add_selection = function(rows) {
      if (!is.numeric(rows)) {
        stop("selection has to be row number in the result_set data.frame")
      }
      # check whether the selection fits within the stored keys
      if (!all(rows %in% seq_len(nrow(keys)))) {
        stop("selection not in range.")
      }
      selection <<- rows
    },
    # default show method shows only selected values
    show = function() {
      if (length(selection) == 0) {
        sel <- seq_len(nrow(keys))
      } else {
        sel <- selection
      }
      methods::show(keys[sel, ])
    }
  )
)
# --- Interactive scratchpad: exercising the result_set class --------------
# NOTE(review): these are exploratory REPL lines, not a runnable script;
# several lines error or use objects created further down (tset is used
# before it is assigned by the result_set() line below).
# four quarterly dummy series (rnorm(1:100) draws 100 values)
ts1 <- ts(rnorm(1:100),start=c(1982,1),frequency=4)
ts2 <- ts(rnorm(1:100),start=c(1985,1),frequency=4)
ts3 <- ts(rnorm(1:100),start=c(1985,1),frequency=4)
ts4 <- ts(rnorm(1:100),start=c(1985,1),frequency=4)
# toy vintage map: name -> row number
xx <- list(run1=1,run3=4)
unlist(xx)
# NOTE(review): tset does not exist yet -- errors if run top-down
tset$keys[unlist(xx),"vintage_key"] <- names(xx)
nrow(tset$keys)
# NOTE(review): ifelse() without arguments always errors
ifelse()
# NOTE(review): T/F instead of TRUE/FALSE; values are coerced to character
matrix(c("ts1","ts2",T,F),ncol=2)
tset <- result_set()
tset$add_series(c("ts1","ts2","ts3","ts4"),list(run3123=3,run3=3))
tset$add_selection(c(1,2,4))
tset$selection
c(1,2,3) %in% 1:nrow(tset$keys)
tset$selection
# NOTE(review): prefer library() over require() for hard dependencies
require(miro)
# presumably registers meta information for each series (miro helper) --
# TODO confirm against the miro package
add_mi(ts1)
add_mi(ts2)
add_mi(ts3)
add_mi(ts4)
# manual re-check of the bounds condition used inside add_series
length(list(run3123=2,run3=45)) != 0 & all(unlist(list(run3123=2,run3=45)) %in% 1:nrow(tset$keys))
all(c("ts1","ts2","ts3","ts4") %in% ls(envir=meta))
# Reference class holding the keys returned by a database query plus a
# numeric selection of rows. populate() fills keys from the database,
# select() marks rows, show()/showAll() print them.
resultKeys <- setRefClass(
  "resultKeys",
  fields = list(keys = "matrix",
                selected = "numeric"),
  methods = list(
    populate = function(...) {
      'Gets initial set of Keys from
       data base.
      '
      keys <<- getKeysFromDb(...)
      selected <<- as.numeric()
    },
    select = function(...) {
      # selection must stay within the number of available keys
      mx <- nrow(keys)
      sel <- sort(unlist(list(...)))
      stopifnot(range(sel)[2] <= mx)
      selected <<- sel
    },
    show = function() {
      # BUGFIX: the condition read length(numeric) -- the length of the
      # base function object `numeric`, which is always 1 -- so the
      # "no selection" branch was unreachable. Test the field instead.
      if (length(selected) > 0) {
        cat("Keys of selected time series, object of class",
            classLabel(class(.self)),": \n")
        methods::show(keys[selected])
        cat("To see all keys from this query call showAll.")
      } else {
        cat("Keys of selected time series, object of class",
            classLabel(class(.self)),": \n")
        methods::show(keys)
        cat("No selections made yet.")
      }
    },
    showAll = function() {
      cat("Keys of selected time series, object of class",
          classLabel(class(.self)),": \n")
      methods::show(keys)
      cat("The following rows are currently selected: \n")
      methods::show(selected)
    }
    # definitely need a save-selection method here.
  ))
is.resultKeys <- function(x) inherits(x,"resultKeys")
# --- Interactive scratchpad: querying keys and fetching series ------------
test <- resultKeys()
# populate from the DB: keys matching both "DLU1" and "NEG"
test$populate("DLU1","NEG")
test$select(1,2)
test$showAll()
# NOTE(review): "pastetest" looks like a paste typo -- presumably
# test$keys[test$selected] was intended; errors as written
pastetest$keys[test$selected]
# extract the ts_index slot of a boots object
getIndex <- function(Obj) Obj@ts_index
# fetch the first two selected series; t2a duplicates t2 (apparently to
# test index comparison -- TODO confirm intent)
t1 <- db2boots(tskey=test$keys[1])
t2 <- db2boots(tskey=test$keys[2])
t2a <- db2boots(tskey=test$keys[2])
t3 <- list(t1,t2,t2a)
t5 <- lapply(t3,getIndex)
min(t5[[3]])
# NOTE(review): identical() compares only its first two arguments; the
# third positional argument is interpreted as the num.eq option --
# probably not what was intended here
identical(t5[[1]],t5[[2]],t5[[3]][1])
all(t5)
# Reference class bundling fetched boots objects (bootsList) with a
# data.frame representation (set) for display and export.
resultSet <- setRefClass(
  "resultSet",
  fields = list(bootsList = "list",
                set = "data.frame"),
  methods = list(
    queryByKeys = function(x, ...) {
      # x must be a populated resultKeys object; fetch every selected
      # key from the database and keep both representations
      stopifnot(is.resultKeys(x))
      li <- lapply(x$keys[x$selected], db2boots)
      names(li) <- x$keys[x$selected]
      bootsList <<- li
      set <<- bootsList2df(li)
    },
    show = function() {
      methods::show(set)
      cat("Only showing data.frame representation. Access object$bootsList for all information.")
    },
    export2STATA = function(f = "stata_out.dta") {
      # refuse to write an empty data set
      stopifnot(nrow(set) > 0)
      foreign::write.dta(set, file = f)
      # BUGFIX: previously printed the literal string "f" instead of
      # the actual file name
      cat("File written to ", file.path(getwd(), f), "\n")
    }
    # should also be able to generate result
    # sets without database...
  )
)
# --- Interactive scratchpad: resultSet usage ------------------------------
# NOTE(review): lines are out of order -- getMeta/export2STATA are called
# before `out` is created/populated; errors if run top-down.
getMeta(out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE,"en")$title
out <- resultSet()
# errors while `set` is still empty (stopifnot(nrow(set) > 0))
out$export2STATA()
out$queryByKeys(test)
# plot the raw data part (.Data slot) of one fetched series
plot(out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE@.Data,type="l")
# Convert a named list of boots objects into a data.frame with a Date
# column (the shared ts_index, as character) and one column per series.
# Returns NULL (with a warning) when the series' indices do not match.
bootsList2df <- function(li) {
  indices <- lapply(li, getIndex)
  nm <- names(li)
  data <- as.data.frame(lapply(li, getDataPart))
  colnames(data) <- nm
  # all series must share exactly the same time index; a single-element
  # list is trivially consistent (compareList() requires length > 1,
  # so it must not be called in that case)
  if (length(li) == 1 || compareList(indices)) {
    d <- indices[[1]]
    data.frame(Date = as.character(d),
               data, stringsAsFactors = FALSE)
  } else {
    # BUGFIX: was cat() with an invisible NULL return; warn explicitly
    # and return NULL so callers can test the result
    warning("Time series of different length are not supported at the moment.")
    NULL
  }
}
# TRUE when every element of li is identical() to the first element.
# li must contain at least two elements.
compareList <- function(li) {
  stopifnot(length(li) > 1)
  # compare each remaining element against the first (dropped unused
  # local `l`; vapply guarantees a logical result)
  same <- vapply(li[-1], identical, logical(1), y = li[[1]])
  all(same)
}
# --- Interactive scratchpad: comparing indices / building data.frames -----
# NOTE(review): test_false is never defined in this file -- errors if run
compareList(test_false)
# indices of a raw and a seasonally adjusted (X12, D11) variant
a <- out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE@ts_index
b <- out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE_X12_D11@ts_index
identical(a,b)
class(a)
class(as.character(a))
test
bootsList2df(out$bootsList)
getIndex(out$bootsList$CH.KOF.DLU.GROUP.DLU1.F_948.NEGATIVE)
# column-bind the raw data parts of the fetched series
t4 <- lapply(t3,getDataPart)
as.data.frame(t4)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.