content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(shiny)
library(plotly)
library(sqldf)
library(shinythemes)
# Load the Pokemon master list used to populate the UI selector below.
# NOTE: the data frame is named `list`, masking base::list for non-call uses.
list<-read.csv('pokemonGO.csv')
list$Name<-as.character(list$Name)
# Manual data fixes: row 29's name is replaced (presumably an encoding issue
# with Nidoran's gender symbol -- confirm against the raw CSV) and row 32 dropped.
list[29,2]<-'Nidoran'
list<-list[-32,]
# ui.R -- Shiny UI for the Pokemon Go EDA app: a sidebar with a Pokemon
# selector (choices come from the `list` data frame loaded above) plus author
# credits, and a main panel with three tabs of outputs rendered by server.R.
shinyUI(fluidPage(theme = shinytheme("slate"),
titlePanel('Pokemon Go EDA'),
sidebarLayout(
sidebarPanel(
# Server-generated UI element (output id "ui") -- content defined in server.R.
uiOutput("ui",align="center"),
br(),
br(),
# Pokemon picker; defaults to the first name in the data set.
selectInput('Pkm',"Pokemon",choices =as.character(list$Name),selected = as.character(list$Name)[1]),
br(),
h4('Author: Jason Liu'),
a(h5("LinkedIn"),href="https://nl.linkedin.com/in/jiashen-liu-4658aa112",target="_blank"),
a(h5("Github"),href="https://github.com/liujiashen9307/",target="_blank"),
br(),
h4('Claim: This Shiny App uses the data set from Kaggle.com. Packages used in this app are: Shiny, plotly and sqldf'),
a(h5("Data Link 1"),href="https://www.kaggle.com/semioniy/predictemall",target="_blank"),
a(h5("Data Link 2"),href="https://www.kaggle.com/abcsds/pokemongo",target="_blank")
),
mainPanel(
tabsetPanel(
# Tab 1: stats, map, and spatial distribution plots.
# NOTE(review): splitLayout's argument is spelled cellWidths; the lowercase
# 'cellwidths' here is likely silently ignored -- confirm.
tabPanel('Geo Data',
p(h3('Statistics of Pokemon')),
splitLayout(cellwidths=c("50%","50%"),htmlOutput("cp"),htmlOutput("hp")),
p(h3('Map of Pokemon')),
plotlyOutput('map'),
p(h3('Frequency of showing up among cities')),
plotlyOutput('city'),
p(h3('Pokemon Gym Distance Distribution')),
plotlyOutput('Gym'),
p(h3('Population Density Distribution')),
plotlyOutput('pop')),
# Tab 2: temporal patterns of sightings.
tabPanel('Time Data',
p(h3('Show up data Heatmap')),
plotlyOutput('hm'),
p(h3('Show up data by Hour')),
plotOutput('hour'),
p(h3('Show up data by Minute')),
plotlyOutput('minute'),
p(h3('Proportion of showing up periods')),
plotlyOutput('pie')
),
# Tab 3: weather covariates vs. sighting counts.
tabPanel('Weather Data',
p(h3('Proportion of showing up under different Weathers')),
plotlyOutput('pie2'),
p(h3('Temperature and Pressure vs Showing up Count')),
plotlyOutput('tp1'),
p(h3('Wind Effects vs Showing up Count')),
plotlyOutput('wd2')
)
)
)
)
)
)) | /PokemonGo/ui.R | no_license | jindal2309/KaggleCompetition | R | false | false | 3,299 | r | library(shiny)
library(plotly)
library(sqldf)
library(shinythemes)
# Load the Pokemon master list used by the UI selector; note the data frame
# is named `list`, masking base::list for non-call uses.
list<-read.csv('pokemonGO.csv')
list$Name<-as.character(list$Name)
# Manual fixes: rename row 29 (presumably an encoding issue -- confirm) and drop row 32.
list[29,2]<-'Nidoran'
list<-list[-32,]
# ui.R (duplicate copy in this dump) -- Shiny UI for the Pokemon Go EDA app:
# sidebar with Pokemon selector and credits; main panel with three tabs of
# outputs rendered by server.R.
shinyUI(fluidPage(theme = shinytheme("slate"),
titlePanel('Pokemon Go EDA'),
sidebarLayout(
sidebarPanel(
uiOutput("ui",align="center"),
br(),
br(),
selectInput('Pkm',"Pokemon",choices =as.character(list$Name),selected = as.character(list$Name)[1]),
br(),
h4('Author: Jason Liu'),
a(h5("LinkedIn"),href="https://nl.linkedin.com/in/jiashen-liu-4658aa112",target="_blank"),
a(h5("Github"),href="https://github.com/liujiashen9307/",target="_blank"),
br(),
h4('Claim: This Shiny App uses the data set from Kaggle.com. Packages used in this app are: Shiny, plotly and sqldf'),
a(h5("Data Link 1"),href="https://www.kaggle.com/semioniy/predictemall",target="_blank"),
a(h5("Data Link 2"),href="https://www.kaggle.com/abcsds/pokemongo",target="_blank")
),
mainPanel(
tabsetPanel(
# NOTE(review): splitLayout's argument is spelled cellWidths; 'cellwidths'
# below is likely silently ignored -- confirm.
tabPanel('Geo Data',
p(h3('Statistics of Pokemon')),
splitLayout(cellwidths=c("50%","50%"),htmlOutput("cp"),htmlOutput("hp")),
p(h3('Map of Pokemon')),
plotlyOutput('map'),
p(h3('Frequency of showing up among cities')),
plotlyOutput('city'),
p(h3('Pokemon Gym Distance Distribution')),
plotlyOutput('Gym'),
p(h3('Population Density Distribution')),
plotlyOutput('pop')),
tabPanel('Time Data',
p(h3('Show up data Heatmap')),
plotlyOutput('hm'),
p(h3('Show up data by Hour')),
plotOutput('hour'),
p(h3('Show up data by Minute')),
plotlyOutput('minute'),
p(h3('Proportion of showing up periods')),
plotlyOutput('pie')
),
tabPanel('Weather Data',
p(h3('Proportion of showing up under different Weathers')),
plotlyOutput('pie2'),
p(h3('Temperature and Pressure vs Showing up Count')),
plotlyOutput('tp1'),
p(h3('Wind Effects vs Showing up Count')),
plotlyOutput('wd2')
)
)
)
)
)
)) |
## Association Rules
########## Book Data Set #########
# Market-basket analysis of a book-purchase data set using the arules package.
library(arules)
# NOTE: hard-coded local Windows path -- update before running elsewhere.
book<-read.transactions("C:\\Users\\Nik\\Downloads\\book (2).csv",format="basket")
inspect(book[1:10])
class(book)
# itemFrequencyPlot can be applicable only for transaction data
# count of each item from all the transactions
itemFrequencyPlot(book,topN=20)
# Mine rules: support >= 0.5, confidence >= 0.02, at least 4 items per rule.
book_rules<-apriori(book,parameter = list(support = 0.5,confidence = 0.02,minlen=4))
library(arulesViz)
plot(book_rules,method = "scatterplot")
plot(book_rules,method = "grouped")
plot(book_rules,method = "graph")
plot(book_rules,method = "mosaic")
inspect(book_rules[1:5])
# Rank the mined rules by lift and show the strongest few.
rules <- sort(book_rules,by="lift")
inspect(rules[1:4])
## With different support, confidence and minimum length ##
# Re-mine with a higher support (0.8) and lower confidence (0.01) threshold.
book_rules1<-apriori(book,parameter = list(support = 0.8,confidence = 0.01,minlen=4))
plot(book_rules1,method = "scatterplot")
plot(book_rules1,method = "grouped")
plot(book_rules1,method = "graph")
plot(book_rules1,method = "mosaic")
inspect(book_rules1[1:5])
# Rank the new rule set by lift.
rules1 <- sort(book_rules1,by="lift")
# BUG FIX: inspect the newly sorted rules1; the original re-inspected the
# first rule set's `rules` object.
inspect(rules1[1:4])
| /Association Rules - Book Dataset.R | no_license | Nikanksha/Association-Rules | R | false | false | 1,076 | r | ## Association Rules
########## Book Data Set #########
# Market-basket analysis of a book-purchase data set using the arules package.
library(arules)
# NOTE: hard-coded local Windows path -- update before running elsewhere.
book<-read.transactions("C:\\Users\\Nik\\Downloads\\book (2).csv",format="basket")
inspect(book[1:10])
class(book)
# itemFrequencyPlot can be applicable only for transaction data
# count of each item from all the transactions
itemFrequencyPlot(book,topN=20)
# Mine rules: support >= 0.5, confidence >= 0.02, at least 4 items per rule.
book_rules<-apriori(book,parameter = list(support = 0.5,confidence = 0.02,minlen=4))
library(arulesViz)
plot(book_rules,method = "scatterplot")
plot(book_rules,method = "grouped")
plot(book_rules,method = "graph")
plot(book_rules,method = "mosaic")
inspect(book_rules[1:5])
# Rank the mined rules by lift and show the strongest few.
rules <- sort(book_rules,by="lift")
inspect(rules[1:4])
## With different support, confidence and minimum length ##
# Re-mine with a higher support (0.8) and lower confidence (0.01) threshold.
book_rules1<-apriori(book,parameter = list(support = 0.8,confidence = 0.01,minlen=4))
plot(book_rules1,method = "scatterplot")
plot(book_rules1,method = "grouped")
plot(book_rules1,method = "graph")
plot(book_rules1,method = "mosaic")
inspect(book_rules1[1:5])
# Rank the new rule set by lift.
rules1 <- sort(book_rules1,by="lift")
# BUG FIX: inspect the newly sorted rules1; the original re-inspected the
# first rule set's `rules` object.
inspect(rules1[1:4])
|
# Project helpers (defines load_or_install used below, and add_quantile used
# later in this script).
source('util.R')
package_list <- list('devtools', 'data.table', 'ggplot2')
# Load and execute specified libraries
load_or_install(package_list)
### Read in command line arguments ###
# Expected positional arguments:
#   1 ensemble_outcome                  name of the outcome column
#   2 data_dir                          input directory (must end in '/')
#   3 output_dir                        output directory (must end in '/')
#   4 ensemble_performance_split        column used to pick the evaluation rows
#   5 ensemble_performance_split_value  value of that column to evaluate on
arg_len <- 5
args <- commandArgs(TRUE)
# print(args)
if (length(args) != arg_len) {
stop(sprintf('Must supply %i arguments -- you supplied %i\n', arg_len, length(args)))
} else {
ensemble_outcome <- as.character(args[1])
# cat(sprintf('ensemble_outcome: %s\n', ensemble_outcome))
data_dir <- as.character(args[2])
# cat(sprintf('data_dir: %s\n', data_dir))
output_dir <- as.character(args[3])
# cat(sprintf('output_dir: %s\n', output_dir))
ensemble_performance_split <- as.character(args[4])
# cat(sprintf('ensemble_performance_split: %s\n', ensemble_performance_split))
ensemble_performance_split_value <- as.numeric(args[5])
# cat(sprintf('ensemble_performance_split_value: %s\n', ensemble_performance_split_value))
}
### main function ###
postModelingAnalysis <- function(ensemble_outcome, data_dir, output_dir, ensemble_performance_split, ensemble_performance_split_value) {
  # Driver for post-modeling diagnostics: reads the holdout predictions, then
  # writes an ROC plot and mean-outcome-by-prediction-quantile tables into a
  # freshly created output_dir. data_dir/output_dir are assumed to end in '/'.
  unlink(output_dir, recursive = TRUE)
  dir.create(output_dir)

  # Holdout data with ensemble predictions, produced by the upstream step.
  holdout_path <- sprintf('%s03_data_with_predictions/holdout_data_with_predictions_including_ensemble.rds', data_dir)
  holdout_dt <- readRDS(holdout_path)

  # Column holding the ensemble's predicted outcome.
  pred_col <- paste(ensemble_outcome, 'ensemble_prediction', sep = '_')

  outputROC(dt = holdout_dt,
            prediction_col_name = pred_col,
            ensemble_outcome = ensemble_outcome,
            ensemble_performance_split = ensemble_performance_split,
            ensemble_performance_split_value = ensemble_performance_split_value)

  # Quantile groupings for the mean-outcome tables; names parallel the values.
  quantile_vals <- c(5, 10, 20, 100)
  quantile_names <- c('quintile', 'decile', 'ventile', 'percentile')
  if (length(quantile_vals) != length(quantile_names)) {
    stop('Error: quantile values and names must have same number of elements')
  }

  meanYByYHatQuantile(dt = holdout_dt,
                      prediction_col_name = pred_col,
                      ensemble_outcome = ensemble_outcome,
                      ensemble_performance_split = ensemble_performance_split,
                      ensemble_performance_split_value = ensemble_performance_split_value,
                      quantile_vals = quantile_vals,
                      quantile_names = quantile_names)
}
### component functions ###
# Plot an ROC curve (with trapezoidal AUC annotation) for the ensemble
# predictions, evaluated on the rows where the split column equals the
# requested value. Writes 'ensemble_roc.png' into the global `output_dir`
# (NOTE(review): consider passing output_dir explicitly).
outputROC <- function(dt, prediction_col_name, ensemble_outcome, ensemble_performance_split, ensemble_performance_split_value) {
  dt <- dt[get(ensemble_performance_split) == ensemble_performance_split_value] # subset data to portion on which we will evaluate model
  labels <- dt[, get(ensemble_outcome)] # observed outcomes
  # FIX: use the prediction_col_name argument (the original ignored it and
  # recomputed the same column name inline); also use <- for assignment.
  predictions <- dt[, get(prediction_col_name)] # predicted outcomes
  labels <- labels[order(predictions, decreasing = TRUE)] # order outcomes by decreasing order of prediction value
  # Cumulative TPR/FPR as the threshold sweeps from high to low predictions.
  roc_dt <- data.table(tpr = cumsum(labels) / sum(labels), fpr = cumsum(!labels) / sum(!labels), labels)
  d_tpr <- c(diff(roc_dt$tpr), 0) # true positive rate step sizes
  d_fpr <- c(diff(roc_dt$fpr), 0) # false positive rate step sizes
  # Trapezoidal-rule AUC: rectangular area plus triangular correction for ties.
  auc <- sum(roc_dt$tpr * d_fpr) + sum(d_tpr * d_fpr) / 2
  # plot ROC
  options(device = 'png')
  plt <- ggplot(data = roc_dt, aes(x = fpr, y = tpr)) +
    geom_line(color = 'red') +
    geom_abline(intercept = 0, slope = 1) +
    xlim(0, 1) +
    ylim(0, 1) +
    annotate(geom = 'text', x = 1, y = 0, label = sprintf('AUC: %s', as.character(round(auc, 3))), vjust = 1, hjust = 1) +
    labs(title = 'ROC', x = 'False Positive Rate', y = 'True Positive Rate')
  # FIX: pass the plot explicitly rather than relying on ggsave's implicit
  # last_plot() default -- more robust if other plots are created in between.
  ggsave(filename = sprintf('%sensemble_roc.png', output_dir), plot = plt, device = 'png', width = 7, height = 7)
}
# For each requested quantile grouping (e.g. deciles of the predicted outcome),
# write a CSV of the mean observed outcome within each prediction quantile.
# Writes into the global `output_dir`; add_quantile() comes from util.R.
meanYByYHatQuantile <- function(dt, prediction_col_name, ensemble_outcome, ensemble_performance_split, ensemble_performance_split_value, quantile_vals, quantile_names) {
  dt <- dt[get(ensemble_performance_split) == ensemble_performance_split_value] # subset data to portion on which we will evaluate model
  # FIX: seq_along() instead of 1:length() (safe on empty input); reuse the
  # column names instead of rebuilding the sprintf expressions repeatedly, and
  # use the prediction_col_name argument (previously ignored and recomputed).
  for (i in seq_along(quantile_vals)) {
    quantile_col <- sprintf('prediction_%s', quantile_names[i])
    mean_col <- sprintf('mean_%s_by_%s', ensemble_outcome, quantile_names[i])
    # Bin predictions into quantile_vals[i] quantiles.
    # NOTE(review): 'n_quantil' presumably partial-matches the argument name in
    # util.R's add_quantile() -- confirm the spelling against its signature.
    dt[, (quantile_col) := add_quantile(get(prediction_col_name), n_quantil = quantile_vals[i])]
    dt[, (mean_col) := mean(get(ensemble_outcome), na.rm = TRUE), by = c(quantile_col)]
    mean_dt <- unique(dt[, c(quantile_col, mean_col), with = FALSE])
    setkeyv(mean_dt, quantile_col) # sort rows by quantile ...
    setkeyv(mean_dt, NULL)         # ... then drop the key before writing
    fwrite(mean_dt, sprintf('%smean_%s_by_predicted_%s_%s.csv', output_dir, ensemble_outcome, ensemble_outcome, quantile_names[i]))
  }
}
### execute ###
# Entry point: run the full analysis with the parsed command-line arguments.
postModelingAnalysis(ensemble_outcome, data_dir, output_dir, ensemble_performance_split, ensemble_performance_split_value)
| /scripts/R/06_post_modeling_analysis.R | no_license | ctcovington/ml_pipeline_prototype | R | false | false | 5,311 | r | source('util.R')
package_list <- list('devtools', 'data.table', 'ggplot2')
# Load and execute specified libraries
load_or_install(package_list)
### Read in command line arguments ###
# Expected positional args: ensemble_outcome, data_dir, output_dir,
# ensemble_performance_split, ensemble_performance_split_value
# (directories must end in '/').
arg_len <- 5
args <- commandArgs(TRUE)
# print(args)
if (length(args) != arg_len) {
stop(sprintf('Must supply %i arguments -- you supplied %i\n', arg_len, length(args)))
} else {
ensemble_outcome <- as.character(args[1])
# cat(sprintf('ensemble_outcome: %s\n', ensemble_outcome))
data_dir <- as.character(args[2])
# cat(sprintf('data_dir: %s\n', data_dir))
output_dir <- as.character(args[3])
# cat(sprintf('output_dir: %s\n', output_dir))
ensemble_performance_split <- as.character(args[4])
# cat(sprintf('ensemble_performance_split: %s\n', ensemble_performance_split))
ensemble_performance_split_value <- as.numeric(args[5])
# cat(sprintf('ensemble_performance_split_value: %s\n', ensemble_performance_split_value))
}
### main function ###
# Driver: clears/recreates output_dir, loads the holdout predictions, then
# writes the ROC plot and the mean-outcome-by-prediction-quantile tables.
# NOTE(review): the sprintf paths assume data_dir/output_dir end with '/'.
postModelingAnalysis <- function(ensemble_outcome, data_dir, output_dir, ensemble_performance_split, ensemble_performance_split_value) {
unlink(output_dir, recursive = TRUE)
dir.create(output_dir)
# read in holdout data
holdout_dt <- readRDS(sprintf('%s03_data_with_predictions/holdout_data_with_predictions_including_ensemble.rds', data_dir))
# output ROC
outputROC(dt = holdout_dt, prediction_col_name = paste(ensemble_outcome, 'ensemble_prediction', sep = '_'), ensemble_outcome = ensemble_outcome,
ensemble_performance_split = ensemble_performance_split, ensemble_performance_split_value = ensemble_performance_split_value)
# create tables of mean y_bar by y_hat quantile
quantile_vals <- c(5, 10, 20, 100)
quantile_names <- c('quintile', 'decile', 'ventile', 'percentile')
if (length(quantile_vals) != length(quantile_names)) {
stop('Error: quantile values and names must have same number of elements')
}
meanYByYHatQuantile(dt = holdout_dt, prediction_col_name = paste(ensemble_outcome, 'ensemble_prediction', sep = '_'),
ensemble_outcome = ensemble_outcome, ensemble_performance_split = ensemble_performance_split,
ensemble_performance_split_value = ensemble_performance_split_value,
quantile_vals = quantile_vals, quantile_names = quantile_names)
}
### component functions ###
# Plot an ROC curve (with trapezoidal AUC annotation) for the ensemble
# predictions, evaluated on the rows where the split column equals the
# requested value. Writes 'ensemble_roc.png' into the global `output_dir`
# (NOTE(review): consider passing output_dir explicitly).
outputROC <- function(dt, prediction_col_name, ensemble_outcome, ensemble_performance_split, ensemble_performance_split_value) {
  dt <- dt[get(ensemble_performance_split) == ensemble_performance_split_value] # subset data to portion on which we will evaluate model
  labels <- dt[, get(ensemble_outcome)] # observed outcomes
  # FIX: use the prediction_col_name argument (the original ignored it and
  # recomputed the same column name inline); also use <- for assignment.
  predictions <- dt[, get(prediction_col_name)] # predicted outcomes
  labels <- labels[order(predictions, decreasing = TRUE)] # order outcomes by decreasing order of prediction value
  # Cumulative TPR/FPR as the threshold sweeps from high to low predictions.
  roc_dt <- data.table(tpr = cumsum(labels) / sum(labels), fpr = cumsum(!labels) / sum(!labels), labels)
  d_tpr <- c(diff(roc_dt$tpr), 0) # true positive rate step sizes
  d_fpr <- c(diff(roc_dt$fpr), 0) # false positive rate step sizes
  # Trapezoidal-rule AUC: rectangular area plus triangular correction for ties.
  auc <- sum(roc_dt$tpr * d_fpr) + sum(d_tpr * d_fpr) / 2
  # plot ROC
  options(device = 'png')
  plt <- ggplot(data = roc_dt, aes(x = fpr, y = tpr)) +
    geom_line(color = 'red') +
    geom_abline(intercept = 0, slope = 1) +
    xlim(0, 1) +
    ylim(0, 1) +
    annotate(geom = 'text', x = 1, y = 0, label = sprintf('AUC: %s', as.character(round(auc, 3))), vjust = 1, hjust = 1) +
    labs(title = 'ROC', x = 'False Positive Rate', y = 'True Positive Rate')
  # FIX: pass the plot explicitly rather than relying on ggsave's implicit
  # last_plot() default -- more robust if other plots are created in between.
  ggsave(filename = sprintf('%sensemble_roc.png', output_dir), plot = plt, device = 'png', width = 7, height = 7)
}
# For each requested quantile grouping (e.g. deciles of the predicted outcome),
# write a CSV of the mean observed outcome within each prediction quantile.
# Writes into the global `output_dir`; add_quantile() comes from util.R.
meanYByYHatQuantile <- function(dt, prediction_col_name, ensemble_outcome, ensemble_performance_split, ensemble_performance_split_value, quantile_vals, quantile_names) {
  dt <- dt[get(ensemble_performance_split) == ensemble_performance_split_value] # subset data to portion on which we will evaluate model
  # FIX: seq_along() instead of 1:length() (safe on empty input); reuse the
  # column names instead of rebuilding the sprintf expressions repeatedly, and
  # use the prediction_col_name argument (previously ignored and recomputed).
  for (i in seq_along(quantile_vals)) {
    quantile_col <- sprintf('prediction_%s', quantile_names[i])
    mean_col <- sprintf('mean_%s_by_%s', ensemble_outcome, quantile_names[i])
    # Bin predictions into quantile_vals[i] quantiles.
    # NOTE(review): 'n_quantil' presumably partial-matches the argument name in
    # util.R's add_quantile() -- confirm the spelling against its signature.
    dt[, (quantile_col) := add_quantile(get(prediction_col_name), n_quantil = quantile_vals[i])]
    dt[, (mean_col) := mean(get(ensemble_outcome), na.rm = TRUE), by = c(quantile_col)]
    mean_dt <- unique(dt[, c(quantile_col, mean_col), with = FALSE])
    setkeyv(mean_dt, quantile_col) # sort rows by quantile ...
    setkeyv(mean_dt, NULL)         # ... then drop the key before writing
    fwrite(mean_dt, sprintf('%smean_%s_by_predicted_%s_%s.csv', output_dir, ensemble_outcome, ensemble_outcome, quantile_names[i]))
  }
}
### execute ###
# Entry point: run the full analysis with the parsed command-line arguments.
postModelingAnalysis(ensemble_outcome, data_dir, output_dir, ensemble_performance_split, ensemble_performance_split_value)
|
# Extracted example code for radiant.data's iterms() (interaction terms).
# NOTE(review): assumes %>% is re-exported by radiant.data -- confirm.
library(radiant.data)
### Name: iterms
### Title: Create a vector of interaction terms
### Aliases: iterms
### ** Examples
paste0("var", 1:3) %>% iterms(2)
paste0("var", 1:3) %>% iterms(3)
paste0("var", 1:3) %>% iterms(2, sep = ".")
| /data/genthat_extracted_code/radiant.data/examples/iterms.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 241 | r | library(radiant.data)
# Extracted example code for radiant.data's iterms() (interaction terms).
### Name: iterms
### Title: Create a vector of interaction terms
### Aliases: iterms
### ** Examples
paste0("var", 1:3) %>% iterms(2)
paste0("var", 1:3) %>% iterms(3)
paste0("var", 1:3) %>% iterms(2, sep = ".")
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ModelA_D.R
\name{calcResid}
\alias{calcResid}
\title{Forward calculation for residual only}
\usage{
calcResid(par, data, cModel, fixParms, fixInis, transParms, sepn, ...)
}
\arguments{
\item{par}{parameters}
\item{data}{data frame}
\item{cModel}{model definition}
\item{fixParms}{fixed parameters}
\item{fixInis}{fixed initial values}
\item{transParms}{transformation functions}
\item{sepn}{separator index}
\item{...}{other inputs (currently unused)}
}
\value{
a vector of residuals
}
\description{
Forward calculation
}
| /man/calcResid.Rd | no_license | zhenglei-gao/SedimentWater | R | false | false | 607 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ModelA_D.R
\name{calcResid}
\alias{calcResid}
\title{Forward calculation for residual only}
\usage{
calcResid(par, data, cModel, fixParms, fixInis, transParms, sepn, ...)
}
\arguments{
\item{par}{parameters}
\item{data}{data frame}
\item{cModel}{model definition}
\item{fixParms}{fixed parameters}
\item{fixInis}{fixed initial values}
\item{transParms}{transformation functions}
\item{sepn}{separator index}
\item{...}{other inputs (currently unused)}
}
\value{
a vector of residuals
}
\description{
Forward calculation
}
|
# Plot 2: Global active power over time for 1-2 Feb 2007
# (UCI "Individual household electric power consumption" data set).
# Locates the data anywhere under the working directory, downloading and
# extracting it first if necessary.

# Vector of ALL file paths under the working directory.
file_list <- list.files(getwd(), all.files = TRUE, full.names = TRUE, recursive = TRUE)

# Number of matching data files found (1 means the data set is present).
exists <- sum(grepl("household_power_consumption.txt", file_list))

# Will hold the path to the data file.
data_source <- character()

# Download and extract the data set if it is not already present.
# FIX: reuse `exists` -- the original computed it and then recomputed the same
# grepl() sum in the condition, leaving `exists` unused.
if (exists != 1) {
  SourceUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  dest <- paste0(getwd(), "/exdata-data-household_power_consumption.zip")
  download.file(SourceUrl, dest)
  unzip(dest)
  data_source <- paste0(getwd(), "/household_power_consumption.txt")
}

# If the file was already on disk, take the matching path(s) from the scan.
if (length(data_source) == 0) data_source <- file_list[grepl("household_power_consumption.txt", file_list)]

# Read the raw lines, then keep only the rows for the two target dates
# (the header row does not match, so header = FALSE below).
data <- readLines(data_source)
x <- paste("^2/2/2007", "^1/2/2007", sep = "|")
data <- read.table(text = data[grep(x, data)], sep = ";", header = FALSE, comment.char = "", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
colnames(data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

# Build proper Date and POSIXct date-time columns.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$DateTime <- as.POSIXct(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")

# Write the line plot of global active power against time.
png(file = "plot2.png")
plot(data$DateTime, data$Global_active_power,
     ylab = "Global Active Power (kilowatts)",
     xlab = "", type = "l")
dev.off()
| /plot2.R | no_license | WongD/ExData_Plotting1 | R | false | false | 2,057 | r | # Check if data exists in working directory (or sub directories)
# if not download data
# create vector of ALL file paths
file_list<-list.files(getwd(), all.files=TRUE, full.names=TRUE, recursive=TRUE)
# check if "household power consumption.txt" file exists
# NOTE(review): `exists` is assigned but never used -- the if() below
# recomputes the same grepl() sum.
exists = sum(grepl("household_power_consumption.txt", file_list))
#initialise variable
data_source <- character()
# if file does not exist download to working directory and extract it
if (sum(grepl("household_power_consumption.txt", file_list))!=1) {
# Set source url
SourceUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# set destination
dest<- paste(getwd(), "/exdata-data-household_power_consumption.zip", sep="")
#download file
download.file(SourceUrl, dest)
# extract file
unzip(dest)
# set source file location
data_source <- paste (getwd(), "/household_power_consumption.txt", sep="")
}
# set source file location if it wasn't downloaded
if (length(data_source)==0) data_source <- file_list[which(grepl("household_power_consumption.txt", file_list)==1)]
# read data into R using read lines
data <- readLines(data_source)
#create regular expression string to filter data to table
# Keep only rows dated 1 or 2 Feb 2007 (header row does not match).
x <-paste("^2/2/2007","^1/2/2007", sep="|")
# read relevant data to table
data<-read.table(text=data[grep(x, data)],sep=";",header=FALSE,comment.char="", colClasses = c("character", "character", "numeric","numeric","numeric","numeric","numeric", "numeric", "numeric"))
# add column names
colnames(data)<- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# convert date and create datetime
data$Date <- as.Date(data$Date,"%d/%m/%Y")
data$DateTime = as.POSIXct(paste(data$Date, data$Time), format="%Y-%m-%d %H:%M:%S")
# create plot 2
png(file="plot2.png")
plot(data$DateTime, data$Global_active_power,
ylab="Global Active Power (kilowatts)",
xlab="", type="l")
dev.off()
|
############################################
# Flatfish Harvest Control Rule MSE #
#Load in the files from the Operating Model#
# and the estimation model. #
# #
# Plotting and Reporting #
# Created January 13, 2016 by #
# Chantel Wetzel #
############################################
#Load in the R objects from the Simulation Eval Code ========================================
# One list entry per steepness scenario; each load() brings in the
# corresponding *_all object saved by the simulation evaluation code.
# FIX: use <- consistently for assignment and seq_along() for the loop index.
drive <- "G:/SyncBack"
# Steepness scenarios kept for this figure set (others commented out):
# "Steep_85_75", "Steep_85_95", "Steep_85_data_30",
# "Steep_85_sigmaR_60", "Steep_85_auto", "Steep_85_auto_sigmaR_60"
steep.vec <- c("Steep_75", "Steep_85", "Steep_95")
# Harvest control rule scenario names (labelled "20-5" etc. in the plots below).
hcr.vec <- c("hcr_20_5_ramp_constant",
             "hcr_25_5_ramp_constant",
             "hcr_30_10_ramp_constant",
             "hcr_40_10_ramp_constant")
dir <- paste0(drive, "/PhD/Chapter4/output")
om.out <- ss.out <- hcr.out <- med.out <- list()
for (a in seq_along(steep.vec)) {
  load(paste0(dir, "/", steep.vec[a], "_om_all"))
  om.out[[a]] <- om.all
  load(paste0(dir, "/", steep.vec[a], "_ss_all"))
  ss.out[[a]] <- ss.all
  load(paste0(dir, "/", steep.vec[a], "_hcr_all"))
  hcr.out[[a]] <- hcr.all
  load(paste0(dir, "/", steep.vec[a], "_medians_all"))
  med.out[[a]] <- med.all
}
# Draw a text label positioned as fractions (xy) of the current plot region;
# extra arguments in ... are passed straight through to text().
print.letter <- function(label = "(a)", xy = c(0.1, 0.925), ...) {
  usr <- par("usr")  # current plot-region limits: c(x1, x2, y1, y2)
  lab.x <- usr[1] + xy[1] * (usr[2] - usr[1])
  lab.y <- usr[3] + xy[2] * (usr[4] - usr[3])
  text(x = lab.x, y = lab.y, labels = label, ...)
}
print.numeric<-function(x, digits) { formatC(x, digits = digits, format = "f") }
# Relative-biomass targets, one per harvest control rule (20-5 ... 40-10).
target = c(0.20, 0.25, 0.30, 0.40)
#alpha.label = c('(a)', '(b)', '(c)', '(d)', '(e)', '(f)','(g)', '(h)', '(i)', '(j)')
# Panel labels for the steepness scenarios (plotmath expressions);
# alternative labels for other scenarios are kept commented out below.
labels2 =c(expression("steepness"["75"]), #expression(italic("h")['75']),
expression("steepness"["85"]),#expression(italic("h")['85']),
expression("steepness"["95"]))#expression(italic("h")['95']),
#"recr. var.",
#"recr. auto.",
#"var. & auto")
#expression(italic("h")[LO]),
#expression(italic("h")[HI]),
#'RD',
#expression(sigma[R]),
#expression(rho[R]),
#expression(sigma[R]~' & '~rho[R]))
# Short harvest-control-rule labels used on plot axes.
hcr.lab = c("20-5", "25-5", "30-10", "40-10")
x = 151:175 # Summary years for the operating model
# Quantiles used for summary intervals: 5th, median, 95th.
quant.vec = c(0.05, 0.50, 0.95)
# Custom plot colors.
green = rgb(93/255,151/255,50/255)
blue = rgb(0/255,86/255,149/255)
shale = rgb(29/255,37/255,45/255)
#=====================================================================================================================
# Calculate the percentage below the target stock size
#=====================================================================================================================
# Summary matrices over the final 25 years (rows = steepness scenario,
# cols = harvest control rule):
#   above.target   - proportion of year/simulation values above the HCR's target
#   med.dist       - median relative biomass
#   below.msst.all - proportion below half the target
#   si             - formatted 5th-95th percentile interval string
above.target = matrix(NA, length(steep.vec), length(hcr.vec))
med.dist = matrix(NA, length(steep.vec), length(hcr.vec))
below.msst.all = matrix(NA, length(steep.vec), length(hcr.vec))
si = matrix(NA, length(steep.vec), length(hcr.vec))
x = 151:175 # Summary years for the operating model
for (a in 1:length(steep.vec)){
for(b in 1:length(hcr.vec)){
above.target[a,b] = sum(om.out[[a]]$depl[b,x,] > target[b]) / length(om.out[[a]]$depl[b,x,])
med.dist[a,b] = median(om.out[[a]]$depl[b,x,])
below.msst.all[a,b] = sum(om.out[[a]]$depl[b,x,] < target[b] * 0.50) / length(om.out[[a]]$depl[b,x,])
temp = quantile(om.out[[a]]$depl[b,x,], quant.vec[c(1,3)])
# print() here dispatches to print.numeric() defined above, which FORMATS
# the value to 2 decimals (returns a string) rather than printing it.
si[a,b] = paste(print(temp[1],2), "-", print(temp[2],2))
}
}
#=====================================================================================================================
# Relative Stock Size Distributions
#=====================================================================================================================
# Figure: stacked density distributions of relative biomass (final 25 years)
# -- one panel per steepness scenario, four HCR densities stacked per panel.
setwd("C:/Users/chantell.wetzel/Documents/GitHub/Dissertation/Presentation")
#setwd(paste0(drive,"/PhD/Chapter4/Presentation/Plots/"))
png(filename = "Depletion_Dist_95SI_alt_3.png", width = 12, height = 8.5, units = 'in', res = 256)
par(mfrow = c(1, 3), mar = c(0.7,0.5,1.2,1), oma = c(4,4,4,5))
letter.cex = 1.8; axis.cex = 1.7; label.cex = 1.5 #0.8
dep.xvec = c(0.2, 0.35, 0.40, 0.50); dep.yvec = c(0.22, 0.47, 0.72, 0.97)
sim = 17; ymax = 10 * length(hcr.vec)
lab.pos = c(rep(0.23,3), 0.17, 0.18, 0.23)
set.bw = 0.021
main.lab = c("Steepness = 0.75", "Steepness = 0.85", "Steepness = 0.95")
adj.vec = c(-3, 0, 3)
col.vec = c("deepskyblue", "dodgerblue3")
# Final palette (overrides the line above): full density, 5-95% interval, target line.
col.vec = c(shale,blue, green)
for (a in 1:length(steep.vec)) {
offset = 0
# Density of relative biomass for the first HCR, drawn as a filled polygon.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
plot("","",xlim = c(0, 0.70), ylim = c(0, ymax), xlab = "", ylab = '', axes = F, yaxs="i",
xaxs = 'i', main = "")
mtext(main.lab[a], side = 3, outer = F, line = 1, cex = label.cex)
polygon(xx, yy, col = col.vec[1], lty = 0)
# Overlay the 5th-95th percentile portion of the density in a second color.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[1,x,], quant.vec[1]),
to = quantile(om.out[[a]]$depl[1,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
lines(c(target[1], target[1]), c(0, max(yy)+ 2), col = col.vec[3], lty = 1, lwd = 3) # Target line
print.letter(xy = c(0.80, dep.yvec[1] - 0.05), label = paste("med =", print(med.dist[a,1],2)), cex = letter.cex)
print.letter(xy = c(0.77, dep.yvec[1]), label = paste0("> target = ", print(100*above.target[a,1],0), "%"), cex = letter.cex)
box()
#print.letter(xy = c(lab.pos[a], 0.95), label = labels2[a], cex = (letter.cex+0.1))
#print.letter(xy = c(0.06, 0.95), label = alpha.label[a], cex = letter.cex)
# Remaining HCRs stacked above, each shifted up by 10 density units.
for (b in 2:length(hcr.vec)){
offset = offset + 10
out = density(om.out[[a]]$depl[b,x,], bw = set.bw,from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx,yy, col = col.vec[1], lty = 0)
out = density(om.out[[a]]$depl[b,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[b,x,], quant.vec[1]),
to = quantile(om.out[[a]]$depl[b,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
abline(h = offset, lty = 1)
lines(c(target[b], target[b]), c(offset , max(yy) + 2), col = col.vec[3], lty = 1, lwd = 3)
print.letter(xy = c(0.80, dep.yvec[b] - 0.05), label = paste("med =",print(med.dist[a,b],2)), cex = letter.cex)
print.letter(xy = c(0.75, dep.yvec[b]), label = paste0("> target = ", print(100*above.target[a,b],0), "%"), cex = letter.cex)
}
axis(side = 1, at = seq(0, 0.60, 0.10), cex.axis = axis.cex)
# HCR labels on the right-hand axis of the last panel only.
if (a == 3 || a == 6 ){
axis(side = 4, at = c(0.15*ymax, 0.38*ymax, 0.62*ymax, 0.85*ymax), label = c("20-5", "25-5", "30-10", "40-10"), las = 1,
cex.axis = axis.cex, tick = FALSE, padj = 0, hadj = 0.25 )
}
}
mtext("Frequency", side = 2, outer = T, line = 1, cex = label.cex)
mtext("Relative biomass", side = 1, outer = T, line = 2, cex = label.cex)
p = par('usr')
mtext("Harvest control rule", side = 4, outer = T, line = 4, cex = label.cex, las = 3)
dev.off()
# Single panel
# Single-panel version of the figure above (first steepness scenario only).
png(filename = "Depletion_Dist_95SI_alt_1.png", width = 7, height = 8.5, units = 'in', res = 256)
par(mfrow = c(1, 1), mar = c(0.7,0.5,1.2,1), oma = c(4,4,4,5))
letter.cex = 1.8; axis.cex = 1.7; label.cex = 1.5 #0.8
dep.xvec = c(0.2, 0.35, 0.40, 0.50); dep.yvec = c(0.22, 0.47, 0.72, 0.97)
sim = 17; ymax = 10 * length(hcr.vec)
lab.pos = c(rep(0.23,3), 0.17, 0.18, 0.23)
set.bw = 0.021
main.lab = c("Steepness = 0.75", "Steepness = 0.85", "Steepness = 0.95")
adj.vec = c(-3, 0, 3)
col.vec = c("deepskyblue", "dodgerblue3")
# Final palette (overrides the line above): full density, 5-95% interval, target line.
col.vec = c(shale,blue, green)
for (a in 1:1) {
offset = 0
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
plot("","",xlim = c(0, 0.70), ylim = c(0, ymax), xlab = "", ylab = '', axes = F, yaxs="i",
xaxs = 'i', main = "")
mtext(main.lab[a], side = 3, outer = F, line = 1, cex = label.cex)
polygon(xx, yy, col = col.vec[1], lty = 0)
# Overlay the 5th-95th percentile portion of the density in a second color.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[1,x,], quant.vec[1]),
to = quantile(om.out[[a]]$depl[1,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
lines(c(target[1], target[1]), c(0, max(yy)+ 2), col = col.vec[3], lty = 1, lwd = 3) # Target line
print.letter(xy = c(0.80, dep.yvec[1] - 0.05), label = paste("med =", print(med.dist[a,1],2)), cex = letter.cex)
print.letter(xy = c(0.77, dep.yvec[1]), label = paste0("> target = ", print(100*above.target[a,1],0), "%"), cex = letter.cex)
box()
#print.letter(xy = c(lab.pos[a], 0.95), label = labels2[a], cex = (letter.cex+0.1))
#print.letter(xy = c(0.06, 0.95), label = alpha.label[a], cex = letter.cex)
# Remaining HCRs stacked above, each shifted up by 10 density units.
for (b in 2:length(hcr.vec)){
offset = offset + 10
out = density(om.out[[a]]$depl[b,x,], bw = set.bw,from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx,yy, col = col.vec[1], lty = 0)
out = density(om.out[[a]]$depl[b,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[b,x,], quant.vec[1]),
to = quantile(om.out[[a]]$depl[b,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
abline(h = offset, lty = 1)
lines(c(target[b], target[b]), c(offset , max(yy) + 2), col = col.vec[3], lty = 1, lwd = 3)
print.letter(xy = c(0.80, dep.yvec[b] - 0.05), label = paste("med =",print(med.dist[a,b],2)), cex = letter.cex)
print.letter(xy = c(0.75, dep.yvec[b]), label = paste0("> target = ", print(100*above.target[a,b],0), "%"), cex = letter.cex)
}
axis(side = 1, at = seq(0, 0.60, 0.10), cex.axis = axis.cex)
axis(side = 4, at = c(0.15*ymax, 0.38*ymax, 0.62*ymax, 0.85*ymax), label = c("20-5", "25-5", "30-10", "40-10"), las = 1,
cex.axis = axis.cex, tick = FALSE, padj = 0, hadj = 0.25 )
}
mtext("Frequency", side = 2, outer = T, line = 1, cex = label.cex)
mtext("Relative biomass", side = 1, outer = T, line = 2, cex = label.cex)
p = par('usr')
mtext("Harvest control rule", side = 4, outer = T, line = 4, cex = label.cex, las = 3)
dev.off()
#=====================================================================================================================
# Trade-off plot
#=====================================================================================================================
# Reference MSY values, one per steepness scenario -- TODO confirm units
# (used as reference lines in the trade-off plot below).
msy.vec = c(160, 142, 182)
# Calculate the median probability over the last 25 years
# target10: median probability of relative biomass being within +/-10% of the
# target (presumably; matches the axis label defined below -- confirm against
# how hcr.out$target.true is computed). ave.catch / aav: median average catch
# and median average annual catch variability per scenario x HCR.
target10 = ave.catch = aav = matrix(NA, length(steep.vec), length(hcr.vec))
sum.yrs = 56:80
for (a in 1:length(steep.vec)){
target10[a,] = apply(hcr.out[[a]]$target.true[,sum.yrs], 1, median)
ave.catch[a,] = apply(hcr.out[[a]]$catch.ave, 1, median)
aav[a,] = apply(hcr.out[[a]]$aav, 1, median) }
png(filename = "Tradeoffs_msy_1.png", width = 12, height = 8.5, units = 'in', res = 256)
main.lab = c("Steepness = 0.75", "Steepness = 0.85", "Steepness = 0.95")
pch.vec = 21:24
pch.col = c('red', 'green', 'blue') #pch.col = c(1, "grey50", "white")
#pch.col = c(green, blue, shale)
par(mfrow = c(1, 1), mar = c(3,4,3,3), oma = c(2,2,1,0))
letter.cex = 1.6; axis.cex = 1.8; label.cex = 1.5; pch.cex = 3; main.cex = 1
part2 = expression("Biomass"[target] %+-% "10%")
max.prob = 0.50 ; max.catch = 190; min.catch = 120
#1 ave catch vs. 10% for correct steep range
plot(target10[1,], ave.catch[1,], axes = F, xlim = c(0,max.prob), ylim = c(min.catch,max.catch), yaxs = 'i',
xaxs = 'i', xlab = "", ylab = "")
#abline(h = msy.vec[1], lty = 2, col = 'grey70')
#points(0.03, msy.vec[1], pch = pch.vec[1], bg = pch.col[2], cex = pch.cex)
#abline(h = msy.vec[2], lty = 2, col = 'grey70')
#points(0.03, msy.vec[2], pch = pch.vec[1], bg = pch.col[1], cex = pch.cex)
#abline(h = msy.vec[3], lty = 2, col = 'grey70')
#points(0.03, msy.vec[3], pch = pch.vec[1], bg = pch.col[3], cex = pch.cex)
box()
for(a in 1:3){
lines(target10[a,], ave.catch[a,], lty = 2)
points(target10[a,1], ave.catch[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
axis(side = 1, cex.axis = axis.cex)
axis(side = 2, las = 1, at = seq(120, 180, 20), cex.axis = axis.cex)
mtext(side = 2, "Average catch", line = 3.5, cex = label.cex)
mtext(side = 1, part2, line = 3.5, cex = label.cex)
for(b in 2:length(hcr.vec)){
points(target10[a,b], ave.catch[a,b], pch = pch.vec[b],cex = pch.cex, bg = pch.col[a])
}
}
#2 aav vs 10% probability
#plot(target10[1,], aav[1,], axes = F, xlim = c(0,max.prob), ylim = c(0, 12), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#plot(aav[1,], target10[1,], axes = F, xlim = c(0,12), ylim = c(0, max.prob), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#box()
#for(a in 1:3){
# lines(aav[a,], target10[a,], lty = 2)
# points(aav[a,1], target10[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# axis(side = 2, cex.axis = axis.cex)
# axis(side = 1, las = 1, at = seq(0, 15, 5), cex.axis = axis.cex)
# mtext(side = 1, "Average annual variation - catch", line = 3, cex = label.cex)
# mtext(side = 2, part2, line = 3.5, cex = label.cex)
# for(b in 2:length(hcr.vec)){
# points(aav[a,b], target10[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# }
# #lines(target10[a,], aav[a,], lty = 2)
# #points(target10[a,1], aav[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# #axis(side = 1, cex.axis = axis.cex)
# #axis(side = 2, las = 1, at = seq(0, 15, 5), cex.axis = axis.cex)
# #mtext(side = 2, "Average annual variation - catch", line = 3, cex = label.cex)
# #mtext(side = 1, part2, line = 3.5, cex = label.cex)
# #for(b in 2:length(hcr.vec)){
# # points(target10[a,b], aav[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# #}
#}
#print.letter(xy = c(0.08, 0.95), label = alpha.label[2], cex = letter.cex)
#mtext(side = 3, comb.lab[1], line = 0, cex = main.cex)
legend('topright', legend = main.lab, pch = rep(16,3), col = pch.col, bty = 'n', cex = letter.cex + 0.5)
legend('topright', legend = main.lab, pch = rep(21,3), col = rep(1,3), bty = 'n', cex = letter.cex + 0.5)
#3 aav vs 10% probability
#plot(aav[1,], ave.catch[1,], axes = F, ylim = c(min.catch,max.catch), xlim = c(0, 12), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#box()
#for(a in 1:3){
# lines(aav[a,], ave.catch[a,], lty = 2)
# points(aav[a,1], ave.catch[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# axis(side = 2, las = 1,at = seq(120, 180, 20),cex.axis = axis.cex)
# axis(side = 1, las = 1, at = c(0,5,10,15), cex.axis = axis.cex)
# mtext(side = 1, "Average annual variation - catch", line = 3, cex = label.cex)
# mtext(side = 2, "Average catch", line = 3.5, cex = label.cex)
# for(b in 2:length(hcr.vec)){
# points(aav[a,b], ave.catch[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# }
#}
#print.letter(xy = c(0.08, 0.95), label = alpha.label[3], cex = letter.cex)
#plot(ave.catch[1,], aav[1,], axes = F, xlim = c(min.catch,max.catch), ylim = c(0, 12), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#box()
#for(a in 1:3){
# lines(ave.catch[a,], aav[a,], lty = 2)
# points(ave.catch[a,1], aav[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# axis(side = 1, at = seq(120, 180, 20),cex.axis = axis.cex)
# axis(side = 2, las = 1, at = c(0,5,10,15), cex.axis = axis.cex)
# mtext(side = 2, "Average annual variation - catch", line = 3, cex = label.cex)
# mtext(side = 1, "Average catch", line = 3, cex = label.cex)
# for(b in 2:length(hcr.vec)){
# points(ave.catch[a,b], aav[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# }
#}
##print.letter(xy = c(0.08, 0.95), label = alpha.label[3], cex = letter.cex)
#legend('topleft', pch = pch.vec, legend = c("20-5", "25-5", "30-10", "40-10"), bty = 'n', cex = letter.cex + 0.5)
dev.off()
# =========================================================================================================
# Stock Recruitment
# =========================================================================================================
set.seed(2)
steep = 0.8
SSB0 = 1000
R0 = 100
SSB = seq(0,SSB0,10)
sigmaR = 0.10
rho <- 0.99 #1 / sqrt(2)
recdevs <- rnorm(100, 0, sigmaR)
autocorr <- rep(0, 100)
autocorr[1] <- recdevs[1]
for (e in 2:100) {
autocorr[e] <- rho*autocorr[e-1]+sqrt(1-rho*rho)*recdevs[e]
}
Rauto = Rdev = numeric(0)
Rdeterm <- (4 * steep * ( R0 ) * SSB) / (SSB0 * (1 - steep) + SSB * (5 * steep - 1))
steep = 0.65
Rmid <- (4 * steep * ( R0 ) * SSB) / (SSB0 * (1 - steep) + SSB * (5 * steep - 1))
steep = 0.50
Rlow <- (4 * steep * ( R0 ) * SSB) / (SSB0 * (1 - steep) + SSB * (5 * steep - 1))
for (y in 1:99){
change = 100 - y
Rdev[change+1] <- Rdeterm[change+1] * exp(-0.5 * (sigmaR^2)) * exp(recdevs[y+1])
#Rauto[change+1] <- R0 * exp(-0.5 * (sigmaR^2)) * exp(autocorr[y+1])
}
par(mfrow = c(1,1))
plot(Rdeterm, type = 'l', col = 'blue', lwd = 3, ylim = c(0,120), axes = F)
lines(Rmid, lty = 1, col = 2, lwd = 3)
lines(Rlow, lty = 1, col = 3, lwd = 3)
par(mfrow = c(1,1))
plot(Rdeterm, type = 'l', col = 'blue', lwd = 3, ylim = c(0,120), axes = F)
points(20:100,Rdev[20:100], pch = 16, col = 2)
| /Defense_plots.R | no_license | chantelwetzel-noaa/Chapter4_HCR | R | false | false | 17,284 | r | ############################################
# Flatfish Harvest Control Rule MSE #
#Load in the files from the Operating Model#
# and the estimation model. #
# #
# Plotting and Reporting #
# Created January 13, 2016 by #
# Chantel Wetzel #
############################################
#Load in the R objects from the Simulation Eval Code ========================================
# NOTE(review): hard-coded drive letter; adjust for other machines.
drive = "G:/SyncBack"
# Steepness scenario file prefixes
steep.vec <- c("Steep_75","Steep_85", "Steep_95") #"Steep_85_75", "Steep_85_95", "Steep_85_data_30",
#"Steep_85_sigmaR_60", "Steep_85_auto", "Steep_85_auto_sigmaR_60")
# Harvest control rule names (threshold-limit ramp rules)
hcr.vec <- c( "hcr_20_5_ramp_constant",
"hcr_25_5_ramp_constant",
"hcr_30_10_ramp_constant",
"hcr_40_10_ramp_constant")
dir = paste0(drive, "/PhD/Chapter4/output")
# One list entry per steepness scenario
om.out <- ss.out <- hcr.out <- med.out <- list()
for (a in 1:length(steep.vec)){
# Each load() is expected to create the named object (om.all, ss.all,
# hcr.all, med.all) in the global environment; the .RData files must use
# exactly those object names.
load(paste0(dir,"/", steep.vec[a], "_om_all"))
om.out[[a]] <- om.all
load(paste0(dir,"/", steep.vec[a], "_ss_all"))
ss.out[[a]] <- ss.all
load(paste0(dir,"/", steep.vec[a], "_hcr_all"))
hcr.out[[a]] <- hcr.all
load(paste0(dir,"/", steep.vec[a], "_medians_all"))
med.out[[a]] <- med.all
}
# Draw a text label at a position expressed as fractions of the current
# plot region.
#
# label: text to draw (default "(a)").
# xy:    c(x, y) as fractions (0-1) of the user coordinate ranges.
# ...:   any further arguments are passed straight through to text().
print.letter <- function(label = "(a)", xy = c(0.1, 0.925), ...) {
  usr <- par("usr")  # c(x1, x2, y1, y2) of the active plot region
  text(x = usr[1] + xy[1] * (usr[2] - usr[1]),
       y = usr[3] + xy[2] * (usr[4] - usr[3]),
       labels = label, ...)
}
print.numeric<-function(x, digits) { formatC(x, digits = digits, format = "f") }
# Biomass target of each harvest control rule (same order as hcr.vec)
target = c(0.20, 0.25, 0.30, 0.40)
#alpha.label = c('(a)', '(b)', '(c)', '(d)', '(e)', '(f)','(g)', '(h)', '(i)', '(j)')
# Plotmath panel labels for the three steepness scenarios
labels2 =c(expression("steepness"["75"]), #expression(italic("h")['75']),
expression("steepness"["85"]),#expression(italic("h")['85']),
expression("steepness"["95"]))#expression(italic("h")['95']),
#"recr. var.",
#"recr. auto.",
#"var. & auto")
#expression(italic("h")[LO]),
#expression(italic("h")[HI]),
#'RD',
#expression(sigma[R]),
#expression(rho[R]),
#expression(sigma[R]~' & '~rho[R]))
# Short HCR labels used on axes/legends
hcr.lab = c("20-5", "25-5", "30-10", "40-10")
x = 151:175 # Summary years for the operating model
# Quantiles for interval summaries (5%, median, 95%)
quant.vec = c(0.05, 0.50, 0.95)
# Custom colours used throughout the figures
green = rgb(93/255,151/255,50/255)
blue = rgb(0/255,86/255,149/255)
shale = rgb(29/255,37/255,45/255)
#=====================================================================================================================
# Calculate the percentage below the target stock size
#=====================================================================================================================
# Summary statistics per steepness scenario (rows) x HCR (columns),
# computed over the pooled summary-year x simulation values.
above.target = matrix(NA, length(steep.vec), length(hcr.vec))
med.dist = matrix(NA, length(steep.vec), length(hcr.vec))
below.msst.all = matrix(NA, length(steep.vec), length(hcr.vec))
si = matrix(NA, length(steep.vec), length(hcr.vec))
x = 151:175 # Summary years for the operating model
for (a in 1:length(steep.vec)){
for(b in 1:length(hcr.vec)){
# Proportion of values above the HCR's biomass target
above.target[a,b] = sum(om.out[[a]]$depl[b,x,] > target[b]) / length(om.out[[a]]$depl[b,x,])
med.dist[a,b] = median(om.out[[a]]$depl[b,x,])
# Proportion below MSST (defined here as half the target)
below.msst.all[a,b] = sum(om.out[[a]]$depl[b,x,] < target[b] * 0.50) / length(om.out[[a]]$depl[b,x,])
# 90% interval printed as text; print() dispatches to the custom
# print.numeric() defined above, which returns a formatted string.
temp = quantile(om.out[[a]]$depl[b,x,], quant.vec[c(1,3)])
si[a,b] = paste(print(temp[1],2), "-", print(temp[2],2))
}
}
#=====================================================================================================================
# Relative Stock Size Distributions
#=====================================================================================================================
# NOTE(review): hard-coded absolute path; only works on the author's machine.
setwd("C:/Users/chantell.wetzel/Documents/GitHub/Dissertation/Presentation")
#setwd(paste0(drive,"/PhD/Chapter4/Presentation/Plots/"))
# Three-panel figure: stacked relative-biomass density ridges, one panel
# per steepness scenario, one stacked density per harvest control rule.
png(filename = "Depletion_Dist_95SI_alt_3.png", width = 12, height = 8.5, units = 'in', res = 256)
par(mfrow = c(1, 3), mar = c(0.7,0.5,1.2,1), oma = c(4,4,4,5))
letter.cex = 1.8; axis.cex = 1.7; label.cex = 1.5 #0.8
dep.xvec = c(0.2, 0.35, 0.40, 0.50); dep.yvec = c(0.22, 0.47, 0.72, 0.97)
sim = 17; ymax = 10 * length(hcr.vec)
lab.pos = c(rep(0.23,3), 0.17, 0.18, 0.23)
# Bandwidth shared by all density estimates
set.bw = 0.021
main.lab = c("Steepness = 0.75", "Steepness = 0.85", "Steepness = 0.95")
adj.vec = c(-3, 0, 3)
# NOTE(review): the next assignment is immediately overwritten (dead code).
col.vec = c("deepskyblue", "dodgerblue3")
col.vec = c(shale,blue, green)
for (a in 1:length(steep.vec)) {
offset = 0
# Full-range density for the first HCR, drawn as a filled polygon.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
plot("","",xlim = c(0, 0.70), ylim = c(0, ymax), xlab = "", ylab = '', axes = F, yaxs="i",
xaxs = 'i', main = "")
mtext(main.lab[a], side = 3, outer = F, line = 1, cex = label.cex)
polygon(xx, yy, col = col.vec[1], lty = 0)
# Central 90% interval re-estimated and overlaid in the highlight colour.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[1,x,], quant.vec[1]),
to = quantile(om.out[[a]]$depl[1,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
lines(c(target[1], target[1]), c(0, max(yy)+ 2), col = col.vec[3], lty = 1, lwd = 3) # Target line
# Median and percent-above-target annotations; print() dispatches to the
# custom print.numeric() defined above, returning formatted text.
print.letter(xy = c(0.80, dep.yvec[1] - 0.05), label = paste("med =", print(med.dist[a,1],2)), cex = letter.cex)
print.letter(xy = c(0.77, dep.yvec[1]), label = paste0("> target = ", print(100*above.target[a,1],0), "%"), cex = letter.cex)
box()
#print.letter(xy = c(lab.pos[a], 0.95), label = labels2[a], cex = (letter.cex+0.1))
#print.letter(xy = c(0.06, 0.95), label = alpha.label[a], cex = letter.cex)
# Remaining HCRs stacked above the first, 10 y-units apart.
for (b in 2:length(hcr.vec)){
offset = offset + 10
out = density(om.out[[a]]$depl[b,x,], bw = set.bw,from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx,yy, col = col.vec[1], lty = 0)
out = density(om.out[[a]]$depl[b,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[b,x,], quant.vec[1]),
to = quantile(om.out[[a]]$depl[b,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
abline(h = offset, lty = 1)
lines(c(target[b], target[b]), c(offset , max(yy) + 2), col = col.vec[3], lty = 1, lwd = 3)
print.letter(xy = c(0.80, dep.yvec[b] - 0.05), label = paste("med =",print(med.dist[a,b],2)), cex = letter.cex)
print.letter(xy = c(0.75, dep.yvec[b]), label = paste0("> target = ", print(100*above.target[a,b],0), "%"), cex = letter.cex)
}
axis(side = 1, at = seq(0, 0.60, 0.10), cex.axis = axis.cex)
# HCR labels only on the right edge of the right-most panel
if (a == 3 || a == 6 ){
axis(side = 4, at = c(0.15*ymax, 0.38*ymax, 0.62*ymax, 0.85*ymax), label = c("20-5", "25-5", "30-10", "40-10"), las = 1,
cex.axis = axis.cex, tick = FALSE, padj = 0, hadj = 0.25 )
}
}
mtext("Frequency", side = 2, outer = T, line = 1, cex = label.cex)
mtext("Relative biomass", side = 1, outer = T, line = 2, cex = label.cex)
p = par('usr')
mtext("Harvest control rule", side = 4, outer = T, line = 4, cex = label.cex, las = 3)
dev.off()
# Single panel
# Single-panel version of the relative-biomass figure (first steepness
# scenario only), written to Depletion_Dist_95SI_alt_1.png.
# NOTE: this was a copy-paste of the three-panel code above with the outer
# loop pinned to one iteration; the dead `for (a in 1:1)` wrapper and the
# unused settings (dep.xvec, sim, lab.pos, adj.vec, the overwritten
# col.vec) have been removed. The rendered output is unchanged.
png(filename = "Depletion_Dist_95SI_alt_1.png", width = 7, height = 8.5, units = 'in', res = 256)
par(mfrow = c(1, 1), mar = c(0.7,0.5,1.2,1), oma = c(4,4,4,5))
letter.cex = 1.8; axis.cex = 1.7; label.cex = 1.5
dep.yvec = c(0.22, 0.47, 0.72, 0.97)
ymax = 10 * length(hcr.vec)
set.bw = 0.021
main.lab = c("Steepness = 0.75", "Steepness = 0.85", "Steepness = 0.95")
col.vec = c(shale, blue, green)  # density fill, 90% interval fill, target line
a = 1
offset = 0
# Full-range density for the first HCR, drawn as a filled polygon sitting
# on the baseline at `offset`.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = 0.01, to = 1)
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
plot("","",xlim = c(0, 0.70), ylim = c(0, ymax), xlab = "", ylab = '', axes = F, yaxs="i",
     xaxs = 'i', main = "")
mtext(main.lab[a], side = 3, outer = F, line = 1, cex = label.cex)
polygon(xx, yy, col = col.vec[1], lty = 0)
# Central 90% interval re-estimated and overlaid in the highlight colour.
out = density(om.out[[a]]$depl[1,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[1,x,], quant.vec[1]),
              to = quantile(om.out[[a]]$depl[1,x,], quant.vec[3]))
xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
polygon(xx, yy, col = col.vec[2], lty = 0)
lines(c(target[1], target[1]), c(0, max(yy) + 2), col = col.vec[3], lty = 1, lwd = 3)  # target line
print.letter(xy = c(0.80, dep.yvec[1] - 0.05), label = paste("med =", print(med.dist[a,1],2)), cex = letter.cex)
print.letter(xy = c(0.77, dep.yvec[1]), label = paste0("> target = ", print(100*above.target[a,1],0), "%"), cex = letter.cex)
box()
# Remaining HCRs stacked above the first, 10 y-units apart.
for (b in 2:length(hcr.vec)){
  offset = offset + 10
  out = density(om.out[[a]]$depl[b,x,], bw = set.bw, from = 0.01, to = 1)
  xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
  polygon(xx, yy, col = col.vec[1], lty = 0)
  out = density(om.out[[a]]$depl[b,x,], bw = set.bw, from = quantile(om.out[[a]]$depl[b,x,], quant.vec[1]),
                to = quantile(om.out[[a]]$depl[b,x,], quant.vec[3]))
  xx = c(out$x, rev(out$x)); yy = c(out$y + offset, rev(rep(offset, length(out$y))))
  polygon(xx, yy, col = col.vec[2], lty = 0)
  abline(h = offset, lty = 1)
  lines(c(target[b], target[b]), c(offset, max(yy) + 2), col = col.vec[3], lty = 1, lwd = 3)
  print.letter(xy = c(0.80, dep.yvec[b] - 0.05), label = paste("med =", print(med.dist[a,b],2)), cex = letter.cex)
  print.letter(xy = c(0.75, dep.yvec[b]), label = paste0("> target = ", print(100*above.target[a,b],0), "%"), cex = letter.cex)
}
axis(side = 1, at = seq(0, 0.60, 0.10), cex.axis = axis.cex)
axis(side = 4, at = c(0.15*ymax, 0.38*ymax, 0.62*ymax, 0.85*ymax), label = c("20-5", "25-5", "30-10", "40-10"), las = 1,
     cex.axis = axis.cex, tick = FALSE, padj = 0, hadj = 0.25)
mtext("Frequency", side = 2, outer = T, line = 1, cex = label.cex)
mtext("Relative biomass", side = 1, outer = T, line = 2, cex = label.cex)
p = par('usr')
mtext("Harvest control rule", side = 4, outer = T, line = 4, cex = label.cex, las = 3)
dev.off()
#=====================================================================================================================
# Trade-off plot
#=====================================================================================================================
# MSY by steepness scenario; assumed to match steep.vec order -- TODO confirm
msy.vec = c(160, 142, 182)
# Calculate the median probability over the last 25 years
# Rows = steepness scenarios, columns = harvest control rules.
target10 = ave.catch = aav = matrix(NA, length(steep.vec), length(hcr.vec))
# Summary years (final 25 of the projection period)
sum.yrs = 56:80
for (a in 1:length(steep.vec)){
# Median across simulations of: probability of being within +/-10% of the
# target, average catch, and average annual variation in catch, per HCR.
target10[a,] = apply(hcr.out[[a]]$target.true[,sum.yrs], 1, median)
ave.catch[a,] = apply(hcr.out[[a]]$catch.ave, 1, median)
aav[a,] = apply(hcr.out[[a]]$aav, 1, median) }
# Trade-off figure: average catch vs. probability of being within +/-10% of
# the biomass target; one point per steepness scenario x HCR combination.
png(filename = "Tradeoffs_msy_1.png", width = 12, height = 8.5, units = 'in', res = 256)
main.lab = c("Steepness = 0.75", "Steepness = 0.85", "Steepness = 0.95")
# Point shapes index the four HCRs; fill colours index steepness
pch.vec = 21:24
pch.col = c('red', 'green', 'blue') #pch.col = c(1, "grey50", "white")
#pch.col = c(green, blue, shale)
par(mfrow = c(1, 1), mar = c(3,4,3,3), oma = c(2,2,1,0))
letter.cex = 1.6; axis.cex = 1.8; label.cex = 1.5; pch.cex = 3; main.cex = 1
part2 = expression("Biomass"[target] %+-% "10%")
max.prob = 0.50 ; max.catch = 190; min.catch = 120
#1 ave catch vs. 10% for correct steep range
plot(target10[1,], ave.catch[1,], axes = F, xlim = c(0,max.prob), ylim = c(min.catch,max.catch), yaxs = 'i',
xaxs = 'i', xlab = "", ylab = "")
#abline(h = msy.vec[1], lty = 2, col = 'grey70')
#points(0.03, msy.vec[1], pch = pch.vec[1], bg = pch.col[2], cex = pch.cex)
#abline(h = msy.vec[2], lty = 2, col = 'grey70')
#points(0.03, msy.vec[2], pch = pch.vec[1], bg = pch.col[1], cex = pch.cex)
#abline(h = msy.vec[3], lty = 2, col = 'grey70')
#points(0.03, msy.vec[3], pch = pch.vec[1], bg = pch.col[3], cex = pch.cex)
box()
for(a in 1:3){
# Dashed line links the four HCRs within one steepness scenario
lines(target10[a,], ave.catch[a,], lty = 2)
points(target10[a,1], ave.catch[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# NOTE(review): axes and axis titles are redrawn on every loop pass;
# harmless overplotting, but a single draw outside the loop would suffice.
axis(side = 1, cex.axis = axis.cex)
axis(side = 2, las = 1, at = seq(120, 180, 20), cex.axis = axis.cex)
mtext(side = 2, "Average catch", line = 3.5, cex = label.cex)
mtext(side = 1, part2, line = 3.5, cex = label.cex)
for(b in 2:length(hcr.vec)){
points(target10[a,b], ave.catch[a,b], pch = pch.vec[b],cex = pch.cex, bg = pch.col[a])
}
}
#2 aav vs 10% probability
#plot(target10[1,], aav[1,], axes = F, xlim = c(0,max.prob), ylim = c(0, 12), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#plot(aav[1,], target10[1,], axes = F, xlim = c(0,12), ylim = c(0, max.prob), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#box()
#for(a in 1:3){
# lines(aav[a,], target10[a,], lty = 2)
# points(aav[a,1], target10[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# axis(side = 2, cex.axis = axis.cex)
# axis(side = 1, las = 1, at = seq(0, 15, 5), cex.axis = axis.cex)
# mtext(side = 1, "Average annual variation - catch", line = 3, cex = label.cex)
# mtext(side = 2, part2, line = 3.5, cex = label.cex)
# for(b in 2:length(hcr.vec)){
# points(aav[a,b], target10[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# }
# #lines(target10[a,], aav[a,], lty = 2)
# #points(target10[a,1], aav[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# #axis(side = 1, cex.axis = axis.cex)
# #axis(side = 2, las = 1, at = seq(0, 15, 5), cex.axis = axis.cex)
# #mtext(side = 2, "Average annual variation - catch", line = 3, cex = label.cex)
# #mtext(side = 1, part2, line = 3.5, cex = label.cex)
# #for(b in 2:length(hcr.vec)){
# # points(target10[a,b], aav[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# #}
#}
#print.letter(xy = c(0.08, 0.95), label = alpha.label[2], cex = letter.cex)
#mtext(side = 3, comb.lab[1], line = 0, cex = main.cex)
# Two overlaid legends: filled colour keys plus black circle outlines
legend('topright', legend = main.lab, pch = rep(16,3), col = pch.col, bty = 'n', cex = letter.cex + 0.5)
legend('topright', legend = main.lab, pch = rep(21,3), col = rep(1,3), bty = 'n', cex = letter.cex + 0.5)
#3 aav vs 10% probability
#plot(aav[1,], ave.catch[1,], axes = F, ylim = c(min.catch,max.catch), xlim = c(0, 12), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#box()
#for(a in 1:3){
# lines(aav[a,], ave.catch[a,], lty = 2)
# points(aav[a,1], ave.catch[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# axis(side = 2, las = 1,at = seq(120, 180, 20),cex.axis = axis.cex)
# axis(side = 1, las = 1, at = c(0,5,10,15), cex.axis = axis.cex)
# mtext(side = 1, "Average annual variation - catch", line = 3, cex = label.cex)
# mtext(side = 2, "Average catch", line = 3.5, cex = label.cex)
# for(b in 2:length(hcr.vec)){
# points(aav[a,b], ave.catch[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# }
#}
#print.letter(xy = c(0.08, 0.95), label = alpha.label[3], cex = letter.cex)
#plot(ave.catch[1,], aav[1,], axes = F, xlim = c(min.catch,max.catch), ylim = c(0, 12), yaxs = 'i', xaxs = 'i', xlab = "", ylab = "")
#box()
#for(a in 1:3){
# lines(ave.catch[a,], aav[a,], lty = 2)
# points(ave.catch[a,1], aav[a,1], pch = pch.vec[1], cex = pch.cex, bg = pch.col[a])
# axis(side = 1, at = seq(120, 180, 20),cex.axis = axis.cex)
# axis(side = 2, las = 1, at = c(0,5,10,15), cex.axis = axis.cex)
# mtext(side = 2, "Average annual variation - catch", line = 3, cex = label.cex)
# mtext(side = 1, "Average catch", line = 3, cex = label.cex)
# for(b in 2:length(hcr.vec)){
# points(ave.catch[a,b], aav[a,b], pch = pch.vec[b], cex = pch.cex, bg = pch.col[a])
# }
#}
##print.letter(xy = c(0.08, 0.95), label = alpha.label[3], cex = letter.cex)
#legend('topleft', pch = pch.vec, legend = c("20-5", "25-5", "30-10", "40-10"), bty = 'n', cex = letter.cex + 0.5)
dev.off()
# =========================================================================================================
# Stock Recruitment
# =========================================================================================================
# Illustrative Beverton-Holt stock-recruit curves at three steepness values,
# plus lognormal recruitment deviations around the h = 0.8 curve.
set.seed(2)
steep = 0.8
SSB0 = 1000
R0 = 100
SSB = seq(0,SSB0,10)
sigmaR = 0.10
rho <- 0.99 #1 / sqrt(2)
# iid deviations and an AR(1) version (autocorr is unused below)
recdevs <- rnorm(100, 0, sigmaR)
autocorr <- rep(0, 100)
autocorr[1] <- recdevs[1]
for (e in 2:100) {
autocorr[e] <- rho*autocorr[e-1]+sqrt(1-rho*rho)*recdevs[e]
}
Rauto = Rdev = numeric(0)
# Beverton-Holt parameterised by steepness: R = 4hR0*S / (S0(1-h) + S(5h-1))
Rdeterm <- (4 * steep * ( R0 ) * SSB) / (SSB0 * (1 - steep) + SSB * (5 * steep - 1))
steep = 0.65
Rmid <- (4 * steep * ( R0 ) * SSB) / (SSB0 * (1 - steep) + SSB * (5 * steep - 1))
steep = 0.50
Rlow <- (4 * steep * ( R0 ) * SSB) / (SSB0 * (1 - steep) + SSB * (5 * steep - 1))
# Deviated recruitment with the lognormal bias correction exp(-sigmaR^2/2).
# NOTE(review): Rdev is filled at indices 2..100 in reverse order by growing
# the vector on assignment; Rdev[1] is never assigned and stays NA.
for (y in 1:99){
change = 100 - y
Rdev[change+1] <- Rdeterm[change+1] * exp(-0.5 * (sigmaR^2)) * exp(recdevs[y+1])
#Rauto[change+1] <- R0 * exp(-0.5 * (sigmaR^2)) * exp(autocorr[y+1])
}
# Deterministic curves for the three steepness values
par(mfrow = c(1,1))
plot(Rdeterm, type = 'l', col = 'blue', lwd = 3, ylim = c(0,120), axes = F)
lines(Rmid, lty = 1, col = 2, lwd = 3)
lines(Rlow, lty = 1, col = 3, lwd = 3)
# h = 0.8 curve with the simulated deviations overlaid
par(mfrow = c(1,1))
plot(Rdeterm, type = 'l', col = 'blue', lwd = 3, ylim = c(0,120), axes = F)
points(20:100,Rdev[20:100], pch = 16, col = 2)
|
#' @title Timestamp classifier
#' @description Retrieve the timestamp classifier of an object of class \code{eventlog}
#' @param x An \code{eventlog} or \code{eventlog_mapping}
#' @seealso \code{\link{eventlog}}, \code{\link{mapping}}
#' @family Eventlog classifiers
#' @export
timestamp <- function(x) UseMethod("timestamp")
#' @describeIn timestamp Retrieve the timestamp identifier stored on an eventlog
#' @export
timestamp.eventlog <- function(x) {
	attr(x, "timestamp")
}
#' @describeIn timestamp Retrieve the timestamp identifier from an eventlog mapping
#' @export
timestamp.eventlog_mapping <- function(x) {
	x$timestamp
}
| /R/timestamp.R | no_license | analyzethat/bupaR | R | false | false | 648 | r | #' @title Timestamp classifier
#' @description Get the timestamp classifier of an object of class \code{eventlog}
#' @param x An \code{eventlog} of \code{eventlog_mapping}
#' @seealso \code{\link{eventlog}}, \code{\link{mapping}}
#' @family Eventlog classifiers
#' @export
timestamp <- function(x) {
UseMethod("timestamp")
}
#' @describeIn timestamp Retrieve timestamp identifier from eventlog
#' @export
timestamp.eventlog <- function(x){
return(attr(x, "timestamp"))
}
#' @describeIn timestamp Retrieve timestamp identifier from eventlog mapping
#' @export
timestamp.eventlog_mapping <- function(x) {
return(x$timestamp)
}
|
"============================================================
Title : RBM vs AE vs CNN
Updates : 20200718
============================================================"
rm(list = ls())  # NOTE(review): clearing the workspace inside a script is an anti-pattern; kept for compatibility
#=========================================================================
# 0. Load packages
#=========================================================================
library(deepnet); library(keras); library(data.table); library(imager);
library(e1071)
#=========================================================================
# 1. Data preparation
#=========================================================================
# Set the working directory to this script's location (RStudio only).
print(dirname(rstudioapi::getActiveDocumentContext()$path))
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Move one directory up.
setwd("../")
# Change to the directory containing the MNIST data.
setwd("./0.Data/MNIST")
# Load the MNIST data.
mnist <- readRDS("mnist.RDS")
# Number of training images.
num_train <- 60000
# Number of test images.
num_test <- 10000
# Image height/width in pixels.
x_pixl <- 28; y_pixl <-28;
# Normalise pixel intensities from 0..255 to 0..1.
X_train <- mnist$train$x/255
X_test <- mnist$test$x/255
# Flatten each 28x28 image into a single 784-length row vector.
X_train_flatten <- array_reshape(X_train, c(num_train, x_pixl*y_pixl))
X_test_flatten <- array_reshape(X_test, c(num_test, x_pixl*y_pixl))
# Labels (Y).
Y_train<-mnist$train$y
Y_test<-mnist$test$y
# Check dimensions.
cat("Dim(X_train): ", dim(X_train), "\n")
cat("Dim(X_test): ", dim(X_test), "\n")
cat("Dim(X_train_flatten): ", dim(X_train_flatten), "\n")
cat("Dim(X_test_flatten): ", dim(X_test_flatten), "\n")
cat("Dim(Y_train): ", dim(Y_train), "\n")
cat("Dim(Y_test): ", dim(Y_test), "\n")
#=========================================================================
# 2. Explore the images
#=========================================================================
# Set graphics parameters: a 5x5 grid of panels.
par(mfrow=c(5,5), mai=c(0,0.1,0.3,0)) #margin size (b, l, t, r)
# Plot the first 25 training images.
for(n in 1:25){
  image(t(X_train[n,,])[,y_pixl:1], axes=F, col=gray((0:255)/255))
}
par(fig=c(0,1,0.9,1), new=T)
plot.new()
# Fixed typo in the displayed title ("Frist" -> "First").
title("X_train: First 25 images", cex.main=2, col.main="purple")
dev.off()
# Plot the first 25 test images.
par(mfrow=c(5,5), mai=c(0,0.1,0.3,0))
for(n in 1:25){
  image(t(X_test[n,,])[,y_pixl:1], axes=F, col=gray((0:255)/255))
}
par(fig=c(0,1,0.9,1), new=T)
plot.new()
title("X_test: First 25 images", cex.main=2, col.main="purple")
dev.off()
#=========================================================================
# 3. Model: Restricted Boltzmann Machine
#=========================================================================
# Train the RBM: 100 hidden units, CD-1, 10 epochs, batch size 128.
RBM_mnist <- rbm.train(X_train_flatten,
hidden = 100,
batchsize=128,
numepochs = 10,
cd=1 )
# Extract the RBM hidden-node activations for the training inputs.
RBM_hidden_node_train <- rbm.up(RBM_mnist, X_train_flatten)
# Extract the RBM hidden-node activations for the test inputs.
RBM_hidden_node_test <- rbm.up(RBM_mnist, X_test_flatten)
# Reconstruct the test images from their hidden activations.
RBM_recon_vec <- rbm.down(RBM_mnist, RBM_hidden_node_test)
# Reshape the reconstructions into an image array.
RBM_recon <- array(RBM_recon_vec, dim=c(dim(RBM_hidden_node_test)[1], x_pixl, y_pixl))
# Check the dimensions of the generated features.
# Training-set hidden-node dimensions.
cat("RBM_hidden_node_train Dim: ", dim(RBM_hidden_node_train), "\n")
# Test-set hidden-node dimensions.
# NOTE(review): the printed label text is misleading (mentions RBM_recon_vec).
cat("RBM_recon_vec RBM_hidden_node_test: ", dim(RBM_hidden_node_test), "\n")
# Test-set reconstruction array dimensions.
cat("RBM_recon Dim: ", dim(RBM_recon), "\n")
#=========================================================================
# 4. Model: Autoencoder
#=========================================================================
# Define the model: 784 -> 100 (relu) -> 784 (sigmoid).
AE <- keras_model_sequential()
AE %>%
layer_dense(input_shape = c(x_pixl*y_pixl), units = 100, activation = "relu",name='AE_hidden_layer') %>%
layer_dense(units = x_pixl*y_pixl, activation = "sigmoid",name='output_layer')
# Inspect the defined model.
summary(AE)
# Set the loss and optimizer.
AE %>% compile(
optimizer = "adam",
loss = 'binary_crossentropy'
)
# Train (autoencoder: inputs reconstruct themselves).
AE %>% fit(
X_train_flatten, X_train_flatten,
shuffle = TRUE,
epochs = 10,
batch_size = 128,
validation_split = 0.2
)
# Extract the hidden-layer activations for train/test inputs via a sub-model
# that outputs the named hidden layer.
layer_name <- 'AE_hidden_layer'
feature_extract_model <- keras_model(inputs = AE$input,outputs = get_layer(AE, layer_name)$output)
AE_hidden_layer_train <- predict(feature_extract_model,X_train_flatten)
AE_hidden_layer_test <- predict(feature_extract_model,X_test_flatten)
# Reconstruct the test images with the full autoencoder.
AE_recon_vec <- AE %>% predict(X_test_flatten)
# Reshape the reconstructions into an image array.
AE_recon <- array(AE_recon_vec, dim=c(num_test, x_pixl, y_pixl))
# Check the dimensions of the generated features.
# Training-set feature dimensions.
cat("AE_hidden_layer_train Dim : ", dim(AE_hidden_layer_train), "\n")
# Test-set feature dimensions.
cat("AE_hidden_layer_test Dim : ", dim(AE_hidden_layer_test), "\n")
# Test-set reconstruction array dimensions.
cat("AE_recon Dim : ", dim(AE_recon), "\n")
#=========================================================================
# 5. Compare the RBM and AE reconstructed images
#=========================================================================
# Original vs reconstructed images (rows: original, RBM, AE).
par(mfrow=c(3,5), mai=c(0,0.1,0.3,0))
# Original images. Fixed typo in the panel title ("Origianl" -> "Original").
for(n in 1:5){
  image(t(X_test[n,,])[,y_pixl:1], axes=F, col=gray((0:255)/255),main="Original")
}
# RBM reconstructions.
for(n in 1:5){
  image(RBM_recon[n,,][,y_pixl:1], axes=F, col=gray((0:255)/255),main="RBM")
}
# AE reconstructions.
for(n in 1:5){
  image(AE_recon[n,,][,y_pixl:1], axes=F, col=gray((0:255)/255),main="AE",col.main="purple")
}
dev.off()
#=========================================================================
# 6. Multi-class classification using the extracted features + SVM
#=========================================================================
# Rename the RBM feature columns and convert to data frames.
# NOTE(review): paste() (not paste0) leaves a space in each name, e.g.
# "RBM_Feature_ 1"; the saved SVM models were trained with exactly these
# names, so they must not be changed.
colnames(RBM_hidden_node_train)<-paste("RBM_Feature_",1:100)
colnames(RBM_hidden_node_test)<-paste("RBM_Feature_",1:100)
RBM_hidden_node_train<-data.frame(RBM_hidden_node_train)
RBM_hidden_node_test<-data.frame(RBM_hidden_node_test)
# Rename the AE feature columns and convert to data frames.
colnames(AE_hidden_layer_train)<-paste("AE_Feature_",1:100)
colnames(AE_hidden_layer_test)<-paste("AE_Feature_",1:100)
AE_hidden_layer_train<-data.frame(AE_hidden_layer_train)
AE_hidden_layer_test<-data.frame(AE_hidden_layer_test)
# Prepare the SVM inputs.
rbm_x<-RBM_hidden_node_train
ae_x<-AE_hidden_layer_train
y<-as.factor(Y_train)
# Fit the SVM models.
# Training takes a long time, so pre-trained models were saved to disk.
#svm_rbm_model <- svm(rbm_x, y, probability = TRUE)
#svm_ae_model <- svm(ae_x, y, probability = TRUE)
#saveRDS(svm_rbm_model,"svm_rbm_model.RDS")
#saveRDS(svm_ae_model,"svm_ae_model.RDS")
# Load the pre-trained models.
svm_rbm_model <-readRDS("svm_rbm_model.RDS")
svm_ae_model <-readRDS("svm_ae_model.RDS")
# Prediction also takes a long time, so the results were saved to disk.
# Compute predictions.
#svm_rbm_pred_prob <- predict(svm_rbm_model, RBM_hidden_node_test, decision.values = TRUE, probability = TRUE)
#svm_ae_pred_prob <- predict(svm_ae_model, AE_hidden_layer_test, decision.values = TRUE, probability = TRUE)
# Compute prediction accuracy.
#pred_result<-data.frame(rbm_pred=svm_rbm_pred_prob,ae_pred=svm_ae_pred_prob,true_label=Y_test)
#saveRDS(pred_result,"svm_pred_result.RDS")
# Load the prediction table (col 1 = RBM preds, col 2 = AE preds, col 3 = truth).
pred_result <-readRDS("svm_pred_result.RDS")
# RBM prediction accuracy.
rbm_acc<-sum(pred_result[,1]==pred_result[,3])/nrow(pred_result)
# AE prediction accuracy.
ae_acc<-sum(pred_result[,2]==pred_result[,3])/nrow(pred_result)
#=========================================================================
# 6. Multi-class classification with a CNN
#=========================================================================
## Prepare the data for CNN training.
# CNN hyperparameters.
batch_size <- 128
num_classes <- 10
epochs <- 12
# Input image dimensions
img_rows <- 28
img_cols <- 28
# Reshape MNIST to (n, rows, cols, channels) for the CNN.
X_train <- array_reshape(X_train, c(nrow(X_train), img_rows, img_cols, 1))
X_test <- array_reshape(X_test, c(nrow(X_test), img_rows, img_cols, 1))
input_shape <- c(img_rows, img_cols, 1)
# Check the new dimensions.
cat('X_train_shape:', dim(X_train), '\n')
cat(nrow(X_train), 'train samples\n')
cat(nrow(X_test), 'test samples\n')
# One-hot encode the labels for the softmax output.
Y_train <- to_categorical(Y_train, num_classes)
Y_test <- to_categorical(Y_test, num_classes)
## Define the CNN model.
# NOTE(review): this definition is immediately replaced by the
# load_model_hdf5() call below, so it only documents the architecture.
model <- keras_model_sequential() %>%
layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = 64, kernel_size = c(3,3), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.25) %>%
layer_flatten() %>%
layer_dense(units = 128, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = num_classes, activation = 'softmax')
# Load the pre-trained model.
model<-load_model_hdf5("cnn_mnist_model.h5",compile = F)
# Compile the model.
model %>% compile(
loss = loss_categorical_crossentropy,
optimizer = optimizer_adadelta(),
metrics = c('accuracy')
)
# Train the model (disabled; the pre-trained weights are used instead).
# model %>% fit(
#   X_train, Y_train,
#   batch_size = batch_size,
#   epochs = epochs,
#   validation_split = 0.2
# )
# save_model_hdf5(model,"cnn_mnist_model.h5")
# Evaluate the model on the test set.
scores <- model %>% evaluate(
X_test, Y_test, verbose = 0
)
# Prediction accuracy (second element of the returned metrics).
cnn_acc<-scores[[2]]
#=========================================================================
# 7. 최종 결과 확인
#=========================================================================
cat('RBM Test accuracy:', rbm_acc, '\n')
cat('AE Test accuracy:', ae_acc, '\n')
cat('CNN Test accuracy:', cnn_acc, '\n')
| /on_site_training/8. 딥러닝 알고리즘 강화학습 - 김도현 대표/2.Practice1/1.RBM_AE_CNN.R | no_license | soykim-snail/Begas-BigDataTraining | R | false | false | 10,549 | r | "============================================================
Title : RBM vs AE vs CNN
Updates : 20200718
============================================================"
# NOTE(review): clearing the global environment inside a script is an
# anti-pattern; kept for byte-compatibility with the training material.
rm(list = ls())
#=========================================================================
# 0. Package loading
#=========================================================================
library(deepnet); library(keras); library(data.table); library(imager);
library(e1071)
#=========================================================================
# 1. Data preparation
#=========================================================================
# Set the working directory to the location of this script
# (requires running inside RStudio).
print(dirname(rstudioapi::getActiveDocumentContext()$path))
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Move one directory up.
setwd("../")
# Then into the folder holding the MNIST data.
setwd("./0.Data/MNIST")
# Load the MNIST dataset (list with $train$x/$y and $test$x/$y).
mnist <- readRDS("mnist.RDS")
# Number of training samples
num_train <- 60000
# Number of test samples
num_test <- 10000
# Image height/width in pixels
x_pixl <- 28; y_pixl <-28;
# Normalise pixel intensities from 0..255 to 0..1.
X_train <- mnist$train$x/255
X_test <- mnist$test$x/255
# Flatten each 28x28 image into a single 784-element row vector.
X_train_flatten <- array_reshape(X_train, c(num_train, x_pixl*y_pixl))
X_test_flatten <- array_reshape(X_test, c(num_test, x_pixl*y_pixl))
# Extract the labels (Y).
Y_train<-mnist$train$y
Y_test<-mnist$test$y
# Sanity-check the dimensions of everything built above.
cat("Dim(X_train): ", dim(X_train), "\n")
cat("Dim(X_test): ", dim(X_test), "\n")
cat("Dim(X_train_flatten): ", dim(X_train_flatten), "\n")
cat("Dim(X_test_flatten): ", dim(X_test_flatten), "\n")
cat("Dim(Y_train): ", dim(Y_train), "\n")
cat("Dim(Y_test): ", dim(Y_test), "\n")
#=========================================================================
# 2. Image exploration
#=========================================================================
# Graphics parameters: 5x5 grid of panels with tight margins.
par(mfrow=c(5,5), mai=c(0,0.1,0.3,0)) #margin size (b, l, t, r)
# Plot the first 25 training images. t(...)[,y_pixl:1] rotates each
# image so it displays upright.
# NOTE(review): axes=F uses the F shorthand; prefer FALSE. The title
# string below misspells "First" ("Frist") — a runtime string, left as-is.
for(n in 1:25){
  image(t(X_train[n,,])[,y_pixl:1], axes=F, col=gray((0:255)/255))
}
par(fig=c(0,1,0.9,1), new=T)
plot.new()
title("X_train: Frist 25 images", cex.main=2, col.main="purple")
dev.off()
# Plot the first 25 test images the same way.
par(mfrow=c(5,5), mai=c(0,0.1,0.3,0))
for(n in 1:25){
  image(t(X_test[n,,])[,y_pixl:1], axes=F, col=gray((0:255)/255))
}
par(fig=c(0,1,0.9,1), new=T)
plot.new()
title("X_test: Frist 25 images", cex.main=2, col.main="purple")
dev.off()
#=========================================================================
# 3. Model: Restricted Boltzmann Machine
#=========================================================================
# Train an RBM with 100 hidden units using 1-step contrastive divergence.
RBM_mnist <- rbm.train(X_train_flatten,
                       hidden = 100,
                       batchsize=128,
                       numepochs = 10,
                       cd=1 )
# Propagate the training data up to get hidden-node activations (features).
RBM_hidden_node_train <- rbm.up(RBM_mnist, X_train_flatten)
# Same for the test data.
RBM_hidden_node_test <- rbm.up(RBM_mnist, X_test_flatten)
# Reconstruct the test images by propagating hidden activations back down.
RBM_recon_vec <- rbm.down(RBM_mnist, RBM_hidden_node_test)
# Reshape the flat reconstructions back into (samples, 28, 28) images.
RBM_recon <- array(RBM_recon_vec, dim=c(dim(RBM_hidden_node_test)[1], x_pixl, y_pixl))
# Check the dimensions of the extracted features.
# Training-set hidden-node activations:
cat("RBM_hidden_node_train Dim: ", dim(RBM_hidden_node_train), "\n")
# Test-set hidden-node activations:
cat("RBM_recon_vec RBM_hidden_node_test: ", dim(RBM_hidden_node_test), "\n")
# Reconstructed test images after array conversion:
cat("RBM_recon Dim: ", dim(RBM_recon), "\n")
#=========================================================================
# 4. Model: Autoencoder
#=========================================================================
# Model definition: 784 -> 100 (relu bottleneck) -> 784 (sigmoid output).
AE <- keras_model_sequential()
AE %>%
  layer_dense(input_shape = c(x_pixl*y_pixl), units = 100, activation = "relu",name='AE_hidden_layer') %>%
  layer_dense(units = x_pixl*y_pixl, activation = "sigmoid",name='output_layer')
# Inspect the defined model.
summary(AE)
# Loss and optimizer configuration.
AE %>% compile(
  optimizer = "adam",
  loss = 'binary_crossentropy'
)
# Train the autoencoder to reproduce its own input.
AE %>% fit(
  X_train_flatten, X_train_flatten,
  shuffle = TRUE,
  epochs = 10,
  batch_size = 128,
  validation_split = 0.2
)
# Build a sub-model whose output is the bottleneck layer, and use it to
# extract 100-dimensional features for both splits.
layer_name <- 'AE_hidden_layer'
feature_extract_model <- keras_model(inputs = AE$input,outputs = get_layer(AE, layer_name)$output)
AE_hidden_layer_train <- predict(feature_extract_model,X_train_flatten)
AE_hidden_layer_test <- predict(feature_extract_model,X_test_flatten)
# Reconstruct the test images with the full autoencoder.
AE_recon_vec <- AE %>% predict(X_test_flatten)
# Reshape the flat reconstructions back into (samples, 28, 28) images.
AE_recon <- array(AE_recon_vec, dim=c(num_test, x_pixl, y_pixl))
# Check the dimensions of the extracted features.
# Training-set bottleneck activations:
cat("AE_hidden_layer_train Dim : ", dim(AE_hidden_layer_train), "\n")
# Test-set bottleneck activations:
cat("AE_hidden_layer_test Dim : ", dim(AE_hidden_layer_test), "\n")
# Reconstructed test images:
cat("AE_recon Dim : ", dim(AE_recon), "\n")
#=========================================================================
# 5. Compare RBM and AE reconstructions
#=========================================================================
# 3 rows (original / RBM / AE) x 5 example images.
par(mfrow=c(3,5), mai=c(0,0.1,0.3,0))
# Row 1: original test images.
# NOTE(review): the title string misspells "Original" ("Origianl") — a
# runtime string, left unchanged here.
for(n in 1:5){
  image(t(X_test[n,,])[,y_pixl:1], axes=F, col=gray((0:255)/255),main="Origianl")
}
# Row 2: RBM reconstructions.
for(n in 1:5){
  image(RBM_recon[n,,][,y_pixl:1], axes=F, col=gray((0:255)/255),main="RBM")
}
# Row 3: autoencoder reconstructions.
for(n in 1:5){
  image(AE_recon[n,,][,y_pixl:1], axes=F, col=gray((0:255)/255),main="AE",col.main="purple")
}
dev.off()
#=========================================================================
# 6.특성 추출값 + SVM 을 이용한 multi-class classification
#=========================================================================
#RBM 특성 추출 데이터 Column name 변경 및 데이터 프레임 변환
colnames(RBM_hidden_node_train)<-paste("RBM_Feature_",1:100)
colnames(RBM_hidden_node_test)<-paste("RBM_Feature_",1:100)
RBM_hidden_node_train<-data.frame(RBM_hidden_node_train)
RBM_hidden_node_test<-data.frame(RBM_hidden_node_test)
#AE 특성 추출 데이터 Column name 변경 및 데이터 프레임 변환
colnames(AE_hidden_layer_train)<-paste("AE_Feature_",1:100)
colnames(AE_hidden_layer_test)<-paste("AE_Feature_",1:100)
AE_hidden_layer_train<-data.frame(AE_hidden_layer_train)
AE_hidden_layer_test<-data.frame(AE_hidden_layer_test)
# SVM 자료 준비
rbm_x<-RBM_hidden_node_train
ae_x<-AE_hidden_layer_train
y<-as.factor(Y_train)
#SVM 모델 Fitting
#학습 시간이 오래걸려 미리 학습한 파일을 저장해 두었습니다.
#svm_rbm_model <- svm(rbm_x, y, probability = TRUE)
#svm_ae_model <- svm(ae_x, y, probability = TRUE)
#saveRDS(svm_rbm_model,"svm_rbm_model.RDS")
#saveRDS(svm_ae_model,"svm_ae_model.RDS")
#학습된 모델 로드
svm_rbm_model <-readRDS("svm_rbm_model.RDS")
svm_ae_model <-readRDS("svm_ae_model.RDS")
#계산 시간이 오래걸려 미리 결과 파일을 저장해 두었습니다.
#예측값 산출
#svm_rbm_pred_prob <- predict(svm_rbm_model, RBM_hidden_node_test, decision.values = TRUE, probability = TRUE)
#svm_ae_pred_prob <- predict(svm_ae_model, AE_hidden_layer_test, decision.values = TRUE, probability = TRUE)
#예측 정확도 계산
#pred_result<-data.frame(rbm_pred=svm_rbm_pred_prob,ae_pred=svm_ae_pred_prob,true_label=Y_test)
#saveRDS(pred_result,"svm_pred_result.RDS")
#예측값 테이블 로드
pred_result <-readRDS("svm_pred_result.RDS")
#RBM 예측 정확도
rbm_acc<-sum(pred_result[,1]==pred_result[,3])/nrow(pred_result)
#AE 예측 정확도
ae_acc<-sum(pred_result[,2]==pred_result[,3])/nrow(pred_result)
#=========================================================================
# 6. CNN을 활용한 multi-class classification
#=========================================================================
##CNN 학습을 위한 데이터 준비
#CNN 파라메터 설정
batch_size <- 128
num_classes <- 10
epochs <- 12
# Input image dimensions
img_rows <- 28
img_cols <- 28
# CNN 학습을 위한 MNIST 자료 차원 변경
X_train <- array_reshape(X_train, c(nrow(X_train), img_rows, img_cols, 1))
X_test <- array_reshape(X_test, c(nrow(X_test), img_rows, img_cols, 1))
input_shape <- c(img_rows, img_cols, 1)
# 변경된 차원 확인
cat('X_train_shape:', dim(X_train), '\n')
cat(nrow(X_train), 'train samples\n')
cat(nrow(X_test), 'test samples\n')
# Softmax output에 알맞은 형태로 one-hot encoding
Y_train <- to_categorical(Y_train, num_classes)
Y_test <- to_categorical(Y_test, num_classes)
##CNN 모델 정의
model <- keras_model_sequential() %>%
layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = 64, kernel_size = c(3,3), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.25) %>%
layer_flatten() %>%
layer_dense(units = 128, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = num_classes, activation = 'softmax')
# 학습된 모델 로드
model<-load_model_hdf5("cnn_mnist_model.h5",compile = F)
# 모델 컴파일
model %>% compile(
loss = loss_categorical_crossentropy,
optimizer = optimizer_adadelta(),
metrics = c('accuracy')
)
# 모델 학습
# model %>% fit(
# X_train, Y_train,
# batch_size = batch_size,
# epochs = epochs,
# validation_split = 0.2
# )
# save_model_hdf5(model,"cnn_mnist_model.h5")
# 모델 평가
scores <- model %>% evaluate(
X_test, Y_test, verbose = 0
)
#예측 정확도 계산
cnn_acc<-scores[[2]]
#=========================================================================
# 7. 최종 결과 확인
#=========================================================================
cat('RBM Test accuracy:', rbm_acc, '\n')
cat('AE Test accuracy:', ae_acc, '\n')
cat('CNN Test accuracy:', cnn_acc, '\n')
|
#' Build a data frame or list.
#'
#' @description
#' `tibble()` is a trimmed down version of [data.frame()] that:
#'
#' * Never coerces inputs (i.e. strings stay as strings!).
#' * Never adds `row.names`.
#' * Never munges column names.
#' * Only recycles length 1 inputs.
#' * Evaluates its arguments lazily and in order.
#' * Adds `tbl_df` class to output.
#' * Automatically adds column names.
#'
#'
#' @param ... A set of name-value pairs. Arguments are evaluated sequentially,
#' so you can refer to previously created variables. These arguments are
#' processed with [rlang::quos()] and support unquote via `!!` and
#' unquote-splice via `!!!`.
#' @param xs A list of unevaluated expressions created with `~`,
#' [quote()], or (deprecated) [lazyeval::lazy()].
#' @seealso [as_tibble()] to turn an existing list into
#' a data frame.
#' @export
#' @examples
#' a <- 1:5
#' tibble(a, b = a * 2)
#' tibble(a, b = a * 2, c = 1)
#' tibble(x = runif(10), y = x * 2)
#'
#' lst(n = 5, x = runif(n))
#'
#' # tibble never coerces its inputs
#' str(tibble(letters))
#' str(tibble(x = list(diag(1), diag(2))))
#'
#' # or munges column names
#' tibble(`a + b` = 1:5)
#'
#' # You can splice-unquote a list of quotes and formulas
#' tibble(!!! list(x = rlang::quo(1:10), y = quote(x * 2)))
#'
#' # data frames can only contain 1d atomic vectors and lists
#' # and can not contain POSIXlt
#' \dontrun{
#' tibble(x = tibble(1, 2, 3))
#' tibble(y = strptime("2000/01/01", "%x"))
#' }
tibble <- function(...) {
  # Capture the name-value pairs as quosures so evaluation is lazy and
  # sequential; `.named = TRUE` auto-names any unnamed arguments.
  captured <- quos(..., .named = TRUE)
  as_tibble(lst_quos(captured, expand = TRUE))
}
#' @export
#' @rdname tibble
tibble_ <- function(xs) {
  # Translate deprecated lazyeval-style inputs into quosures, then
  # splice them into the modern constructor.
  quosures <- compat_lazy_dots(xs, caller_env())
  tibble(!!! quosures)
}
#' @export
#' @rdname tibble
#' @usage NULL
# Alias of tibble(), retained so existing code calling data_frame() keeps
# working.
data_frame <- tibble
#' @export
#' @rdname tibble
#' @usage NULL
# Alias of tibble_(), the standard-evaluation variant.
data_frame_ <- tibble_
#' Test if the object is a tibble.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the `tbl_df` class.
#' @export
is.tibble <- function(x) {
  # inherits() is the idiomatic S3 class test; behaviourally equivalent
  # to `"tbl_df" %in% class(x)` but clearer about intent.
  inherits(x, "tbl_df")
}
#' @rdname is.tibble
#' @export
is_tibble <- is.tibble
# Validity checks --------------------------------------------------------------
# Validate that `x` (a named list of columns) can form a tibble.
# Raises via invalid_df() on: missing/empty names, duplicate names,
# non-1d columns, or POSIXlt columns. Returns `x` with dimension
# attributes stripped from its columns.
check_tibble <- function(x) {
  # Names: every column must have a non-empty, non-NA name.
  names_x <- names2(x)
  bad_name <- is.na(names_x) | names_x == ""
  if (any(bad_name)) {
    invalid_df("must be named", x, which(bad_name))
  }
  # Names must also be unique.
  dups <- duplicated(names_x)
  if (any(dups)) {
    invalid_df("must have [a] unique name(s)", x, dups)
  }
  # Types: columns must be 1d atomic vectors or lists.
  is_1d <- map_lgl(x, is_1d)
  if (any(!is_1d)) {
    invalid_df("must be [a] 1d atomic vector(s) or [a] list(s)", x, !is_1d)
  }
  # Drop dim attributes (e.g. 1d arrays) so columns behave as plain vectors.
  x[] <- map(x, strip_dim)
  # POSIXlt is a list-based time representation unsuitable for columns.
  posixlt <- map_lgl(x, inherits, "POSIXlt")
  if (any(posixlt)) {
    invalid_df("[is](are) [a] date(s)/time(s) and must be stored as POSIXct, not POSIXlt", x, posixlt)
  }
  x
}
# Recycle length-1 columns up to the length of the longest column,
# erroring if any column has an incompatible length.
recycle_columns <- function(x) {
  # Nothing to recycle in an empty collection.
  if (length(x) == 0L) {
    return(x)
  }
  col_lengths <- map_int(x, NROW)
  # Target length: the longest non-scalar column (0 if all are length 1).
  # (Named `target` rather than `max` to avoid shadowing base::max.)
  target <- max(c(col_lengths[col_lengths != 1L], 0L))
  mismatched <- col_lengths != 1L & col_lengths != target
  if (any(mismatched)) {
    invalid_df_msg(
      paste0("must be length 1 or ", target, ", not "),
      x, mismatched, col_lengths[mismatched]
    )
  }
  # Only length-1 columns are recycled, and only when there is something
  # longer to match.
  scalars <- col_lengths == 1L
  if (target > 1L && any(scalars)) {
    x[scalars] <- map(x[scalars], rep, target)
  }
  x
}
# Raise a pluralised error about invalid columns. `vars` may be either a
# logical mask over `df`'s columns or a vector of column names.
invalid_df <- function(problem, df, vars) {
  bad_cols <- if (is.logical(vars)) names(df)[vars] else vars
  stopc(
    pluralise_msg("Column(s) ", bad_cols), " ",
    pluralise(problem, bad_cols)
  )
}
# Like invalid_df(), but pluralises the problem text against `extra`
# (e.g. the offending lengths) rather than against the column names.
invalid_df_msg <- function(problem, df, vars, extra) {
  bad_cols <- if (is.logical(vars)) names(df)[vars] else vars
  stopc(
    pluralise_msg("Column(s) ", bad_cols), " ",
    pluralise_msg(problem, extra)
  )
}
| /R/tibble.R | no_license | fomenkosmart/tibble | R | false | false | 3,683 | r | #' Build a data frame or list.
#'
#' @description
#' `tibble()` is a trimmed down version of [data.frame()] that:
#'
#' * Never coerces inputs (i.e. strings stay as strings!).
#' * Never adds `row.names`.
#' * Never munges column names.
#' * Only recycles length 1 inputs.
#' * Evaluates its arguments lazily and in order.
#' * Adds `tbl_df` class to output.
#' * Automatically adds column names.
#'
#'
#' @param ... A set of name-value pairs. Arguments are evaluated sequentially,
#' so you can refer to previously created variables. These arguments are
#' processed with [rlang::quos()] and support unquote via `!!` and
#' unquote-splice via `!!!`.
#' @param xs A list of unevaluated expressions created with `~`,
#' [quote()], or (deprecated) [lazyeval::lazy()].
#' @seealso [as_tibble()] to turn an existing list into
#' a data frame.
#' @export
#' @examples
#' a <- 1:5
#' tibble(a, b = a * 2)
#' tibble(a, b = a * 2, c = 1)
#' tibble(x = runif(10), y = x * 2)
#'
#' lst(n = 5, x = runif(n))
#'
#' # tibble never coerces its inputs
#' str(tibble(letters))
#' str(tibble(x = list(diag(1), diag(2))))
#'
#' # or munges column names
#' tibble(`a + b` = 1:5)
#'
#' # You can splice-unquote a list of quotes and formulas
#' tibble(!!! list(x = rlang::quo(1:10), y = quote(x * 2)))
#'
#' # data frames can only contain 1d atomic vectors and lists
#' # and can not contain POSIXlt
#' \dontrun{
#' tibble(x = tibble(1, 2, 3))
#' tibble(y = strptime("2000/01/01", "%x"))
#' }
tibble <- function(...) {
xs <- quos(..., .named = TRUE)
as_tibble(lst_quos(xs, expand = TRUE))
}
#' @export
#' @rdname tibble
tibble_ <- function(xs) {
xs <- compat_lazy_dots(xs, caller_env())
tibble(!!! xs)
}
#' @export
#' @rdname tibble
#' @usage NULL
data_frame <- tibble
#' @export
#' @rdname tibble
#' @usage NULL
data_frame_ <- tibble_
#' Test if the object is a tibble.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the `tbl_df` class.
#' @export
is.tibble <- function(x) {
"tbl_df" %in% class(x)
}
#' @rdname is.tibble
#' @export
is_tibble <- is.tibble
# Validity checks --------------------------------------------------------------
check_tibble <- function(x) {
# Names
names_x <- names2(x)
bad_name <- is.na(names_x) | names_x == ""
if (any(bad_name)) {
invalid_df("must be named", x, which(bad_name))
}
dups <- duplicated(names_x)
if (any(dups)) {
invalid_df("must have [a] unique name(s)", x, dups)
}
# Types
is_1d <- map_lgl(x, is_1d)
if (any(!is_1d)) {
invalid_df("must be [a] 1d atomic vector(s) or [a] list(s)", x, !is_1d)
}
x[] <- map(x, strip_dim)
posixlt <- map_lgl(x, inherits, "POSIXlt")
if (any(posixlt)) {
invalid_df("[is](are) [a] date(s)/time(s) and must be stored as POSIXct, not POSIXlt", x, posixlt)
}
x
}
recycle_columns <- function(x) {
if (length(x) == 0) {
return(x)
}
# Validate column lengths
lengths <- map_int(x, NROW)
max <- max(c(lengths[lengths != 1L], 0L))
bad_len <- lengths != 1L & lengths != max
if (any(bad_len)) {
invalid_df_msg(
paste0("must be length 1 or ", max, ", not "), x, bad_len, lengths[bad_len]
)
}
short <- lengths == 1
if (max > 1L && any(short)) {
x[short] <- map(x[short], rep, max)
}
x
}
invalid_df <- function(problem, df, vars) {
if (is.logical(vars)) {
vars <- names(df)[vars]
}
stopc(
pluralise_msg("Column(s) ", vars), " ",
pluralise(problem, vars)
)
}
invalid_df_msg <- function(problem, df, vars, extra) {
if (is.logical(vars)) {
vars <- names(df)[vars]
}
stopc(
pluralise_msg("Column(s) ", vars), " ",
pluralise_msg(problem, extra)
)
}
|
#
# vim:set ff=unix expandtab ts=2 sw=2:
vecFuncMaker=function# creates a vector valued function from the functions for the components
### Helper that bundles a list of two-argument component functions into a
### single vector valued function of two arguments, e.g. to build the right
### hand side of a system of ODEs.
(
	funcs,	##<< The list of functions computing the vector components
  arg1, ##<< The first argument of the component functions (documentation only; the returned closure binds its own)
  arg2 ##<< The second argument of the component functions (documentation only; the returned closure binds its own)
)
{
	function(arg1,arg2){
    # Apply every component function to the same argument pair, then
    # stack the results row-wise: one row per component function.
    componentValues <- mapply(function(fun){fun(arg1,arg2)}, funcs)
    matrix(componentValues, nrow=length(funcs), byrow=TRUE)
  }
	### A vector valued function with the vector size equal to the number of
  ### functions in the first argument
}
| /pkg/R/vecFuncMaker.R | no_license | Dong-po/SoilR-exp | R | false | false | 698 | r | #
# vim:set ff=unix expandtab ts=2 sw=2:
vecFuncMaker=function# creates a vector valued function from the functions for the components
### The function is a helper to create a vector valued function of two arguments which is very useful to create systems of ode
(
funcs, ##<< The list of functions computing the vector components
arg1, ##<< The first argument of the component functions
arg2 ##<< The second argument of the component functions
)
{
function(arg1,arg2){
matrix(byrow=TRUE,
nrow=length(funcs),
mapply(
function(fun){fun(arg1,arg2)},
funcs
)
)
}
### A vector valued function with the vector size equal to the number of
### functions in the first argument
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_select.R
\name{model_select}
\alias{model_select}
\title{Fit multiple models and select the best fit}
\usage{
model_select(
x,
models = univariateML_models,
criterion = c("aic", "bic", "loglik"),
na.rm = FALSE,
...
)
}
\arguments{
\item{x}{a (non-empty) numeric vector of data values.}
\item{models}{a character vector containing the distribution models to
select from; see \code{print(univariateML_models)}.}
\item{criterion}{the model selection criterion. Must be one of \code{"aic"},
\code{"bic"}, and \code{"loglik"}.}
\item{na.rm}{logical. Should missing values be removed?}
\item{...}{unused.}
}
\value{
\code{model_select} returns an object of \link[base:class]{class}
\code{univariateML}. This is a named numeric vector with maximum likelihood
estimates for the parameters of the best fitting model and the following
attributes:
\item{\code{model}}{The name of the model.}
\item{\code{density}}{The density associated with the estimates.}
\item{\code{logLik}}{The loglikelihood at the maximum.}
\item{\code{support}}{The support of the density.}
\item{\code{n}}{The number of observations.}
\item{\code{call}}{The call as captured by \code{match.call}}
}
\description{
Selects the best model by log-likelihood, AIC, or BIC.
}
\examples{
model_select(precip)
}
\seealso{
Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995) Continuous Univariate
Distributions, Volume 1, Chapter 17. Wiley, New York.
}
| /man/model_select.Rd | no_license | cran/univariateML | R | false | true | 1,556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_select.R
\name{model_select}
\alias{model_select}
\title{Fit multiple models and select the best fit}
\usage{
model_select(
x,
models = univariateML_models,
criterion = c("aic", "bic", "loglik"),
na.rm = FALSE,
...
)
}
\arguments{
\item{x}{a (non-empty) numeric vector of data values.}
\item{models}{a character vector containing the distribution models to
select from; see \code{print(univariateML_models)}.}
\item{criterion}{the model selection criterion. Must be one of \code{"aic"},
\code{"bic"}, and \code{"loglik"}.}
\item{na.rm}{logical. Should missing values be removed?}
\item{...}{unused.}
}
\value{
\code{model_select} returns an object of \link[base:class]{class}
\code{univariateML}. This is a named numeric vector with maximum likelihood
estimates for the parameters of the best fitting model and the following
attributes:
\item{\code{model}}{The name of the model.}
\item{\code{density}}{The density associated with the estimates.}
\item{\code{logLik}}{The loglikelihood at the maximum.}
\item{\code{support}}{The support of the density.}
\item{\code{n}}{The number of observations.}
\item{\code{call}}{The call as captured by \code{match.call}}
}
\description{
Selects the best model by log-likelihood, AIC, or BIC.
}
\examples{
model_select(precip)
}
\seealso{
Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995) Continuous Univariate
Distributions, Volume 1, Chapter 17. Wiley, New York.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R2ReasyR_functions.R
\name{r2easyR.write}
\alias{r2easyR.write}
\title{Generates a Stockholm file and a R2R meta file that can be read by R2R to draw a secondary structure.}
\usage{
r2easyR.write(output, data_frame, RNA_name = "default", colors = "NA")
}
\arguments{
\item{output}{Path to the output Stockholm file}
\item{data_frame}{A data_frame containing columns labeled "Nucleotide", "Dotbracket", "Labels", and "Colors".}
\item{RNA_name}{The name of the RNA that you are drawing}
\item{colors}{How R2R will draw reactivity colors. No colors = "NA". Colored letters = "letters". Colored circles = "circles".}
}
\value{
A Stockholm-formatted file.
}
\description{
Generates a Stockholm file and a R2R meta file that can be read by R2R to draw a secondary structure.
}
| /man/r2easyR.write.Rd | permissive | JPSieg/R2easyR | R | false | true | 874 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R2ReasyR_functions.R
\name{r2easyR.write}
\alias{r2easyR.write}
\title{Generates a Stockholm file and a R2R meta file that can be read by R2R to draw a secondary structure.}
\usage{
r2easyR.write(output, data_frame, RNA_name = "default", colors = "NA")
}
\arguments{
\item{output}{Path to the output Stockholm file}
\item{data_frame}{A data_frame containing columns labeled "Nucleotide", "Dotbracket", "Labels", and "Colors".}
\item{RNA_name}{The name of the RNA that you are drawing}
\item{colors}{How R2R will draw reactivity colors. No colors = "NA". Colored letters = "letters". Colored circles = "circles".}
}
\value{
A Stockholm-formatted file.
}
\description{
Generates a Stockholm file and a R2R meta file that can be read by R2R to draw a secondary structure.
}
|
# K-means demo (ISLR-style): simulate two shifted Gaussian clusters and
# recover them with kmeans.
set.seed(2)
# 50 points in 2 dimensions.
x <- matrix(rnorm(50 * 2), ncol = 2)
# Shift the first 25 points so the data form two separated clusters.
x[1:25, 1] <- x[1:25, 1] + 3
x[1:25, 2] <- x[1:25, 2] - 4
# K = 2 clusters; nstart = 20 random initialisations keeps the best fit.
km.out <- kmeans(x, 2, nstart = 20)
km.out$cluster
# Colour points by assigned cluster (+1 avoids colour index 1 = black).
plot(x, col = (km.out$cluster + 1), main = "K-Means Clustering Results with K=2",
     xlab = "", ylab = "", pch = 20, cex = 2)
km.out$centers
# Overlay the two fitted cluster centres.
points(km.out$centers[1, 1], km.out$centers[1, 2], pch = 10, col = "red", cex = 2)
points(km.out$centers[2, 1], km.out$centers[2, 2], pch = 10, col = "blue", cex = 2)
| /R语言数据分析与挖掘实战/第五章/kmeans_example.R | no_license | xiaogang00/R-and-machine-learning | R | false | false | 480 | r | set.seed(2)
x <- matrix(rnorm(50 * 2), ncol = 2)
x[1:25, 1] <- x[1:25, 1] + 3
x[1:25, 2] <- x[1:25, 2] - 4
km.out <- kmeans(x, 2, nstart = 20)
km.out$cluster
plot(x, col = (km.out$cluster + 1), main = "K-Means Clustering Results with K=2",
xlab = "", ylab = "", pch = 20, cex = 2)
km.out$centers
points(km.out$centers[1, 1], km.out$centers[1, 2], pch = 10, col = "red", cex = 2)
points(km.out$centers[2, 1], km.out$centers[2, 2], pch = 10, col = "blue", cex = 2)
|
library(UsingR)
data(galton)
# Shiny server: histogram of Galton child heights with a user-movable
# guess `mu` (slider defined in ui.R) and the resulting mean squared error.
shinyServer(
  function(input, output) {
    output$myHist <- renderPlot({
      hist(galton$child, xlab='child height', col='lightblue',main='Histogram')
      # Current guess from the UI input.
      mu <- input$mu
      # Vertical red line marking the guess.
      lines(c(mu, mu), c(0, 200),col="red",lwd=5)
      # MSE of the guess against the observed heights.
      mse <- mean((galton$child - mu)^2)
      text(63, 150, paste("mu = ", mu))
      text(63, 140, paste("MSE = ", round(mse, 2)))
    }) }
)
| /q1q5/server.R | no_license | philallen117/data-products | R | false | false | 453 | r | library(UsingR)
data(galton)
shinyServer(
function(input, output) {
output$myHist <- renderPlot({
hist(galton$child, xlab='child height', col='lightblue',main='Histogram')
mu <- input$mu
lines(c(mu, mu), c(0, 200),col="red",lwd=5)
mse <- mean((galton$child - mu)^2)
text(63, 150, paste("mu = ", mu))
text(63, 140, paste("MSE = ", round(mse, 2)))
}) }
)
|
#' Calculate averages (e.g. ERPs) for single datasets
#'
#' This function is used to create an \code{eeg_evoked} object from
#' \code{eeg_epochs}.
#'
#' @param data An \code{eeg_epochs} object.
#' @param ... Other arguments passed to the averaging functions
#' @author Matt Craddock \email{matt@@mattcraddock.com}
#' @export
eeg_average <- function(data,
                        ...) {
  # S3 generic: dispatch on the class of `data` (eeg_epochs / eeg_tfr).
  UseMethod("eeg_average", data)
}
#' @describeIn eeg_average Default method for averaging EEG objects
#' @export
eeg_average.default <- function(data,
                                ...) {
  # Fallback for unsupported classes: fail loudly rather than guess.
  stop("eeg_epochs or eeg_tfr object required as input.")
}
#' @describeIn eeg_average Create evoked data from \code{eeg_epochs}
#' @importFrom tibble tibble
#' @importFrom dplyr left_join group_by_at summarise_at ungroup
#' @param cols Columns from the \code{epochs} structure that the average should
#'   group on. NULL, the default, uses all columns other than the \code{epoch}
#'   column.
#' @export
eeg_average.eeg_epochs <- function(data,
                                   cols = NULL,
                                   ...) {
  # Channel columns to be averaged.
  elecs <- channel_names(data)
  # Build a minimal epochs table when none exists, so the grouping
  # columns below are always present.
  if (is.null(data$epochs)) {
    n_epochs <- length(unique(data$timings$epoch))
    data$epochs <-
      tibble::new_tibble(list(epoch = unique(data$timings$epoch),
                              participant_id = character(n_epochs),
                              recording = character(n_epochs)),
                         nrow = n_epochs,
                         class = "epoch_info")
  }
  # Attach timing and epoch metadata to the signals for grouping.
  data$signals <- dplyr::left_join(cbind(data$signals,
                                         data$timings),
                                   data$epochs, by = "epoch")
  # Grouping columns: user-supplied `cols` (participant_id is always
  # included), or every epochs column except `epoch`.
  if (!is.null(cols)) {
    if ("participant_id" %in% cols) {
      col_names <- cols
    } else {
      col_names <- c("participant_id", cols)
    }
  } else {
    col_names <- names(data$epochs)
    col_names <- col_names[!(col_names %in% c("epoch"))]
  }
  # Average each channel within time point x grouping columns, then
  # assign one synthetic epoch index per group.
  # NOTE(review): group_by_at()/summarise_at() with vars() is the
  # superseded tidyselect style; works but could migrate to across().
  data$signals <-
    dplyr::group_by_at(data$signals,
                       .vars = vars(time, col_names)) %>%
    dplyr::summarise_at(.vars = vars(elecs),
                        mean) %>%
    dplyr::group_by_at(.vars = col_names) %>%
    dplyr::mutate(epoch = dplyr::group_indices()) %>%
    dplyr::ungroup()
  # Rebuild the timings and epochs metadata for the averaged data.
  timings <- data$signals[, c("time", "epoch", col_names)]
  epochs <- dplyr::select(timings,
                          epoch,
                          !!col_names) %>%
    dplyr::distinct()
  class(epochs) <- c("epoch_info", "tbl_df", "tbl", "data.frame")
  # Wrap everything in an eeg_evoked object.
  data <-
    eeg_evoked(data = data$signals[, elecs],
               chan_info = data$chan_info,
               srate = data$srate,
               timings = timings,
               epochs = epochs)
  data
}
#' @describeIn eeg_average average an eeg_tfr objects over epochs.
#' @export
eeg_average.eeg_tfr <- function(data,
                                cols = NULL,
                                ...) {
  # `cols` is accepted for signature compatibility with the generic but
  # is not used by this method.
  # Already-averaged data has no epoch dimension; return it untouched.
  if (!"epoch" %in% data$dimensions) {
    message("Data is already averaged.")
    return(data)
  }
  # Collapse the epoch dimension, then drop its label.
  # (Removed a dead `orig_names <- dimnames(data$signals)` assignment and
  # a commented-out dimnames line from the original.)
  data <- average_tf(data)
  data$dimensions <- data$dimensions[-which(data$dimensions == "epoch")]
  data
}
#' Internal function for averaging over epochs for eeg_tfr objects.
#'
#' Collapses the first (epoch) dimension of the 4-d `signals` array by
#' averaging: circular mean for phase output, arithmetic mean otherwise.
#' Also reduces `epochs`/`timings` metadata to a single epoch.
#'
#' @param data data to average over
#' @keywords internal
average_tf <- function(data) {
  # Need to find a way to make this respect epochs structure...
  orig_dims <- dimnames(data$signals)
  if (data$freq_info$output == "phase") {
    # Phase angles need a circular mean over the epoch dimension.
    # NOTE(review): unlike the branch below, this branch does not restore
    # dimnames or reduce data$epochs — confirm whether intentional.
    data$signals <- apply(data$signals,
                          c(2, 3, 4),
                          circ_mean)
  } else {
    # Arithmetic mean over epochs for each (time? x) freq x channel cell.
    # NOTE(review): 1:dim(...) would misbehave for zero-extent dims;
    # seq_len() would be safer. Left unchanged here.
    avg_tf <- array(0, dim = dim(data$signals)[2:4])
    for (iz in 1:dim(data$signals)[3]) {
      for (ij in 1:dim(data$signals)[4]) {
        avg_tf[, iz, ij] <- colMeans(data$signals[ , , iz, ij, drop = FALSE])
      }
    }
    data$signals <- avg_tf
    # Keep the labels of the surviving (non-epoch) dimensions.
    dimnames(data$signals) <- orig_dims[2:4]
    # Collapse the epochs metadata to a single representative row.
    cols <- c("epoch", "participant_id", "recording")
    data$epochs <- data$epochs[1, colnames(data$epochs) %in% cols]
  }
  # Keep only the timings of the first epoch as the template.
  data$timings <- dplyr::filter(data$timings, epoch == 1)
  data
}
#' Grand average
#'
#' @param data A list of objects to be averaged over; currently only supports
#'   lists of \code{eeg_evoked} objects
#' @param keep_indivs Keep averages for individual participants. Logical.
#'   Defaults to TRUE.
#' @noRd
eeg_grandaverage <- function(data,
                             keep_indivs = TRUE) {
  # A list of per-participant objects is required.
  if (!inherits(data, "list")) {
    stop("A list of objects is required.")
  }
  if (!is.eeg_evoked(data[[1]])) {
    stop("Only eeg_evoked objects are supported at this time.")
  }
  # Check for consistency of input
  if (!check_classes(data)) {
    stop("Some objects are of different classes.")
  }
  # FIX: `ga_sigs` was previously referenced without ever being assigned;
  # combine the individual averages with the (previously unused) helper.
  ga_sigs <- create_grandavg(data, keep_indivs)
  grand_avg <- eeg_GA(ga_sigs,
                      srate = data[[1]]$srate,
                      timings = data[[1]]$timings,
                      chan_info = data[[1]]$chan_info,
                      indivs = keep_indivs)
  grand_avg
}
#' Create a grand average file from multiple evoked objects
#'
#' @param data List of per-participant objects, each with a `signals` element.
#' @param keep_indivs If TRUE, stack individual averages row-wise with a
#'   `sub_no` id column; if FALSE, return the element-wise mean.
#' @param x Optional name of an element within each `signals` list
#'   (e.g. one condition) to extract instead of the whole `signals`.
#' @importFrom dplyr bind_rows
#' @keywords internal
create_grandavg <- function(data,
                            keep_indivs,
                            x = NULL) {
  # Pull out each participant's signals (optionally one named element).
  if (is.null(x)) {
    indiv_data <- lapply(data, function(i) i$signals)
  } else {
    indiv_data <- lapply(data, function(i) i$signals[[x]])
  }
  if (keep_indivs) {
    # Stack individual averages, tagging rows with the participant index.
    dplyr::bind_rows(indiv_data, .id = "sub_no")
  } else {
    # Element-wise mean across participants. (FIX: dropped the redundant
    # trailing assignments so the result is returned visibly.)
    Reduce("+", indiv_data) / length(indiv_data)
  }
}
#' Check that all classes in a list match
#'
#' @param data list of objects to check
#' @return TRUE if every element has a class vector identical to the
#'   first element's (vacuously TRUE for an empty list).
#' @keywords internal
check_classes <- function(data) {
  stopifnot(is.list(data))
  # FIX: guard the empty case, which previously errored on data[[1]].
  if (length(data) == 0L) {
    return(TRUE)
  }
  dat_classes <- lapply(data, class)
  # vapply (rather than sapply) guarantees a logical vector even for a
  # single-element list.
  all(vapply(dat_classes, identical, logical(1), dat_classes[[1]]))
}
#' Check that all conditions in a list match
#'
#' Compares the names of each object's `signals` element against the
#' first object's; TRUE only if all are identical.
#' @noRd
check_conds <- function(data_list) {
  get_names <- lapply(data_list,
                      function(x) names(x$signals))
  # vapply (rather than sapply) guarantees a type-stable logical result.
  all(vapply(get_names, identical, logical(1), get_names[[1]]))
}
| /R/data_averaging.R | permissive | dannydaniel/eegUtils | R | false | false | 6,468 | r | #' Calculate averages (e.g. ERPs) for single datasets
#'
#' This function is used to create an \code{eeg_evoked} object from
#' \code{eeg_epochs}.
#'
#' @param data An \code{eeg_epochs} object.
#' @param ... Other arguments passed to the averaging functions
#' @author Matt craddock \email{matt@@mattcraddock.com}
#' @export
eeg_average <- function(data,
...) {
UseMethod("eeg_average", data)
}
#' @describeIn eeg_average Default method for averaging EEG objects
#' @export
eeg_average.default <- function(data,
...) {
stop("eeg_epochs or eeg_tfr object required as input.")
}
#' @describeIn eeg_average Create evoked data from \code{eeg_epochs}
#' @importFrom tibble tibble
#' @importFrom dplyr left_join group_by_at summarise_at ungroup
#' @param cols Columns from the \code{epochs} structure that the average should
#'   group on. NULL, the default, uses all columns other than the \code{epoch}
#'   column.
#' @export
eeg_average.eeg_epochs <- function(data,
                                   cols = NULL,
                                   ...) {
  elecs <- channel_names(data)
  # If no epochs metadata exists, synthesise a minimal "epoch_info" tibble
  # with one row per unique epoch and empty participant/recording fields.
  if (is.null(data$epochs)) {
    n_epochs <- length(unique(data$timings$epoch))
    data$epochs <-
      tibble::new_tibble(list(epoch = unique(data$timings$epoch),
                              participant_id = character(n_epochs),
                              recording = character(n_epochs)),
                         nrow = n_epochs,
                         class = "epoch_info")
  }
  # Attach timing and epoch metadata to every sample so we can group on them.
  data$signals <- dplyr::left_join(cbind(data$signals,
                                         data$timings),
                                   data$epochs, by = "epoch")
  # Grouping columns: user-supplied cols (always forced to include
  # participant_id), or every epochs column except "epoch" itself.
  if (!is.null(cols)) {
    if ("participant_id" %in% cols) {
      col_names <- cols
    } else {
      col_names <- c("participant_id", cols)
    }
  } else {
    col_names <- names(data$epochs)
    col_names <- col_names[!(col_names %in% c("epoch"))]
  }
  # Average each electrode within time x grouping-column cells, then assign a
  # fresh epoch index per grouping combination.
  # NOTE(review): dplyr::group_indices() inside mutate() is deprecated in
  # current dplyr (cur_group_id() is the replacement) -- confirm the dplyr
  # version this package targets.
  data$signals <-
    dplyr::group_by_at(data$signals,
                       .vars = vars(time, col_names)) %>%
    dplyr::summarise_at(.vars = vars(elecs),
                        mean) %>%
    dplyr::group_by_at(.vars = col_names) %>%
    dplyr::mutate(epoch = dplyr::group_indices()) %>%
    dplyr::ungroup()
  # Rebuild timings and the epochs metadata table for the averaged data.
  timings <- data$signals[, c("time", "epoch", col_names)]
  epochs <- dplyr::select(timings,
                          epoch,
                          !!col_names) %>%
    dplyr::distinct()
  class(epochs) <- c("epoch_info", "tbl_df", "tbl", "data.frame")
  # Wrap the averaged signals in an eeg_evoked object, carrying over channel
  # info and sampling rate from the input.
  data <-
    eeg_evoked(data = data$signals[, elecs],
               chan_info = data$chan_info,
               srate = data$srate,
               timings = timings,
               epochs = epochs)
  data
}
#' @describeIn eeg_average average an eeg_tfr objects over epochs.
#' @export
eeg_average.eeg_tfr <- function(data,
                                cols = NULL,
                                ...) {
  # Guard clause: nothing to do if the epoch dimension is already collapsed.
  if (!"epoch" %in% data$dimensions) {
    message("Data is already averaged.")
    return(data)
  }
  data <- average_tf(data)
  # Remove the now-collapsed epoch entry from the stored dimension labels.
  data$dimensions <- data$dimensions[data$dimensions != "epoch"]
  data
}
#' Internal function for averaging over epochs for eeg_tfr objects.
#'
#' Collapses the first (epoch) dimension of the 4-D `signals` array by
#' averaging, and trims the `epochs`/`timings` metadata to a single epoch.
#' Dimension order appears to be epoch x time x channel x frequency --
#' TODO(review): confirm axis order against the eeg_tfr constructor.
#'
#' @param data data to average over
#' @return The input object with `signals` reduced to 3 dimensions and the
#'   metadata reduced to one epoch.
#' @keywords internal
average_tf <- function(data) {
  # Need to find a way to make this respect epochs structure...
  orig_dims <- dimnames(data$signals)
  if (data$freq_info$output == "phase") {
    # Phase angles are circular quantities, so average them with a circular
    # mean over the epoch dimension.
    data$signals <- apply(data$signals,
                          c(2, 3, 4),
                          circ_mean)
  } else {
    # Plain arithmetic mean over the first (epoch) dimension, computed slice
    # by slice.
    # NOTE(review): seq_len(dim(...)) would be safer than 1:dim(...) if a
    # dimension could ever be zero.
    avg_tf <- array(0, dim = dim(data$signals)[2:4])
    for (iz in 1:dim(data$signals)[3]) {
      for (ij in 1:dim(data$signals)[4]) {
        avg_tf[, iz, ij] <- colMeans(data$signals[ , , iz, ij, drop = FALSE])
      }
    }
    data$signals <- avg_tf
    dimnames(data$signals) <- orig_dims[2:4]
    # Keep only the identifying columns for the single remaining "epoch".
    cols <- c("epoch", "participant_id", "recording")
    data$epochs <- data$epochs[1, colnames(data$epochs) %in% cols]
  }
  # Keep only the timings of the first epoch as representative timings.
  data$timings <- dplyr::filter(data$timings, epoch == 1)
  data
}
#' Grand average
#'
#' Combine a list of evoked objects into a grand average.
#'
#' @param data A list of objects to be averaged over; currently only supports
#'   lists of \code{eeg_evoked} objects
#' @param keep_indivs Keep averages for individual participants. Logical.
#'   Defaults to TRUE.
#' @return An \code{eeg_GA} object built from the combined signals.
#' @noRd
eeg_grandaverage <- function(data,
                             keep_indivs = TRUE) {
  # is.list() rather than `!class(data) == "list"`: class() can return a
  # vector, which makes a `==` comparison fragile.
  if (!is.list(data)) {
    stop("A list of objects is required.")
  }
  if (!is.eeg_evoked(data[[1]])) {
    stop("Only eeg_evoked objects are supported at this time.")
  }
  # Check for consistency of input
  if (!check_classes(data)) {
    stop("Some objects are of different classes.")
  }
  # BUG FIX: `ga_sigs` was referenced below but never created. Build it from
  # the individual signals, stacked or averaged according to keep_indivs.
  ga_sigs <- create_grandavg(data, keep_indivs)
  grand_avg <- eeg_GA(ga_sigs,
                      srate = data[[1]]$srate,
                      timings = data[[1]]$timings,
                      chan_info = data[[1]]$chan_info,
                      indivs = keep_indivs)
  grand_avg
}
#' Create a grand average file from multiple evoked objects
#'
#' @param data A list of objects, each carrying a `signals` element.
#' @param keep_indivs Logical. If TRUE, stack individual signals row-wise with
#'   a `sub_no` id column; if FALSE, return their element-wise mean.
#' @param x Optional name/index of a sub-element of `signals` to extract
#'   (used when `signals` is itself a list of conditions). Defaults to NULL.
#' @return A data frame of stacked signals (`keep_indivs = TRUE`) or the
#'   element-wise mean of the individual signals (`keep_indivs = FALSE`).
#' @importFrom dplyr bind_rows
#' @keywords internal
create_grandavg <- function(data,
                            keep_indivs,
                            x = NULL) {
  # Pull out each participant's signals (optionally a single condition x).
  if (is.null(x)) {
    indiv_data <- lapply(data,
                         function(i) i$signals)
  } else {
    indiv_data <- lapply(data,
                         function(i) i$signals[[x]])
  }
  if (keep_indivs) {
    # Stack individuals, tagging rows with their position in the input list.
    indiv_data <- dplyr::bind_rows(indiv_data,
                                   .id = "sub_no")
  } else {
    # Element-wise grand mean across individuals.
    indiv_data <- Reduce("+",
                         indiv_data) / length(indiv_data)
  }
  # Return visibly; the original ended on an assignment, which returns its
  # value invisibly.
  indiv_data
}
#' Check that all classes in a list match
#'
#' Compares the class vector of every element of `data` against that of the
#' first element.
#'
#' @param data list of objects to check
#' @return `TRUE` if every element's class vector is identical to the first
#'   element's, `FALSE` otherwise. A length-one list returns `TRUE`.
#' @keywords internal
check_classes <- function(data) {
  stopifnot(is.list(data))
  dat_classes <- lapply(data, class)
  # vapply() guarantees a logical result, unlike sapply(), whose return type
  # depends on its input.
  check_class <- vapply(dat_classes,
                        identical,
                        logical(1),
                        dat_classes[[1]])
  all(check_class)
}
#' Check that all conditions in a list match
#'
#' Verifies that the `signals` element of every object in `data_list` carries
#' the same names as the first object's `signals`.
#'
#' @param data_list list of objects, each with a named `signals` element
#' @return `TRUE` if all `signals` names are identical, `FALSE` otherwise.
#' @noRd
check_conds <- function(data_list) {
  get_names <- lapply(data_list, function(x) names(x$signals))
  # vapply() guarantees a logical result, unlike sapply().
  check_names <- vapply(get_names,
                        identical,
                        logical(1),
                        get_names[[1]])
  all(check_names)
}
|
### R code from vignette source 'Ch_multiple_linear_regression.Rnw'
###################################################
### code chunk number 1: setup
###################################################
rm(list = ls())
s <- search()[-1]
s <- s[-match(c("package:base", "package:stats", "package:graphics", "package:grDevices",
"package:utils", "package:datasets", "package:methods", "Autoloads"), s)]
if (length(s) > 0) sapply(s, detach, character.only = TRUE)
if (!file.exists("tables")) dir.create("tables")
if (!file.exists("figures")) dir.create("figures")
set.seed(290875)
options(prompt = "R> ", continue = "+ ",
width = 63, # digits = 4,
show.signif.stars = FALSE,
SweaveHooks = list(leftpar = function()
par(mai = par("mai") * c(1, 1.05, 1, 1)),
bigleftpar = function()
par(mai = par("mai") * c(1, 1.7, 1, 1))))
HSAURpkg <- require("HSAUR2")
if (!HSAURpkg) stop("cannot load package ", sQuote("HSAUR2"))
rm(HSAURpkg)
### </FIXME> hm, R-2.4.0 --vanilla seems to need this
a <- Sys.setlocale("LC_ALL", "C")
### </FIXME>
book <- TRUE
refs <- cbind(c("AItR", "DAGD", "SI", "CI", "ANOVA", "MLR", "GLM",
"DE", "RP", "GAM", "SA", "ALDI", "ALDII", "SIMC", "MA", "PCA",
"MDS", "CA"), 1:18)
# Map a chapter mnemonic (e.g. "MLR") to a LaTeX cross-reference when building
# the full book, or to a plain numbered chapter string otherwise.
# Reads the globals `refs` (mnemonic/number table) and `book` (flag).
ch <- function(x) {
  entry <- refs[refs[, 1] == x, ]
  if (book) {
    paste0("Chapter~\\\\ref{", entry[1], "}")
  } else {
    paste0("Chapter~", entry[2])
  }
}
if (file.exists("deparse.R"))
source("deparse.R")
setHook(packageEvent("lattice", "attach"), function(...) {
lattice.options(default.theme =
function()
standard.theme("pdf", color = FALSE))
})
###################################################
### code chunk number 2: singlebook
###################################################
book <- FALSE
###################################################
### code chunk number 3: MLR-setup
###################################################
library("wordcloud")
###################################################
### code chunk number 4: MLR-hubble-tab
###################################################
data("hubble", package = "gamair")
names(hubble) <- c("galaxy", "velocity", "distance")
toLatex(HSAURtable(hubble, package = "gamair"), pcol = 2,
caption = paste("Distance and velocity for 24 galaxies."),
label = "MLR-hubble-tab")
###################################################
### code chunk number 5: MLR-clouds-tab
###################################################
data("clouds", package = "HSAUR2")
toLatex(HSAURtable(clouds), pcol = 1,
caption = paste("Cloud seeding experiments in Florida -- see text for",
"explanations of the variables."),
label = "MLR-clouds-tab")
###################################################
### code chunk number 6: MLR-hubble-plot
###################################################
plot(velocity ~ distance, data = hubble)
###################################################
### code chunk number 7: MLR-hubble-beta1
###################################################
sum(hubble$distance * hubble$velocity) /
sum(hubble$distance^2)
###################################################
### code chunk number 8: MLR-hubble-lm
###################################################
hmod <- lm(velocity ~ distance - 1, data = hubble)
###################################################
### code chunk number 9: MLR-hubble-lm
###################################################
coef(hmod)
###################################################
### code chunk number 10: MLR-hubble-age
###################################################
Mpc <- 3.09 * 10^19
ysec <- 60^2 * 24 * 365.25
Mpcyear <- Mpc / ysec
1 / (coef(hmod) / Mpcyear)
###################################################
### code chunk number 11: MLR-hubble-lmplot
###################################################
layout(matrix(1:2, ncol = 2))
plot(velocity ~ distance, data = hubble)
abline(hmod)
plot(hmod, which = 1)
###################################################
### code chunk number 12: MLR-clouds-boxplots
###################################################
data("clouds", package = "HSAUR2")
layout(matrix(1:2, nrow = 2))
bxpseeding <- boxplot(rainfall ~ seeding, data = clouds,
ylab = "Rainfall", xlab = "Seeding")
bxpecho <- boxplot(rainfall ~ echomotion, data = clouds,
ylab = "Rainfall", xlab = "Echo Motion")
###################################################
### code chunk number 13: MLR-clouds-scatterplots
###################################################
layout(matrix(1:4, nrow = 2))
plot(rainfall ~ time, data = clouds)
plot(rainfall ~ cloudcover, data = clouds)
plot(rainfall ~ sne, data = clouds, xlab="S-Ne criterion")
plot(rainfall ~ prewetness, data = clouds)
###################################################
### code chunk number 14: MLR-clouds-outliers
###################################################
rownames(clouds)[clouds$rainfall %in% c(bxpseeding$out,
bxpecho$out)]
###################################################
### code chunk number 15: MLR-clouds-formula
###################################################
clouds_formula <- rainfall ~ seeding +
seeding:(sne + cloudcover + prewetness + echomotion) +
time
###################################################
### code chunk number 16: MLR-clouds-modelmatrix
###################################################
Xstar <- model.matrix(clouds_formula, data = clouds)
###################################################
### code chunk number 17: MLR-clouds-contrasts
###################################################
attr(Xstar, "contrasts")
###################################################
### code chunk number 18: MLR-clouds-lm
###################################################
clouds_lm <- lm(clouds_formula, data = clouds)
class(clouds_lm)
###################################################
### code chunk number 19: MLR-clouds-summary
###################################################
summary(clouds_lm)
###################################################
### code chunk number 20: MLR-clouds-coef
###################################################
betastar <- coef(clouds_lm)
betastar
###################################################
### code chunk number 21: MLR-clouds-vcov
###################################################
Vbetastar <- vcov(clouds_lm)
###################################################
### code chunk number 22: MLR-clouds-sd
###################################################
sqrt(diag(Vbetastar))
###################################################
### code chunk number 23: MLR-clouds-lmplot
###################################################
psymb <- as.numeric(clouds$seeding)
plot(rainfall ~ sne, data = clouds, pch = psymb,
xlab = "S-Ne criterion")
abline(lm(rainfall ~ sne, data = clouds,
subset = seeding == "no"))
abline(lm(rainfall ~ sne, data = clouds,
subset = seeding == "yes"), lty = 2)
legend("topright", legend = c("No seeding", "Seeding"),
pch = 1:2, lty = 1:2, bty = "n")
###################################################
### code chunk number 24: MLR-clouds-residfitted
###################################################
clouds_resid <- residuals(clouds_lm)
clouds_fitted <- fitted(clouds_lm)
###################################################
### code chunk number 25: MLR-clouds-residplot
###################################################
plot(clouds_fitted, clouds_resid, xlab = "Fitted values",
ylab = "Residuals", type = "n",
ylim = max(abs(clouds_resid)) * c(-1, 1))
abline(h = 0, lty = 2)
textplot(clouds_fitted, clouds_resid, words = rownames(clouds), new = FALSE)
###################################################
### code chunk number 26: MLR-clouds-qqplot
###################################################
qqnorm(clouds_resid, ylab = "Residuals")
qqline(clouds_resid)
###################################################
### code chunk number 27: MLR-clouds-cook (eval = FALSE)
###################################################
## plot(clouds_lm)
###################################################
### code chunk number 28: MLR-clouds-cook
###################################################
plot(clouds_lm, which = 4, sub.caption = NULL)
| /HSAUR2/doc/Ch_multiple_linear_regression.R | permissive | solgenomics/R_libs | R | false | false | 8,412 | r | ### R code from vignette source 'Ch_multiple_linear_regression.Rnw'
###################################################
### code chunk number 1: setup
###################################################
rm(list = ls())
s <- search()[-1]
s <- s[-match(c("package:base", "package:stats", "package:graphics", "package:grDevices",
"package:utils", "package:datasets", "package:methods", "Autoloads"), s)]
if (length(s) > 0) sapply(s, detach, character.only = TRUE)
if (!file.exists("tables")) dir.create("tables")
if (!file.exists("figures")) dir.create("figures")
set.seed(290875)
options(prompt = "R> ", continue = "+ ",
width = 63, # digits = 4,
show.signif.stars = FALSE,
SweaveHooks = list(leftpar = function()
par(mai = par("mai") * c(1, 1.05, 1, 1)),
bigleftpar = function()
par(mai = par("mai") * c(1, 1.7, 1, 1))))
HSAURpkg <- require("HSAUR2")
if (!HSAURpkg) stop("cannot load package ", sQuote("HSAUR2"))
rm(HSAURpkg)
### </FIXME> hm, R-2.4.0 --vanilla seems to need this
a <- Sys.setlocale("LC_ALL", "C")
### </FIXME>
book <- TRUE
refs <- cbind(c("AItR", "DAGD", "SI", "CI", "ANOVA", "MLR", "GLM",
"DE", "RP", "GAM", "SA", "ALDI", "ALDII", "SIMC", "MA", "PCA",
"MDS", "CA"), 1:18)
# Map a chapter mnemonic (e.g. "MLR") to a LaTeX cross-reference when building
# the full book, or to a plain numbered chapter string otherwise.
# Reads the globals `refs` (mnemonic/number table) and `book` (flag).
ch <- function(x) {
  entry <- refs[refs[, 1] == x, ]
  if (book) {
    paste0("Chapter~\\\\ref{", entry[1], "}")
  } else {
    paste0("Chapter~", entry[2])
  }
}
if (file.exists("deparse.R"))
source("deparse.R")
setHook(packageEvent("lattice", "attach"), function(...) {
lattice.options(default.theme =
function()
standard.theme("pdf", color = FALSE))
})
###################################################
### code chunk number 2: singlebook
###################################################
book <- FALSE
###################################################
### code chunk number 3: MLR-setup
###################################################
library("wordcloud")
###################################################
### code chunk number 4: MLR-hubble-tab
###################################################
data("hubble", package = "gamair")
names(hubble) <- c("galaxy", "velocity", "distance")
toLatex(HSAURtable(hubble, package = "gamair"), pcol = 2,
caption = paste("Distance and velocity for 24 galaxies."),
label = "MLR-hubble-tab")
###################################################
### code chunk number 5: MLR-clouds-tab
###################################################
data("clouds", package = "HSAUR2")
toLatex(HSAURtable(clouds), pcol = 1,
caption = paste("Cloud seeding experiments in Florida -- see text for",
"explanations of the variables."),
label = "MLR-clouds-tab")
###################################################
### code chunk number 6: MLR-hubble-plot
###################################################
plot(velocity ~ distance, data = hubble)
###################################################
### code chunk number 7: MLR-hubble-beta1
###################################################
sum(hubble$distance * hubble$velocity) /
sum(hubble$distance^2)
###################################################
### code chunk number 8: MLR-hubble-lm
###################################################
hmod <- lm(velocity ~ distance - 1, data = hubble)
###################################################
### code chunk number 9: MLR-hubble-lm
###################################################
coef(hmod)
###################################################
### code chunk number 10: MLR-hubble-age
###################################################
Mpc <- 3.09 * 10^19
ysec <- 60^2 * 24 * 365.25
Mpcyear <- Mpc / ysec
1 / (coef(hmod) / Mpcyear)
###################################################
### code chunk number 11: MLR-hubble-lmplot
###################################################
layout(matrix(1:2, ncol = 2))
plot(velocity ~ distance, data = hubble)
abline(hmod)
plot(hmod, which = 1)
###################################################
### code chunk number 12: MLR-clouds-boxplots
###################################################
data("clouds", package = "HSAUR2")
layout(matrix(1:2, nrow = 2))
bxpseeding <- boxplot(rainfall ~ seeding, data = clouds,
ylab = "Rainfall", xlab = "Seeding")
bxpecho <- boxplot(rainfall ~ echomotion, data = clouds,
ylab = "Rainfall", xlab = "Echo Motion")
###################################################
### code chunk number 13: MLR-clouds-scatterplots
###################################################
layout(matrix(1:4, nrow = 2))
plot(rainfall ~ time, data = clouds)
plot(rainfall ~ cloudcover, data = clouds)
plot(rainfall ~ sne, data = clouds, xlab="S-Ne criterion")
plot(rainfall ~ prewetness, data = clouds)
###################################################
### code chunk number 14: MLR-clouds-outliers
###################################################
rownames(clouds)[clouds$rainfall %in% c(bxpseeding$out,
bxpecho$out)]
###################################################
### code chunk number 15: MLR-clouds-formula
###################################################
clouds_formula <- rainfall ~ seeding +
seeding:(sne + cloudcover + prewetness + echomotion) +
time
###################################################
### code chunk number 16: MLR-clouds-modelmatrix
###################################################
Xstar <- model.matrix(clouds_formula, data = clouds)
###################################################
### code chunk number 17: MLR-clouds-contrasts
###################################################
attr(Xstar, "contrasts")
###################################################
### code chunk number 18: MLR-clouds-lm
###################################################
clouds_lm <- lm(clouds_formula, data = clouds)
class(clouds_lm)
###################################################
### code chunk number 19: MLR-clouds-summary
###################################################
summary(clouds_lm)
###################################################
### code chunk number 20: MLR-clouds-coef
###################################################
betastar <- coef(clouds_lm)
betastar
###################################################
### code chunk number 21: MLR-clouds-vcov
###################################################
Vbetastar <- vcov(clouds_lm)
###################################################
### code chunk number 22: MLR-clouds-sd
###################################################
sqrt(diag(Vbetastar))
###################################################
### code chunk number 23: MLR-clouds-lmplot
###################################################
psymb <- as.numeric(clouds$seeding)
plot(rainfall ~ sne, data = clouds, pch = psymb,
xlab = "S-Ne criterion")
abline(lm(rainfall ~ sne, data = clouds,
subset = seeding == "no"))
abline(lm(rainfall ~ sne, data = clouds,
subset = seeding == "yes"), lty = 2)
legend("topright", legend = c("No seeding", "Seeding"),
pch = 1:2, lty = 1:2, bty = "n")
###################################################
### code chunk number 24: MLR-clouds-residfitted
###################################################
clouds_resid <- residuals(clouds_lm)
clouds_fitted <- fitted(clouds_lm)
###################################################
### code chunk number 25: MLR-clouds-residplot
###################################################
plot(clouds_fitted, clouds_resid, xlab = "Fitted values",
ylab = "Residuals", type = "n",
ylim = max(abs(clouds_resid)) * c(-1, 1))
abline(h = 0, lty = 2)
textplot(clouds_fitted, clouds_resid, words = rownames(clouds), new = FALSE)
###################################################
### code chunk number 26: MLR-clouds-qqplot
###################################################
qqnorm(clouds_resid, ylab = "Residuals")
qqline(clouds_resid)
###################################################
### code chunk number 27: MLR-clouds-cook (eval = FALSE)
###################################################
## plot(clouds_lm)
###################################################
### code chunk number 28: MLR-clouds-cook
###################################################
plot(clouds_lm, which = 4, sub.caption = NULL)
|
# Simple linear regression: predicting Salary from YearsExperience.
# Importing the data
dataset <- read.csv('Salary_Data.csv')
# YearsExperience is the independent variable and Salary the dependent
# variable.
# install.packages('caTools')
library(caTools)
# Fixed seed so the train/test split is reproducible.
set.seed(123)
# Two thirds of the observations go to the training set.
split <- sample.split(dataset$Salary, SplitRatio = 2/3)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Fitting simple linear regression to the training set.
regressor <- lm (formula = Salary ~ YearsExperience,
                 data = training_set)
# Predicting the test data set
y_pred <- predict(regressor, newdata = test_set)
# Visualising the results for the training dataset
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
  geom_point(aes( x = training_set$YearsExperience, y = training_set$Salary ),
             colour ='red') +
  geom_line(aes( x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue') +
  ggtitle('Years of Experience vs Salary (Training Data Set)') +
  xlab('Years of Experience') +
  ylab('Salary')
# Visualising the results for the test dataset; the regression line is still
# drawn from the training-set fit.
ggplot() +
  geom_point(aes( x = test_set$YearsExperience, y = test_set$Salary ),
             colour ='red') +
  geom_line(aes( x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue') +
  ggtitle('Years of Experience vs Salary (Test Data Set)') +
  xlab('Years of Experience') +
  ylab('Salary')
| /simple-linear-regression/YearsOfExperience vs Salary.r | permissive | iamdv/ml-atoz | R | false | false | 1,456 | r | #Importing the data
# Simple linear regression: predicting Salary from YearsExperience.
dataset <- read.csv('Salary_Data.csv')
# YearsExperience is the independent variable and Salary the dependent
# variable.
# install.packages('caTools')
library(caTools)
# Fixed seed so the train/test split is reproducible.
set.seed(123)
# Two thirds of the observations go to the training set.
split <- sample.split(dataset$Salary, SplitRatio = 2/3)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Fitting simple linear regression to the training set.
regressor <- lm (formula = Salary ~ YearsExperience,
                 data = training_set)
# Predicting the test data set
y_pred <- predict(regressor, newdata = test_set)
# Visualising the results for the training dataset
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
  geom_point(aes( x = training_set$YearsExperience, y = training_set$Salary ),
             colour ='red') +
  geom_line(aes( x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue') +
  ggtitle('Years of Experience vs Salary (Training Data Set)') +
  xlab('Years of Experience') +
  ylab('Salary')
# Visualising the results for the test dataset; the regression line is still
# drawn from the training-set fit.
ggplot() +
  geom_point(aes( x = test_set$YearsExperience, y = test_set$Salary ),
             colour ='red') +
  geom_line(aes( x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue') +
  ggtitle('Years of Experience vs Salary (Test Data Set)') +
  xlab('Years of Experience') +
  ylab('Salary')
|
#' Fish growth snippet from Katie's summer 2018 field work
#'
#' @format A data set of fish growth measurements; length is in mm and all
#'   values are integers.
#' @source Katie Zinn
"Fish_data"
| /FishGrowth/R/fish_data.R | no_license | katiezinn/Instantaneous_growth | R | false | false | 150 | r | #' Fish growth snippet from Katie's summer 2018 field work
#' Fish growth snippet from Katie's summer 2018 field work
#'
#' @format A data set of fish growth measurements; length is in mm and all
#'   values are integers.
#' @source Katie Zinn
"Fish_data"
|
# Random forest classification of tasseled-cap change images.
#
# Trains a random forest on labelled (db, dg, dw) samples from a CSV, then
# classifies every .tif image in img_dir and writes one GeoTIFF of predicted
# classes per input image into output_dir.
#
# NOTE(review): the original script began with rm(list = ls()); that clears
# the caller's entire workspace as a side effect and has been removed -- run
# the script in a fresh R session instead.
library(randomForest)
library(rgdal)
library(raster)
train_csv_path <- "/projectnb/landsat/users/shijuan/above/bh09v15/rand_forest/combined_csv.csv"
img_dir <- "/projectnb/landsat/projects/ABOVE/CCDC/Bh09v15/out_tc/"
output_dir <- "/projectnb/landsat/users/shijuan/above/bh09v15/rand_forest/rf_img_v1/"
# for now, only 37 training samples
agent_train <- read.csv(file = train_csv_path, header = TRUE)
# Drop rows with any missing band value before fitting.
agent_train <- agent_train[complete.cases(agent_train), ]
agent_rf <- randomForest(agent ~ db + dg + dw, data = agent_train)
# `pattern` is a regular expression, not a glob: "\\.tif$" matches files
# ending in ".tif" (the original "*.tif$" was glob syntax).
img_files <- list.files(path = img_dir, pattern = "\\.tif$",
                        all.files = TRUE, full.names = TRUE)
for (file in img_files) {
  img <- brick(file)
  # Band names must match the predictor names used in the model formula.
  names(img) <- c('db', 'dg', 'dw')
  preds_rf <- predict(img, model = agent_rf, na.rm = TRUE)
  # Derive the output name from the input basename, e.g. "x.tif" -> "x_rf.tif".
  file_name <- strsplit(basename(file), '[.]')[[1]]
  new_name <- paste0(file_name[1], '_rf.tif')
  output_path <- paste0(output_dir, new_name)
  print(output_path)
  writeRaster(preds_rf, filename = output_path, format = 'GTiff',
              overwrite = TRUE)
}
| /others/rand_forest_classify.R | no_license | fdbesanto2/ABOVE | R | false | false | 1,078 | r | # Random forest classification
# Random forest classification of tasseled-cap change images.
#
# Trains a random forest on labelled (db, dg, dw) samples from a CSV, then
# classifies every .tif image in img_dir and writes one GeoTIFF of predicted
# classes per input image into output_dir.
#
# NOTE(review): the original script began with rm(list = ls()); that clears
# the caller's entire workspace as a side effect and has been removed -- run
# the script in a fresh R session instead.
library(randomForest)
library(rgdal)
library(raster)
train_csv_path <- "/projectnb/landsat/users/shijuan/above/bh09v15/rand_forest/combined_csv.csv"
img_dir <- "/projectnb/landsat/projects/ABOVE/CCDC/Bh09v15/out_tc/"
output_dir <- "/projectnb/landsat/users/shijuan/above/bh09v15/rand_forest/rf_img_v1/"
# for now, only 37 training samples
agent_train <- read.csv(file = train_csv_path, header = TRUE)
# Drop rows with any missing band value before fitting.
agent_train <- agent_train[complete.cases(agent_train), ]
agent_rf <- randomForest(agent ~ db + dg + dw, data = agent_train)
# `pattern` is a regular expression, not a glob: "\\.tif$" matches files
# ending in ".tif" (the original "*.tif$" was glob syntax).
img_files <- list.files(path = img_dir, pattern = "\\.tif$",
                        all.files = TRUE, full.names = TRUE)
for (file in img_files) {
  img <- brick(file)
  # Band names must match the predictor names used in the model formula.
  names(img) <- c('db', 'dg', 'dw')
  preds_rf <- predict(img, model = agent_rf, na.rm = TRUE)
  # Derive the output name from the input basename, e.g. "x.tif" -> "x_rf.tif".
  file_name <- strsplit(basename(file), '[.]')[[1]]
  new_name <- paste0(file_name[1], '_rf.tif')
  output_path <- paste0(output_dir, new_name)
  print(output_path)
  writeRaster(preds_rf, filename = output_path, format = 'GTiff',
              overwrite = TRUE)
}
|
# Frequency-domain stochastic regression with pointwise partial F-tests.
#
# Compares a "full" spectral regression of the response (LAST column of
# `data`) on the inputs in `cols.full` against a "reduced" model using
# `cols.red`, returning residual power spectra, impulse-response regression
# coefficients, F statistics and partial coherence across frequency, and
# optionally plotting either the F statistic ("F.stat") or the squared
# coherence ("coh").
#
# Arguments:
#   data      - multivariate time series matrix (response in last column)
#   cols.full - columns of the inputs in the full model
#   cols.red  - columns of the inputs in the reduced model (may be empty)
#   alpha     - significance level for the critical F value
#   L         - width of the Daniell smoother for spectral estimation (odd)
#   M         - number of points for discretising the inverse transform
#   plot.which- "F.stat" or "coh"
# Returns a list: power.full, power.red, Betahat, eF, coh.
stoch.reg <-
function(data, cols.full, cols.red, alpha, L, M, plot.which) {
  #
  # Local aliases so the function does not depend on attached packages.
  nextn = stats::nextn
  kernel = stats::kernel
  qf = stats::qf
  plot = graphics::plot
  abline = graphics::abline
  #
  # SPEC[i,j,k] is the spectrum between the i-th and j-th series at frequency k/n':
  # Build the full cross-spectral matrix pairwise from smoothed periodograms;
  # the cross terms are reconstructed from coherence and phase, and the
  # matrix is completed by Hermitian symmetry.
  SPEC = array(dim = c(ncol(data),ncol(data),nextn(nrow(data))/2))
  for(i in 1:ncol(data)) {
    for (j in i:ncol(data)) {
      power = stats::spec.pgram(data[,c(i,j)], kernel("daniell",(L-1)/2), plot = FALSE)
      SPEC[i,i, ] = power$spec[,1]
      SPEC[j,j, ] = power$spec[,2]
      coh.ij = power$coh
      phase.ij = power$phase
      SPEC[i,j, ] = sqrt(coh.ij*power$spec[,1]*power$spec[,2])*exp(1i*phase.ij)
      SPEC[j,i, ] = Conj(SPEC[i,j, ])
    }}
  ### Compute the power under the full model:
  # Residual power at each frequency: f.yy - f.yx %*% solve(f.xx) %*% f.xy.
  f.yy = SPEC[ncol(data), ncol(data), ]
  f.xx = SPEC[cols.full, cols.full, ]
  f.xy = SPEC[cols.full, ncol(data), ]
  f.yx = SPEC[ncol(data), cols.full, ]
  power.full = vector(length = dim(SPEC)[3])
  for (k in 1:length(power.full)) {
    power.full[k] = f.yy[k] - sum(f.yx[,k]*solve(f.xx[,,k],f.xy[,k]))
  }
  power.full = Re(power.full)
  ### Compute the IFT of the coefficients in the full model:
  # B[,k] is the frequency-response (transfer function) at frequency k.
  B = array(dim = c(length(cols.full), dim(SPEC)[3]))
  for (k in 1:length(power.full)) {
    B[,k] = solve(t(f.xx[,,k]),f.yx[,k])
  }
  # Isolate those frequencies at which we need B:
  # These are the frequencies 1/M, 2/M, ... .5*M/M
  # Currently the frequencies used are 1/N, 2/N, ... .5*N/N
  N = 2*length(power$freq) # This will be n', in our notation
  # R displays the power at only half of the frequencies.
  sampled.indices = (N/M)*(1:(M/2)) # These are the indices of the frequencies we want
  B = B[, sampled.indices]
  # Invert B, by discretizing the defining integral, to get the coefficients b:
  # b[s + M/2, j] approximates the lag-s impulse-response coefficient of
  # input j (lags run from -M/2+1 to M/2-1).
  delta = 1/M
  Omega = seq(from = 1/M, to = .5, length = M/2)
  b = array(dim = c(M-1, length(cols.full)))
  for (s in seq(from = -M/2+1, to = M/2 - 1, length = M-1)) {
    for (j in 1:length(cols.full)) {
      b[s + M/2,j] = 2*delta*sum(exp(2i*pi*Omega*s)*B[j,])
    }}
  Betahat = Re(b)
  ### Compute the power under the reduced model:
  # NOTE(review): with a single reduced column, f.xx/f.xy/f.yx collapse to
  # vectors, which is why the three length() cases below are handled
  # separately; with cols.red empty the reduced model is intercept-only.
  if (length(cols.red) > 0) {
    f.xx = SPEC[cols.red, cols.red, ]
    f.xy = SPEC[cols.red, ncol(data), ]
    f.yx = SPEC[ncol(data), cols.red, ]
  }
  power.red = vector(length = dim(SPEC)[3])
  for (k in 1:length(power.red)) {
    if(length(cols.red)==0) power.red[k] = f.yy[k]
    if(length(cols.red)==1) power.red[k] = f.yy[k] - f.yx[k]*f.xy[k]/f.xx[k]
    if(length(cols.red)> 1) power.red[k] = f.yy[k] - sum(f.yx[,k]*solve(f.xx[,,k],f.xy[,k]))
  }
  power.red = Re(power.red)
  ### Compute and plot the F statistics
  # Pointwise partial F-test for dropping the q2 extra inputs, with
  # 2*q2 and 2*(L - q) degrees of freedom.
  q = length(cols.full)
  q1 = length(cols.red)
  q2 = q - q1
  df.num = 2*q2
  df.denom = 2*(L-q)
  crit.F = qf(1-alpha, df.num, df.denom)
  MS.drop = L*(power.red - power.full)/df.num
  MSE = L*power.full/df.denom
  F.to.drop = MS.drop/MSE
  # Convert F to the equivalent squared-coherence scale for plotting.
  coh.mult = F.to.drop/(F.to.drop + df.denom/df.num)
  crit.coh = crit.F/(crit.F + df.denom/df.num)
  if(plot.which=="F.stat") {
    plot(power$freq, F.to.drop, type = "l", xlab = "Frequency", ylab = "F", ylim = c(0, 3*crit.F))
    abline(h=crit.F)
  }
  if(plot.which=="coh") {
    plot(power$freq, coh.mult, type = "l", xlab = "Frequency", ylab = "Sq Coherence", ylim=c(0,1))
    abline(h=crit.coh)
  }
  list(power.full = power.full, power.red = power.red, Betahat = Betahat, eF = F.to.drop, coh = coh.mult)
}
| /R/stoch.reg.R | no_license | cran/astsa | R | false | false | 3,366 | r | stoch.reg <-
function(data, cols.full, cols.red, alpha, L, M, plot.which) {
#
nextn = stats::nextn
kernel = stats::kernel
qf = stats::qf
plot = graphics::plot
abline = graphics::abline
#
# SPEC[i,j,k] is the spectrum between the i-th and j-th series at frequency k/n':
SPEC = array(dim = c(ncol(data),ncol(data),nextn(nrow(data))/2))
for(i in 1:ncol(data)) {
for (j in i:ncol(data)) {
power = stats::spec.pgram(data[,c(i,j)], kernel("daniell",(L-1)/2), plot = FALSE)
SPEC[i,i, ] = power$spec[,1]
SPEC[j,j, ] = power$spec[,2]
coh.ij = power$coh
phase.ij = power$phase
SPEC[i,j, ] = sqrt(coh.ij*power$spec[,1]*power$spec[,2])*exp(1i*phase.ij)
SPEC[j,i, ] = Conj(SPEC[i,j, ])
}}
### Compute the power under the full model:
f.yy = SPEC[ncol(data), ncol(data), ]
f.xx = SPEC[cols.full, cols.full, ]
f.xy = SPEC[cols.full, ncol(data), ]
f.yx = SPEC[ncol(data), cols.full, ]
power.full = vector(length = dim(SPEC)[3])
for (k in 1:length(power.full)) {
power.full[k] = f.yy[k] - sum(f.yx[,k]*solve(f.xx[,,k],f.xy[,k]))
}
power.full = Re(power.full)
### Compute the IFT of the coefficients in the full model:
B = array(dim = c(length(cols.full), dim(SPEC)[3]))
for (k in 1:length(power.full)) {
B[,k] = solve(t(f.xx[,,k]),f.yx[,k])
}
# Isolate those frequencies at which we need B:
# These are the frequencies 1/M, 2/M, ... .5*M/M
# Currently the frequencies used are 1/N, 2/N, ... .5*N/N
N = 2*length(power$freq) # This will be n', in our notation
# R displays the power at only half of the frequencies.
sampled.indices = (N/M)*(1:(M/2)) # These are the indices of the frequencies we want
B = B[, sampled.indices]
# Invert B, by discretizing the defining integral, to get the coefficients b:
delta = 1/M
Omega = seq(from = 1/M, to = .5, length = M/2)
b = array(dim = c(M-1, length(cols.full)))
for (s in seq(from = -M/2+1, to = M/2 - 1, length = M-1)) {
for (j in 1:length(cols.full)) {
b[s + M/2,j] = 2*delta*sum(exp(2i*pi*Omega*s)*B[j,])
}}
Betahat = Re(b)
### Compute the power under the reduced model:
if (length(cols.red) > 0) {
f.xx = SPEC[cols.red, cols.red, ]
f.xy = SPEC[cols.red, ncol(data), ]
f.yx = SPEC[ncol(data), cols.red, ]
}
power.red = vector(length = dim(SPEC)[3])
for (k in 1:length(power.red)) {
if(length(cols.red)==0) power.red[k] = f.yy[k]
if(length(cols.red)==1) power.red[k] = f.yy[k] - f.yx[k]*f.xy[k]/f.xx[k]
if(length(cols.red)> 1) power.red[k] = f.yy[k] - sum(f.yx[,k]*solve(f.xx[,,k],f.xy[,k]))
}
power.red = Re(power.red)
### Compute and plot the F statistics
q = length(cols.full)
q1 = length(cols.red)
q2 = q - q1
df.num = 2*q2
df.denom = 2*(L-q)
crit.F = qf(1-alpha, df.num, df.denom)
MS.drop = L*(power.red - power.full)/df.num
MSE = L*power.full/df.denom
F.to.drop = MS.drop/MSE
coh.mult = F.to.drop/(F.to.drop + df.denom/df.num)
crit.coh = crit.F/(crit.F + df.denom/df.num)
if(plot.which=="F.stat") {
plot(power$freq, F.to.drop, type = "l", xlab = "Frequency", ylab = "F", ylim = c(0, 3*crit.F))
abline(h=crit.F)
}
if(plot.which=="coh") {
plot(power$freq, coh.mult, type = "l", xlab = "Frequency", ylab = "Sq Coherence", ylim=c(0,1))
abline(h=crit.coh)
}
list(power.full = power.full, power.red = power.red, Betahat = Betahat, eF = F.to.drop, coh = coh.mult)
}
|
#' Nonparametric Sobol Estimator with Bootstrap Bandwidth
#'
#' Algorithm to estimate the Sobol indices using a non-parametric
#' fit of the regression curve. The bandwidth is estimated using
#' bootstrap to reduce the finite-sample bias.
#'
#' @param Y Response continuous variable
#' @param X Matrix of independent variables
#' @param bandwidth If \code{bandwidth.compute = TRUE}, it sets the starting bandwidth to find the bootstrap bandwidth. If \code{NULL} the least-square cross-validation bandwidth is used.
#' If \code{bandwidth.compute = FALSE}, it will use the values provided fixed over all the simulation. Defaults to \code{NULL}.
#' @param bandwidth.compute Logical value. Indicates if the bandwidth should be estimated or not. Defaults to \code{TRUE}.
#' @param bootstrap Logical value. Indicates if the estimation should be with bootstrap or not. Defaults to \code{TRUE}.
#' @param nboot Number of bootstrap samples taken for the method. Ignored if `bootstrap = FALSE`. Defaults to \code{100}.
#' @param ckerorder Numeric value specifying kernel order (should be one of
#' \code{(2,4,6,8)}). Defaults to \code{2}.
#' @param mc.cores Number of cores used. Defaults to \code{1}.
#'
#' @return A list of class \code{sobolnp} with the following elements:
#' \describe{
#' \item{\strong{S}}{First order Sobol indices estimated with nonparametric
#' regression and a cross-validation bandwidth}
#' \item{\strong{bws}}{Bandwidth estimated with cross-validation}
#' \item{\strong{Sboot}}{First order Sobol indices estimated with
#' nonparametric regression and a bootstrap bandwidth}
#' \item{\strong{bwsboot}}{Bandwidth estimated with bootstrap}
#' }
#' @references
#'
#'Solís, Maikol. "Nonparametric estimation of the first order Sobol indices with bootstrap bandwidth." \emph{arXiv preprint arXiv:1803.03333} (2018).
#'
#' @export
#' @examples
#' ishigami.fun <- function(X) {
#' A <- 7
#' B <- 0.1
#' sin(X[, 1]) + A * sin(X[, 2])^2 + B * X[, 3]^4 * sin(X[, 1])
#' }
#'
#' X <- matrix(runif(3*100, -pi, pi), ncol = 3)
#' Y <- ishigami.fun(X)
#'
#' estimation <- sobolnp(Y = Y, X = X, nboot = 5)
#'
#' @import np pbmcapply minqa
#' @importFrom stats residuals var
# Nonparametric first-order Sobol index estimator (see roxygen header above).
# Workflow: (1) fit E[Y | X_k] for each regressor k with a cross-validated
# local-constant kernel regression; (2) optionally refine each bandwidth by
# minimizing a bootstrap loss (BLS) with derivative-free optimization
# (bobyqa), then re-estimate the indices at the bootstrap bandwidth.
sobolnp <- function(Y,
                    X,
                    bandwidth = NULL,
                    bandwidth.compute = TRUE,
                    bootstrap = TRUE,
                    nboot = 100,
                    ckerorder = 2,
                    mc.cores = 1){
  n <- length(Y)
  p <- ncol(X)
  output <- list(call = match.call(),
                 num.obs = n,
                 num.var = p)
  # Negative bandwidths are invalid: bail out with all-NA results.
  # (When bandwidth is NULL the comparison is empty and any() is FALSE.)
  if (any(bandwidth < 0)) {
    return(list(S = rep(NA, p),
                bws = rep(NA, p)
    ))
  }
  # NOTE(review): this branch is a no-op (it assigns NULL to a value that is
  # already NULL); kept byte-identical to preserve the original code.
  if (is.null(bandwidth)) {
    bandwidth <- NULL
  }
  message("Estimating non-parametric Sobol indices with cross-validation bandwidth")
  # One nonparametric fit per input variable, parallelized over mc.cores.
  SList <- pbmclapply(
    X = 1:p,
    FUN = function(k) {
      compute.sobol.indices(
        xdat = X[, k],
        ydat = Y,
        bws = bandwidth,
        bandwidth.compute = bandwidth.compute,
        ckerorder = ckerorder
      )
    },
    mc.cores = mc.cores
  )
  # First-order indices and the cross-validation bandwidths, one per column.
  S <- sapply(SList, function(l)
    l$S)
  bws_initial <- sapply(SList, function(l)
    l$bws)
  if (bootstrap == FALSE) {
    # Without bootstrap refinement: return the plain CV-bandwidth results.
    return(
      list(
        S = S,
        bws = bws_initial
      ))
  } else {
    # Pieces needed to generate bootstrap responses: fitted means, standard
    # errors of the fit, and standardized residuals from each regression.
    meanS <- sapply(SList, function(l)
      l$mean)
    sderr <- sapply(SList, function(l)
      l$sderr)
    error <- sapply(SList, function(l)
      l$error)
    # Residual bootstrap: resample standardized errors with replacement and
    # rebuild nboot response matrices of shape n x p. Note the FUN argument
    # `n` deliberately shadows the outer sample size (same value, via rep()).
    Y_boot <- mapply(
      FUN = function(n) {
        idx <-
          matrix(sample(1:n, size = n * p, replace = TRUE), nrow = n)
        return(meanS + sderr * error[idx])
      },
      rep(n, nboot),
      SIMPLIFY = FALSE
    )
    message("Estimating non-parametric Sobol indices with bootstrap bandwidth")
    Sfitboot <- pbmclapply(
      X = 1:p,
      FUN = function(k) {
        # Minimize the bootstrap loss BLS over the bandwidth, starting from
        # the CV bandwidth and searching within [0, 10 * CV bandwidth].
        cv_opt <-
          minqa::bobyqa(
            par = bws_initial[k],
            fn = BLS,
            xdat = X[, k],
            ydat = Y,
            ydat_boot = Y_boot,
            var.index = k,
            ckerorder = ckerorder,
            lower = 0,
            upper = 10 * bws_initial[k],
            control = list(iprint = 0)
          )
        # Re-fit at the optimized bandwidth; only the fitted curve is needed
        # to recompute the index as var(g)/var(Y).
        gS <- compute.sobol.indices(
          xdat = X[, k],
          ydat = Y,
          bws = cv_opt$par,
          bandwidth.compute = FALSE,
          ckerorder = ckerorder,
          only.mean = TRUE
        )
        return(list(Sboot = var(gS) / var(Y),
                    bwsboot = cv_opt$par))
      },
      mc.cores = mc.cores
    )
    Sboot <- sapply(Sfitboot, function(x) x$Sboot)
    bwsboot <- sapply(Sfitboot, function(x) x$bwsboot)
    names(S) <- names(Sboot) <- colnames(X)
    output[["S"]] <- S
    output[["bws"]] <- bws_initial
    output[["Sboot"]] <- Sboot
    output[["bwsboot"]] <- bwsboot
    class(output) <- "sobolnp"
    return(output)
  }#end-else-bootstrap
}
# Fit a nonparametric (local-constant, Epanechnikov-kernel) regression of
# ydat on xdat and derive the quantities needed by the Sobol estimators.
#
# Args:
#   xdat: vector/matrix of regressors.
#   ydat: numeric response vector.
#   bws: bandwidth(s) to use (or starting values when they are re-computed).
#   bandwidth.compute: logical; if TRUE the bandwidth is estimated by
#     least-squares cross-validation, otherwise `bws` is used as supplied.
#   ckerorder: kernel order passed through to np::npregbw.
#   only.mean: if TRUE, return only the fitted regression curve g(x).
#
# Returns: the fitted values when only.mean = TRUE; otherwise a list with
#   S = var(g)/var(ydat), the fitted mean, the bandwidth used, the standard
#   error of the fit, and the standardized residuals.
compute.sobol.indices <-
  function(xdat,
           ydat,
           bws,
           bandwidth.compute,
           ckerorder,
           only.mean = FALSE) {
    # Residuals are requested so the bootstrap error distribution can be
    # built from them by the caller.
    ghat <- npreg(
      npregbw(
        bws = bws,
        xdat = xdat,
        ydat = ydat,
        bandwidth.compute = bandwidth.compute,
        ckertype = "epanechnikov",
        ckerorder = ckerorder,
        bwmethod = "cv.ls"
      ),
      residuals = TRUE
    )
    g <- ghat$mean
    if (only.mean) {
      # Fix: return the fitted values directly. The original
      # `return(mean = g)` only worked because R silently ignores the
      # argument tag on return().
      return(g)
    } else {
      bws <- ghat$bw
      res <- residuals(ghat)
      sderr <- ghat$merr
      # Guard against (near-)zero standard errors so the standardized
      # residuals below do not blow up; those entries are zeroed out.
      sderr.idx <- sderr <= 1e-5
      error <- (res - mean(res)) / sderr
      error[sderr.idx] <- 0
      return(list(
        S = var(g) / var(ydat),
        mean = g,
        bws = bws,
        sderr = sderr,
        error = error
      ))
    }
  }
# Bootstrap loss for bandwidth selection: the mean absolute deviation of
# ydat from the average of the nonparametric fits over all bootstrap
# responses. Minimized over `bws` by minqa::bobyqa in sobolnp().
#
# Args:
#   bws: candidate bandwidth(s) proposed by the optimizer.
#   xdat: regressor data for the k-th variable.
#   ydat: observed response vector.
#   ydat_boot: list of bootstrap response matrices (n x p each).
#   var.index: column of each bootstrap matrix belonging to this variable.
#   ckerorder: kernel order forwarded to compute.sobol.indices().
#
# Returns: the scalar loss, or a huge penalty (10e10) for invalid bandwidths
#   so the derivative-free optimizer is pushed back into the feasible region.
BLS <-
  function(bws,
           xdat,
           ydat,
           ydat_boot,
           var.index,
           ckerorder) {
    # Fix: test for NA first and use short-circuiting `||` (scalar context)
    # instead of element-wise `|`, so `any(bws < 0)` is never evaluated to
    # NA inside `if` when the optimizer proposes a missing value.
    if (any(is.na(bws)) || any(bws < 0)) {
      return(10e10)
    }
    # One nonparametric fit per bootstrap replicate; each column of
    # mean_boot is the fitted curve for one bootstrap response.
    mean_boot <- sapply(ydat_boot, function(yboot,
                                            xdat,
                                            bws,
                                            ckerorder) {
      g <- compute.sobol.indices(
        xdat = xdat,
        ydat = yboot[, var.index],
        bws = bws,
        bandwidth.compute = FALSE,
        ckerorder = ckerorder,
        only.mean = TRUE
      )
      return(g)
    },
    xdat = xdat,
    bws = bws,
    ckerorder)
    # L1 loss between the observed response and the bootstrap-averaged fit.
    return(mean(abs(ydat - rowMeans(mean_boot))))
  }
| /R/sobolnp.R | no_license | maikol-solis/sobolnp | R | false | false | 10,225 | r | #' Nonparametric Sobol Estimator with Bootstrap Bandwidth
#'
#' Algorithm to estimate the Sobol indices using a non-parametric
#' fit of the regression curve. The bandwidth is estimated using
#' bootstrap to reduce the finite-sample bias.
#'
#' @param Y Response continuous variable
#' @param X Matrix of independent variables
#' @param bandwidth If \code{bandwidth.compute = TRUE}, it sets the starting bandwidth to find the bootstrap bandwidth. If \code{NULL} the least-square cross-validation bandwidth is used.
#' If \code{bandwidth.compute = FALSE}, it will use the values provided fixed over all the simulation. Defaults to \code{NULL}.
#' @param bandwidth.compute Logical value. Indicates if the bandwidth should be estimated or not. Defaults to \code{TRUE}.
#' @param bootstrap Logical value. Indicates if the estimation should be with bootstrap or not. Defaults to \code{TRUE}.
#' @param nboot Number of bootstrap samples taken for the method. Ignored if `bootstrap = FALSE`. Defaults to \code{100}.
#' @param ckerorder Numeric value specifying kernel order (should be one of
#' \code{(2,4,6,8)}). Defaults to \code{2}.
#' @param mc.cores Number of cores used. Defaults to \code{1}.
#'
#' @return A list of class \code{sobolnp} with the following elements:
#' \describe{
#' \item{\strong{S}}{First order Sobol indices estimated with nonparametric
#' regression and a cross-validation bandwidth}
#' \item{\strong{bws}}{Bandwidth estimated with cross-validation}
#' \item{\strong{Sboot}}{First order Sobol indices estimated with
#' nonparametric regression and a bootstrap bandwidth}
#' \item{\strong{bwsboot}}{Bandwidth estimated with bootstrap}
#' }
#' @references
#'
#'Solís, Maikol. "Nonparametric estimation of the first order Sobol indices with bootstrap bandwidth." \emph{arXiv preprint arXiv:1803.03333} (2018).
#'
#' @export
#' @examples
#' ishigami.fun <- function(X) {
#' A <- 7
#' B <- 0.1
#' sin(X[, 1]) + A * sin(X[, 2])^2 + B * X[, 3]^4 * sin(X[, 1])
#' }
#'
#' X <- matrix(runif(3*100, -pi, pi), ncol = 3)
#' Y <- ishigami.fun(X)
#'
#' estimation <- sobolnp(Y = Y, X = X, nboot = 5)
#'
#' @import np pbmcapply minqa
#' @importFrom stats residuals var
# Nonparametric first-order Sobol index estimator (see roxygen header above).
# Workflow: (1) fit E[Y | X_k] for each regressor k with a cross-validated
# local-constant kernel regression; (2) optionally refine each bandwidth by
# minimizing a bootstrap loss (BLS) with derivative-free optimization
# (bobyqa), then re-estimate the indices at the bootstrap bandwidth.
sobolnp <- function(Y,
                    X,
                    bandwidth = NULL,
                    bandwidth.compute = TRUE,
                    bootstrap = TRUE,
                    nboot = 100,
                    ckerorder = 2,
                    mc.cores = 1){
  n <- length(Y)
  p <- ncol(X)
  output <- list(call = match.call(),
                 num.obs = n,
                 num.var = p)
  # Negative bandwidths are invalid: bail out with all-NA results.
  # (When bandwidth is NULL the comparison is empty and any() is FALSE.)
  if (any(bandwidth < 0)) {
    return(list(S = rep(NA, p),
                bws = rep(NA, p)
    ))
  }
  # NOTE(review): this branch is a no-op (it assigns NULL to a value that is
  # already NULL); kept byte-identical to preserve the original code.
  if (is.null(bandwidth)) {
    bandwidth <- NULL
  }
  message("Estimating non-parametric Sobol indices with cross-validation bandwidth")
  # One nonparametric fit per input variable, parallelized over mc.cores.
  SList <- pbmclapply(
    X = 1:p,
    FUN = function(k) {
      compute.sobol.indices(
        xdat = X[, k],
        ydat = Y,
        bws = bandwidth,
        bandwidth.compute = bandwidth.compute,
        ckerorder = ckerorder
      )
    },
    mc.cores = mc.cores
  )
  # First-order indices and the cross-validation bandwidths, one per column.
  S <- sapply(SList, function(l)
    l$S)
  bws_initial <- sapply(SList, function(l)
    l$bws)
  if (bootstrap == FALSE) {
    # Without bootstrap refinement: return the plain CV-bandwidth results.
    return(
      list(
        S = S,
        bws = bws_initial
      ))
  } else {
    # Pieces needed to generate bootstrap responses: fitted means, standard
    # errors of the fit, and standardized residuals from each regression.
    meanS <- sapply(SList, function(l)
      l$mean)
    sderr <- sapply(SList, function(l)
      l$sderr)
    error <- sapply(SList, function(l)
      l$error)
    # Residual bootstrap: resample standardized errors with replacement and
    # rebuild nboot response matrices of shape n x p. Note the FUN argument
    # `n` deliberately shadows the outer sample size (same value, via rep()).
    Y_boot <- mapply(
      FUN = function(n) {
        idx <-
          matrix(sample(1:n, size = n * p, replace = TRUE), nrow = n)
        return(meanS + sderr * error[idx])
      },
      rep(n, nboot),
      SIMPLIFY = FALSE
    )
    message("Estimating non-parametric Sobol indices with bootstrap bandwidth")
    Sfitboot <- pbmclapply(
      X = 1:p,
      FUN = function(k) {
        # Minimize the bootstrap loss BLS over the bandwidth, starting from
        # the CV bandwidth and searching within [0, 10 * CV bandwidth].
        cv_opt <-
          minqa::bobyqa(
            par = bws_initial[k],
            fn = BLS,
            xdat = X[, k],
            ydat = Y,
            ydat_boot = Y_boot,
            var.index = k,
            ckerorder = ckerorder,
            lower = 0,
            upper = 10 * bws_initial[k],
            control = list(iprint = 0)
          )
        # Re-fit at the optimized bandwidth; only the fitted curve is needed
        # to recompute the index as var(g)/var(Y).
        gS <- compute.sobol.indices(
          xdat = X[, k],
          ydat = Y,
          bws = cv_opt$par,
          bandwidth.compute = FALSE,
          ckerorder = ckerorder,
          only.mean = TRUE
        )
        return(list(Sboot = var(gS) / var(Y),
                    bwsboot = cv_opt$par))
      },
      mc.cores = mc.cores
    )
    Sboot <- sapply(Sfitboot, function(x) x$Sboot)
    bwsboot <- sapply(Sfitboot, function(x) x$bwsboot)
    names(S) <- names(Sboot) <- colnames(X)
    output[["S"]] <- S
    output[["bws"]] <- bws_initial
    output[["Sboot"]] <- Sboot
    output[["bwsboot"]] <- bwsboot
    class(output) <- "sobolnp"
    return(output)
  }#end-else-bootstrap
}
# Fit a nonparametric (local-constant, Epanechnikov-kernel) regression of
# ydat on xdat and derive the quantities needed by the Sobol estimators.
#
# Args:
#   xdat: vector/matrix of regressors.
#   ydat: numeric response vector.
#   bws: bandwidth(s) to use (or starting values when they are re-computed).
#   bandwidth.compute: logical; if TRUE the bandwidth is estimated by
#     least-squares cross-validation, otherwise `bws` is used as supplied.
#   ckerorder: kernel order passed through to np::npregbw.
#   only.mean: if TRUE, return only the fitted regression curve g(x).
#
# Returns: the fitted values when only.mean = TRUE; otherwise a list with
#   S = var(g)/var(ydat), the fitted mean, the bandwidth used, the standard
#   error of the fit, and the standardized residuals.
compute.sobol.indices <-
  function(xdat,
           ydat,
           bws,
           bandwidth.compute,
           ckerorder,
           only.mean = FALSE) {
    # Residuals are requested so the bootstrap error distribution can be
    # built from them by the caller.
    ghat <- npreg(
      npregbw(
        bws = bws,
        xdat = xdat,
        ydat = ydat,
        bandwidth.compute = bandwidth.compute,
        ckertype = "epanechnikov",
        ckerorder = ckerorder,
        bwmethod = "cv.ls"
      ),
      residuals = TRUE
    )
    g <- ghat$mean
    if (only.mean) {
      # Fix: return the fitted values directly. The original
      # `return(mean = g)` only worked because R silently ignores the
      # argument tag on return().
      return(g)
    } else {
      bws <- ghat$bw
      res <- residuals(ghat)
      sderr <- ghat$merr
      # Guard against (near-)zero standard errors so the standardized
      # residuals below do not blow up; those entries are zeroed out.
      sderr.idx <- sderr <= 1e-5
      error <- (res - mean(res)) / sderr
      error[sderr.idx] <- 0
      return(list(
        S = var(g) / var(ydat),
        mean = g,
        bws = bws,
        sderr = sderr,
        error = error
      ))
    }
  }
# Bootstrap loss for bandwidth selection: the mean absolute deviation of
# ydat from the average of the nonparametric fits over all bootstrap
# responses. Minimized over `bws` by minqa::bobyqa in sobolnp().
#
# Args:
#   bws: candidate bandwidth(s) proposed by the optimizer.
#   xdat: regressor data for the k-th variable.
#   ydat: observed response vector.
#   ydat_boot: list of bootstrap response matrices (n x p each).
#   var.index: column of each bootstrap matrix belonging to this variable.
#   ckerorder: kernel order forwarded to compute.sobol.indices().
#
# Returns: the scalar loss, or a huge penalty (10e10) for invalid bandwidths
#   so the derivative-free optimizer is pushed back into the feasible region.
BLS <-
  function(bws,
           xdat,
           ydat,
           ydat_boot,
           var.index,
           ckerorder) {
    # Fix: test for NA first and use short-circuiting `||` (scalar context)
    # instead of element-wise `|`, so `any(bws < 0)` is never evaluated to
    # NA inside `if` when the optimizer proposes a missing value.
    if (any(is.na(bws)) || any(bws < 0)) {
      return(10e10)
    }
    # One nonparametric fit per bootstrap replicate; each column of
    # mean_boot is the fitted curve for one bootstrap response.
    mean_boot <- sapply(ydat_boot, function(yboot,
                                            xdat,
                                            bws,
                                            ckerorder) {
      g <- compute.sobol.indices(
        xdat = xdat,
        ydat = yboot[, var.index],
        bws = bws,
        bandwidth.compute = FALSE,
        ckerorder = ckerorder,
        only.mean = TRUE
      )
      return(g)
    },
    xdat = xdat,
    bws = bws,
    ckerorder)
    # L1 loss between the observed response and the bootstrap-averaged fit.
    return(mean(abs(ydat - rowMeans(mean_boot))))
  }
|
\name{g1}
\alias{g1}
\title{g1 statistic for Skewness}
\description{A common statistic for skewness. Called by the function sktable}
\usage{g1(x)
}
\arguments{
\item{x}{The variable of interest}}
\details{see Wright and Herrington (2011)}
\references{
Wright, D.B. & Herrington, J.A. (2011, in press at the time of writing).
Problematic standard errors and confidence intervals for skewness and
kurtosis. \emph{Behavior Research Methods}. www2.fiu.edu/~dwright/skewkurt
}
\author{Daniel B. Wright}
\note{While this can be called on its own, it was written to be used by sktable.}
\seealso{sktable}
\examples{
varx <- runif(20)^2
g1(varx)
}
\keyword{skewness}
| /man/g1.Rd | no_license | cran/mrt | R | false | false | 683 | rd | \name{g1}
\alias{g1}
\title{g1 statistic for Skewness}
\description{A common statistic for skewness. Called by the function sktable}
\usage{g1(x)
}
\arguments{
\item{x}{The variable of interest}}
\details{see Wright and Herrington (2011)}
\references{
Wright, D.B. & Herrington, J.A. (2011, in press at the time of writing).
Problematic standard errors and confidence intervals for skewness and
kurtosis. \emph{Behavior Research Methods}. www2.fiu.edu/~dwright/skewkurt
}
\author{Daniel B. Wright}
\note{While this can be called on its own, it was written to be used by sktable.}
\seealso{sktable}
\examples{
varx <- runif(20)^2
g1(varx)
}
\keyword{skewness}
|
# MARSHALL (CENTER FOR SYSTEMIC PEACE) COUP DATA
# 2015-04-29
# Source: Center for Systemic Peace http://www.systemicpeace.org/inscr/CSPCoupsList2014.xls
# Converts the CSP coup event list (one row per event) into a rectangular
# country-year panel with counts of successful/failed/plotted/rumored coups.

# Clear workspace
# NOTE(review): rm(list = ls()) inside a script is an anti-pattern (it wipes
# the caller's workspace when sourced); kept to preserve original behavior.
rm(list=ls(all=TRUE))

# Get working directory
wd <- getwd()

# Load required packages and functions
library(XLConnect)
library(reshape)
source(paste0(wd, "/r/f.pitfcodeit.r"))        # pitfcodeit(): attaches PITF country codes
source(paste0(wd, "/r/f.countryyearrackit.r")) # countryyearrackit(): full country-year grid

# Get the data, which is an event file (one row per event), not country-year
csp <- readWorksheetFromFile(paste0(wd, "/data.in/cspcoupslist2014.xls"), sheet=1)

# Cut down to the essentials; drop rows with no state code
csp <- subset(csp, is.na(scode)==FALSE, select=c(scode, year, success))
names(csp) <- c("sftgcode", "year", "success")

# Generate binary versions for each event type
# (success codes: 1 = successful coup, 2 = failed attempt, 3 = plot, 4 = rumor)
csp$successful <- ifelse(csp$success==1, 1, 0)
csp$failed <- ifelse(csp$success==2, 1, 0)
csp$plot <- ifelse(csp$success==3, 1, 0)
csp$rumor <- ifelse(csp$success==4, 1, 0)

# Generate country-year counts from event list (country x year matrices)
coupsum.s <- tapply(csp$successful, list(csp$sftgcode, csp$year), sum)
coupsum.f <- tapply(csp$failed, list(csp$sftgcode, csp$year), sum)
coupsum.p <- tapply(csp$plot, list(csp$sftgcode, csp$year), sum)
coupsum.r <- tapply(csp$rumor, list(csp$sftgcode, csp$year), sum)
# Melt the matrices into long (country-year) form
coup.s <- melt(coupsum.s)
coup.f <- melt(coupsum.f)
coup.p <- melt(coupsum.p)
coup.r <- melt(coupsum.r)
names(coup.s) <- c("sftgcode", "year", "cmm.succ")
names(coup.f) <- c("sftgcode", "year", "cmm.fail")
names(coup.p) <- c("sftgcode", "year", "cmm.plot")
names(coup.r) <- c("sftgcode", "year", "cmm.rumr")

# Generate a complete country-year rectangular file for CSP period of observation (1946-2014) w/PITF codes
rack <- subset(pitfcodeit(countryyearrackit(1946,2014), "country"), select=c(sftgcode, year))

# Merge the CSP country-year sums with that larger file
csp.tscs <- merge(rack, coup.s, all.x = TRUE)
csp.tscs <- merge(csp.tscs, coup.f, all.x = TRUE)
csp.tscs <- merge(csp.tscs, coup.p, all.x = TRUE)
csp.tscs <- merge(csp.tscs, coup.r, all.x = TRUE)

# Fill in missing values with 0s; rack only includes all valid country-years, & tapply of event list
# leaves missing values for country-years that have no events.
csp.tscs[is.na(csp.tscs)] <- 0

# Order by country and year
csp.tscs <- csp.tscs[order(csp.tscs$sftgcode, csp.tscs$year),]

# Write it out
write.csv(csp.tscs, file = paste0(wd, "/data.out/cmm.csv"), row.names = FALSE)
| /R/data.cmm.R | no_license | EarlyWarningProject/2015-Statistical-Risk-Assessment | R | false | false | 2,392 | r | # MARSHALL (CENTER FOR SYSTEMIC PEACE) COUP DATA
# 2015-04-29
# Source: Center for Systemic Peace http://www.systemicpeace.org/inscr/CSPCoupsList2014.xls
# Clear workspace
rm(list=ls(all=TRUE))
# Get working directory
wd <- getwd()
# Load required packages and functions
library(XLConnect)
library(reshape)
source(paste0(wd, "/r/f.pitfcodeit.r"))
source(paste0(wd, "/r/f.countryyearrackit.r"))
# Get the data, which is an event file (one row per event), not country-year
csp <- readWorksheetFromFile(paste0(wd, "/data.in/cspcoupslist2014.xls"), sheet=1)
# Cut down to the essentials
csp <- subset(csp, is.na(scode)==FALSE, select=c(scode, year, success))
names(csp) <- c("sftgcode", "year", "success")
# Generate binary versions for each event type
csp$successful <- ifelse(csp$success==1, 1, 0)
csp$failed <- ifelse(csp$success==2, 1, 0)
csp$plot <- ifelse(csp$success==3, 1, 0)
csp$rumor <- ifelse(csp$success==4, 1, 0)
# Generate country-year counts from event list
coupsum.s <- tapply(csp$successful, list(csp$sftgcode, csp$year), sum)
coupsum.f <- tapply(csp$failed, list(csp$sftgcode, csp$year), sum)
coupsum.p <- tapply(csp$plot, list(csp$sftgcode, csp$year), sum)
coupsum.r <- tapply(csp$rumor, list(csp$sftgcode, csp$year), sum)
coup.s <- melt(coupsum.s)
coup.f <- melt(coupsum.f)
coup.p <- melt(coupsum.p)
coup.r <- melt(coupsum.r)
names(coup.s) <- c("sftgcode", "year", "cmm.succ")
names(coup.f) <- c("sftgcode", "year", "cmm.fail")
names(coup.p) <- c("sftgcode", "year", "cmm.plot")
names(coup.r) <- c("sftgcode", "year", "cmm.rumr")
# Generate a complete country-year rectangular file for CSP period of observation (1946-2014) w/PITF codes
rack <- subset(pitfcodeit(countryyearrackit(1946,2014), "country"), select=c(sftgcode, year))
# Merge the CSP country-year sums with that larger file
csp.tscs <- merge(rack, coup.s, all.x = TRUE)
csp.tscs <- merge(csp.tscs, coup.f, all.x = TRUE)
csp.tscs <- merge(csp.tscs, coup.p, all.x = TRUE)
csp.tscs <- merge(csp.tscs, coup.r, all.x = TRUE)
# Fill in missing values with 0s; rack only includes all valid country-years, & tapply of event list
# leaves missing values for country-years that have no events.
csp.tscs[is.na(csp.tscs)] <- 0
# Order by country and year
csp.tscs <- csp.tscs[order(csp.tscs$sftgcode, csp.tscs$year),]
# Write it out
write.csv(csp.tscs, file = paste0(wd, "/data.out/cmm.csv"), row.names = FALSE)
|
#Robert A Brown
#CS 581 ~ Homework 01
#University of Rhode Island
# Zero-rule ("majority class") learner: inspects the final column of the
# training data frame and returns a classifier function that always predicts
# the most frequent label observed there (ties broken by table order).
learner <- function(training.df) {
  if (!is.data.frame(training.df)) {
    stop("not a data frame")
  }
  target <- training.df[[ncol(training.df)]]
  counts <- table(target)
  majority <- names(counts)[which.max(counts)]
  # The returned model ignores its input entirely.
  function(x) majority
}
# Demo: train the majority-class learner on two data sets and report its
# constant prediction. Paths are relative to the working directory.
df <- read.csv('data/mammals.csv')
write("\nexpected value from model 'trained' on mammals.csv is 'false'", stdout())
write("actual:", stdout())
m <-learner(df)
m(df)  # top-level call; auto-printed when run via Rscript

df <- read.csv('data/biomed.csv')
write("\nexpected value from model 'trained' on biomed.csv is 'MI'", stdout())
write("actual:", stdout())
m <-learner(df)
m(df)
| /01/1_4.r | no_license | rab170/SVMs | R | false | false | 715 | r | #Robert A Brown
#CS 581 ~ Homework 01
#University of Rhode Island
learner <- function(training.df) {
if (!is.data.frame(training.df))
stop("not a data frame")
n <- ncol(training.df)
target.attribute <- training.df[[n]]
target.levels <- table(target.attribute)
ix <- which.max(target.levels)
majority.label <- names(target.levels[ix])
function(x) majority.label
}
df <- read.csv('data/mammals.csv')
write("\nexpected value from model 'trained' on mammals.csv is 'false'", stdout())
write("actual:", stdout())
m <-learner(df)
m(df)
df <- read.csv('data/biomed.csv')
write("\nexpected value from model 'trained' on biomed.csv is 'MI'", stdout())
write("actual:", stdout())
m <-learner(df)
m(df)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_delete_members}
\alias{guardduty_delete_members}
\title{Deletes GuardDuty member accounts (to the current GuardDuty
administrator account) specified by the account IDs}
\usage{
guardduty_delete_members(DetectorId, AccountIds)
}
\arguments{
\item{DetectorId}{[required] The unique ID of the detector of the GuardDuty account whose members you
want to delete.}
\item{AccountIds}{[required] A list of account IDs of the GuardDuty member accounts that you want to
delete.}
}
\description{
Deletes GuardDuty member accounts (to the current GuardDuty
administrator account) specified by the account IDs.
}
\section{Request syntax}{
\preformatted{svc$delete_members(
DetectorId = "string",
AccountIds = list(
"string"
)
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/guardduty_delete_members.Rd | permissive | sanchezvivi/paws | R | false | true | 867 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_delete_members}
\alias{guardduty_delete_members}
\title{Deletes GuardDuty member accounts (to the current GuardDuty
administrator account) specified by the account IDs}
\usage{
guardduty_delete_members(DetectorId, AccountIds)
}
\arguments{
\item{DetectorId}{[required] The unique ID of the detector of the GuardDuty account whose members you
want to delete.}
\item{AccountIds}{[required] A list of account IDs of the GuardDuty member accounts that you want to
delete.}
}
\description{
Deletes GuardDuty member accounts (to the current GuardDuty
administrator account) specified by the account IDs.
}
\section{Request syntax}{
\preformatted{svc$delete_members(
DetectorId = "string",
AccountIds = list(
"string"
)
)
}
}
\keyword{internal}
|
# Double/debiased machine learning for the partially linear IV model,
# "partial z" variant: only the instrument-side nuisance E[D | X, Z] is
# estimated (with learner ml_r). Supports repeated sample splitting (n_rep)
# and both the "dml1" (fold-wise) and "dml2" (pooled) procedures.
dml_pliv_partial_z = function(data, y, d, z,
                              n_folds,
                              ml_r,
                              dml_procedure, score,
                              n_rep = 1, smpls=NULL,
                              params_r = NULL) {
  # Draw the fold structure if the caller did not supply one.
  if (is.null(smpls)) {
    smpls = lapply(1:n_rep, function(x) sample_splitting(n_folds, data))
  }
  all_thetas = all_ses = rep(NA, n_rep)
  all_preds = list()
  for (i_rep in 1:n_rep) {
    this_smpl = smpls[[i_rep]]
    # Cross-fitted predictions of the nuisance function E[D | X, Z].
    all_preds[[i_rep]] = fit_nuisance_pliv_partial_z(data, y, d, z,
                                                     ml_r,
                                                     this_smpl,
                                                     params_r)
    residuals = compute_pliv_partial_z_residuals(data, y, d, z, n_folds,
                                                 this_smpl,
                                                 all_preds[[i_rep]])
    r_hat = residuals$r_hat
    D = data[, d]
    Y = data[, y]
    # DML 1: estimate theta fold by fold, then average.
    if (dml_procedure == "dml1") {
      thetas = vars = rep(NA, n_folds)
      for (i in 1:n_folds) {
        test_index = this_smpl$test_ids[[i]]
        orth_est = orth_pliv_partial_z_dml(
          r_hat = r_hat[test_index],
          y = Y[test_index],
          d = D[test_index],
          score = score)
        thetas[i] = orth_est$theta
      }
      all_thetas[i_rep] = mean(thetas, na.rm = TRUE)
      # With a single train/test split, restrict the data used in the
      # variance estimate below to the (last) test fold.
      if (length(this_smpl$train_ids) == 1) {
        r_hat = r_hat[test_index]
        Y = Y[test_index]
        D = D[test_index]
      }
    }
    # DML 2: solve the pooled moment condition over all folds at once.
    if (dml_procedure == "dml2") {
      orth_est = orth_pliv_partial_z_dml(
        r_hat = r_hat, y = Y, d = D,
        score = score)
      all_thetas[i_rep] = orth_est$theta
    }
    all_ses[i_rep] = sqrt(var_pliv_partial_z(
      theta = all_thetas[i_rep], r_hat = r_hat, y = Y, d = D,
      score = score))
  }
  # Aggregate over repetitions: median point estimate; se_repeated() combines
  # the sqrt(n)-scaled standard errors across splits.
  theta = stats::median(all_thetas)
  if (length(this_smpl$train_ids) > 1) {
    n = nrow(data)
  } else {
    n = length(this_smpl$test_ids[[1]])
  }
  se = se_repeated(all_ses*sqrt(n), all_thetas, theta)/sqrt(n)
  t = theta / se
  pval = 2 * stats::pnorm(-abs(t))  # two-sided normal p-value
  names(theta) = names(se) = d
  res = list(
    coef = theta, se = se, t = t, pval = pval,
    thetas = all_thetas, ses = all_ses,
    all_preds=all_preds, smpls=smpls)
  return(res)
}
# Nuisance part r: learn E[D | X, Z] by regressing d on every column of
# `data` except the outcome y, using the supplied cross-fitting folds.
# Returns a list with one vector of out-of-fold predictions per fold.
fit_nuisance_pliv_partial_z = function(data, y, d, z,
                                       ml_r,
                                       smpls,
                                       params_r) {
  keep_cols <- names(data) != y
  regr_data <- data[, keep_cols, drop = FALSE]
  regr_task <- mlr3::TaskRegr$new(
    id = paste0("nuis_r_", d),
    backend = regr_data,
    target = d)
  # Optional externally supplied hyperparameters for the learner.
  if (!is.null(params_r)) {
    ml_r$param_set$values <- params_r
  }
  custom_cv <- mlr3::rsmp("custom")
  custom_cv$instantiate(regr_task, smpls$train_ids, smpls$test_ids)
  fit <- mlr3::resample(regr_task, ml_r, custom_cv, store_models = TRUE)
  list(r_hat_list = lapply(fit$predictions(), function(p) p$response))
}
# Reassemble the fold-wise out-of-sample predictions of E[D | X, Z] into a
# single vector aligned with the rows of `data`. The y/d/z arguments are
# kept for interface symmetry with the other nuisance helpers.
compute_pliv_partial_z_residuals = function(data, y, d, z, n_folds, smpls,
                                            all_preds) {
  r_hat <- rep(NA, nrow(data))
  for (fold in 1:n_folds) {
    idx <- smpls$test_ids[[fold]]
    r_hat[idx] <- all_preds$r_hat_list[[fold]]
  }
  list(r_hat = r_hat)
}
# Method-of-moments estimate of theta using r_hat = E[D | X, Z] as the
# instrument: theta = E[r * Y] / E[r * D]. Only the "partialling out"
# score is supported.
orth_pliv_partial_z_dml = function(r_hat, y, d, score) {
  stopifnot(score == "partialling out")
  numerator <- mean(r_hat * y)
  denominator <- mean(r_hat * d)
  list(theta = numerator / denominator)
}
# Asymptotic variance of the partialling-out estimator:
# (1/n) * E[psi^2] / (E[r * D])^2 with psi = (Y - D * theta) * r_hat.
var_pliv_partial_z = function(theta, r_hat, y, d, score) {
  stopifnot(score == "partialling out")
  n_obs <- length(r_hat)
  psi_sq <- mean(((y - d * theta) * r_hat)^2)
  j_hat <- mean(r_hat * d)
  c(psi_sq / (n_obs * j_hat^2))
}
# Multiplier bootstrap for the partial-z PLIV estimator. For each sample
# split it rebuilds the score psi = (Y - D*theta) * r_hat and its
# theta-derivative psi_a = -r_hat * D, draws bootstrap weights, and
# delegates to functional_bootstrap(); draws from all repetitions are
# column-bound into a single result.
bootstrap_pliv_partial_z = function(theta, se, data, y, d, z, n_folds, smpls,
                                    all_preds, bootstrap,
                                    n_rep_boot, n_rep=1) {
  for (i_rep in 1:n_rep) {
    residuals = compute_pliv_partial_z_residuals(data, y, d, z, n_folds,
                                                 smpls[[i_rep]],
                                                 all_preds[[i_rep]])
    r_hat = residuals$r_hat
    D = data[, d]
    Y = data[, y]
    # Score function and its derivative for this repetition.
    psi = (Y - D * theta[i_rep]) * r_hat
    psi_a = - r_hat * D
    n = length(psi)
    weights = draw_bootstrap_weights(bootstrap, n_rep_boot, n)
    this_res = functional_bootstrap(theta[i_rep], se[i_rep], psi, psi_a, n_folds,
                                    smpls[[i_rep]],
                                    n_rep_boot, weights)
    # Accumulate bootstrap coefficients / t-statistics across repetitions.
    if (i_rep==1) {
      boot_res = this_res
    } else {
      boot_res$boot_coef = cbind(boot_res$boot_coef, this_res$boot_coef)
      boot_res$boot_t_stat = cbind(boot_res$boot_t_stat, this_res$boot_t_stat)
    }
  }
  return(boot_res)
}
| /tests/testthat/helper-14-dml_pliv_partial_z.R | no_license | k-segiet/doubleml-for-r | R | false | false | 5,088 | r | dml_pliv_partial_z = function(data, y, d, z,
n_folds,
ml_r,
dml_procedure, score,
n_rep = 1, smpls=NULL,
params_r = NULL) {
if (is.null(smpls)) {
smpls = lapply(1:n_rep, function(x) sample_splitting(n_folds, data))
}
all_thetas = all_ses = rep(NA, n_rep)
all_preds = list()
for (i_rep in 1:n_rep) {
this_smpl = smpls[[i_rep]]
all_preds[[i_rep]] = fit_nuisance_pliv_partial_z(data, y, d, z,
ml_r,
this_smpl,
params_r)
residuals = compute_pliv_partial_z_residuals(data, y, d, z, n_folds,
this_smpl,
all_preds[[i_rep]])
r_hat = residuals$r_hat
D = data[, d]
Y = data[, y]
# DML 1
if (dml_procedure == "dml1") {
thetas = vars = rep(NA, n_folds)
for (i in 1:n_folds) {
test_index = this_smpl$test_ids[[i]]
orth_est = orth_pliv_partial_z_dml(
r_hat = r_hat[test_index],
y = Y[test_index],
d = D[test_index],
score = score)
thetas[i] = orth_est$theta
}
all_thetas[i_rep] = mean(thetas, na.rm = TRUE)
if (length(this_smpl$train_ids) == 1) {
r_hat = r_hat[test_index]
Y = Y[test_index]
D = D[test_index]
}
}
if (dml_procedure == "dml2") {
orth_est = orth_pliv_partial_z_dml(
r_hat = r_hat, y = Y, d = D,
score = score)
all_thetas[i_rep] = orth_est$theta
}
all_ses[i_rep] = sqrt(var_pliv_partial_z(
theta = all_thetas[i_rep], r_hat = r_hat, y = Y, d = D,
score = score))
}
theta = stats::median(all_thetas)
if (length(this_smpl$train_ids) > 1) {
n = nrow(data)
} else {
n = length(this_smpl$test_ids[[1]])
}
se = se_repeated(all_ses*sqrt(n), all_thetas, theta)/sqrt(n)
t = theta / se
pval = 2 * stats::pnorm(-abs(t))
names(theta) = names(se) = d
res = list(
coef = theta, se = se, t = t, pval = pval,
thetas = all_thetas, ses = all_ses,
all_preds=all_preds, smpls=smpls)
return(res)
}
# Nuisance part r: learn E[D | X, Z] by regressing d on every column of
# `data` except the outcome y, using the supplied cross-fitting folds.
# Returns a list with one vector of out-of-fold predictions per fold.
fit_nuisance_pliv_partial_z = function(data, y, d, z,
                                       ml_r,
                                       smpls,
                                       params_r) {
  keep_cols <- names(data) != y
  regr_data <- data[, keep_cols, drop = FALSE]
  regr_task <- mlr3::TaskRegr$new(
    id = paste0("nuis_r_", d),
    backend = regr_data,
    target = d)
  # Optional externally supplied hyperparameters for the learner.
  if (!is.null(params_r)) {
    ml_r$param_set$values <- params_r
  }
  custom_cv <- mlr3::rsmp("custom")
  custom_cv$instantiate(regr_task, smpls$train_ids, smpls$test_ids)
  fit <- mlr3::resample(regr_task, ml_r, custom_cv, store_models = TRUE)
  list(r_hat_list = lapply(fit$predictions(), function(p) p$response))
}
# Reassemble the fold-wise out-of-sample predictions of E[D | X, Z] into a
# single vector aligned with the rows of `data`. The y/d/z arguments are
# kept for interface symmetry with the other nuisance helpers.
compute_pliv_partial_z_residuals = function(data, y, d, z, n_folds, smpls,
                                            all_preds) {
  r_hat <- rep(NA, nrow(data))
  for (fold in 1:n_folds) {
    idx <- smpls$test_ids[[fold]]
    r_hat[idx] <- all_preds$r_hat_list[[fold]]
  }
  list(r_hat = r_hat)
}
# Method-of-moments estimate of theta using r_hat = E[D | X, Z] as the
# instrument: theta = E[r * Y] / E[r * D]. Only the "partialling out"
# score is supported.
orth_pliv_partial_z_dml = function(r_hat, y, d, score) {
  stopifnot(score == "partialling out")
  numerator <- mean(r_hat * y)
  denominator <- mean(r_hat * d)
  list(theta = numerator / denominator)
}
# Asymptotic variance of the partialling-out estimator:
# (1/n) * E[psi^2] / (E[r * D])^2 with psi = (Y - D * theta) * r_hat.
var_pliv_partial_z = function(theta, r_hat, y, d, score) {
  stopifnot(score == "partialling out")
  n_obs <- length(r_hat)
  psi_sq <- mean(((y - d * theta) * r_hat)^2)
  j_hat <- mean(r_hat * d)
  c(psi_sq / (n_obs * j_hat^2))
}
bootstrap_pliv_partial_z = function(theta, se, data, y, d, z, n_folds, smpls,
                                    all_preds, bootstrap,
                                    n_rep_boot, n_rep=1) {
  # Multiplier bootstrap for the partial-Z PLIV estimator. Runs one pass
  # per sample-splitting repetition and column-binds the bootstrap draws
  # across repetitions.
  #
  # Returns: list with boot_coef and boot_t_stat matrices
  #   (n_rep_boot x n_rep), as produced by functional_bootstrap.
  boot_res = NULL
  for (i_rep in seq_len(n_rep)) {
    # Reassemble the cross-fitted nuisance predictions for this repetition.
    r_hat = compute_pliv_partial_z_residuals(data, y, d, z, n_folds,
                                             smpls[[i_rep]],
                                             all_preds[[i_rep]])$r_hat
    D = data[, d]
    Y = data[, y]
    # Score function and its derivative part for this repetition.
    psi = (Y - D * theta[i_rep]) * r_hat
    psi_a = - r_hat * D
    weights = draw_bootstrap_weights(bootstrap, n_rep_boot, length(psi))
    this_res = functional_bootstrap(theta[i_rep], se[i_rep], psi, psi_a,
                                    n_folds, smpls[[i_rep]],
                                    n_rep_boot, weights)
    if (is.null(boot_res)) {
      boot_res = this_res
    } else {
      boot_res$boot_coef = cbind(boot_res$boot_coef, this_res$boot_coef)
      boot_res$boot_t_stat = cbind(boot_res$boot_t_stat, this_res$boot_t_stat)
    }
  }
  return(boot_res)
}
|
# ---------------------------------------------------------------------------- #
# Merge FinStress, WDI, OECD, Run basic regressions on FinStress Variance
# Christopher Gandrud
# MIT LICENSE
# ---------------------------------------------------------------------------- #
# Load required packages
library(simpleSetup)
pkgs <- c('rio', 'dplyr', 'lubridate', 'DataCombine',
'countrycode', 'WDI', 'plm', 'stargazer', 'tseries')
library_install(pkgs)
# Set working directory
possibles <- c('/git_repositories/predicting_finstress/analysis_data')
set_valid_wd(possibles)
# Load FinStress -------------------------------------------------
FinStress <- rio::import(
"https://raw.githubusercontent.com/christophergandrud/EIUCrisesMeasure/master/data/FinStress.csv")
# Annual data --------
FinStress$year <- year(FinStress$date)
finstress <- FinStress %>% select(iso3c, date, year, FinStress) %>%
rename(finstress = FinStress)
finstress$iso2c <- countrycode(finstress$iso3c, origin = 'iso3c',
destination = 'iso2c', warn = TRUE)
# Annual mean
finstress_yr_mean <- finstress %>% group_by(iso2c, year) %>%
summarise(finstress_mean = mean(finstress, na.rm = T))
# Annual variance
finstress_yr_var <- finstress %>% group_by(iso2c, year) %>%
summarise(finstress_var = var(finstress, na.rm = T))
# Annual standard deviation
finstress_yr_sd <- finstress %>% group_by(iso2c, year) %>%
summarise(finstress_sd = sd(finstress, na.rm = T))
finstress_yr <- merge(finstress_yr_mean, finstress_yr_var,
by = c('iso2c', 'year'), all = T)
finstress_yr <- merge(finstress_yr, finstress_yr_sd,
by = c('iso2c', 'year'), all = T)
# rescale to make coefficients more easily interpretable
finstress_yr$finstress_var <- finstress_yr$finstress_var * 1000
finstress_yr <- finstress_yr %>% arrange(iso2c, year)
FindDups(finstress_yr, Vars = c('iso2c', 'year'))
# Check stationarity of FinStress Var
for (i in unique(finstress_yr$iso2c)) {
message(i)
sub <- subset(finstress_yr, iso2c == i)
sub <- sub[complete.cases(sub), ]
print(adf.test(sub$finstress_var))
}
# Lags and leads
finstress_yr <- slide(finstress_yr, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lead1yr', slideBy = 1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_sd', GroupVar = 'iso2c',
NewVar = 'finstress_sd_lead1yr', slideBy = 1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lag1yr', slideBy = -1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_sd', GroupVar = 'iso2c',
NewVar = 'finstress_sd_lag1yr', slideBy = -1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_mean', GroupVar = 'iso2c',
NewVar = 'finstress_mean_lead1yr', slideBy = 1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_mean', GroupVar = 'iso2c',
NewVar = 'finstress_mean_lag1yr', slideBy = -1)
# Download WDI gdp change & Stock Price Volatility -----------------------------
wdi <- WDI(indicator = c('NY.GDP.MKTP.KD.ZG', 'PA.NUS.FCRF', 'GFDD.SM.01',
'GFDD.OM.02'),
start = 2000, end = 2013, extra = T) %>%
rename(gdp_growth = NY.GDP.MKTP.KD.ZG) %>%
rename(exchange_rate_usd = PA.NUS.FCRF) %>%
rename(stock_price_volatility = GFDD.SM.01) %>%
rename(stock_returns = GFDD.OM.02)
# Drop poorly coded CV
wdi <- wdi %>% filter(iso2c != 'CV')
# Financial Fragility Indicators from Andrianova et al. (2015) -----------------
ff <- import('raw_data/Financial Fragility Database Stata.dta') %>%
select(-countryname, -countryid) %>%
dplyr::rename(iso2c = countrycode)
ff$log_imploans <- log(ff$ImpLoans)
# Laeven and Valencia Banking Crisis Dummy -------------------------------------
lv <- import('https://raw.githubusercontent.com/christophergandrud/EIUCrisesMeasure/master/data/alternative_measures/cleaned/laeven_valencia_banking_crisis.csv')
lv <- slide(lv, Var = 'lv_bank_crisis', GroupVar = 'iso2c',
NewVar = 'lv_lead1yr', slideBy = 1)
# Merge ------------------------------------------------------------------------
comb <- merge(finstress_yr, wdi, by = c('iso2c', 'year'), all.x = T)
comb <- merge(comb, ff, by = c('iso2c', 'year'), all.x = T)
comb <- merge(comb, lv, by = c('iso2c', 'year'), all.x = T)
comb <- FindDups(comb, c('iso2c', 'year'), NotDups = TRUE)
comb <- comb %>% filter(!is.na(iso2c))
# Save basic data ---------
export(comb, file = 'combined_data.csv')
comb <- import('combined_data.csv')
comb_high <- comb %>% filter(income == 'High income: OECD')
# Simple regression model ------------------------------------------------------
# Full sample --
## GDP and FinStress
comb_pd <- pdata.frame(comb, index = c('iso2c', 'year'))
mfull_1 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth,
data = comb_pd)
mfull_2 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth +
finstress_mean, data = comb_pd)
mfull_3 <- plm(finstress_var_lead1yr ~ finstress_var + finstress_mean +
stock_price_volatility, data = comb_pd)
## CAMELS
mfull_4 <- plm(finstress_var_lead1yr ~ finstress_var + log_imploans,
data = comb_pd)
# Using Stand. Dev. instead of Variance
mfull_1_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth,
data = comb_pd)
mfull_2_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth +
finstress_mean, data = comb_pd)
mfull_3_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + finstress_mean +
stock_price_volatility, data = comb_pd)
mfull_4_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + log_imploans,
data = comb_pd)
mfull_5_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + Liquid,
data = comb_pd)
# High Income
## GDP and FinStress
comb_high_pd <- pdata.frame(comb_high, index = c('iso2c', 'year'))
moecd_1 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth,
data = comb_high_pd)
moecd_2 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth +
finstress_mean, data = comb_high_pd)
moecd_3 <- plm(finstress_var_lead1yr ~ finstress_var +
finstress_mean + stock_price_volatility,
data = comb_high_pd)
## CAMELS
moecd_4 <- plm(finstress_var_lead1yr ~ finstress_var + log_imploans,
data = comb_high_pd)
# Using Stand. Dev. instead of Variance. NOTE: the assignments below re-use
# the names mfull_1_sd--mfull_3_sd and overwrite the full-sample SD models
# fit earlier (mfull_3_sd now uses log_imploans, not stock_price_volatility).
mfull_1_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth,
data = comb_pd)
mfull_2_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth +
finstress_mean, data = comb_pd)
mfull_3_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + log_imploans,
data = comb_pd)
moecd_1_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth,
data = comb_high_pd)
moecd_2_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth +
finstress_mean, data = comb_high_pd)
moecd_3_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + log_imploans,
data = comb_high_pd)
## No lagged DV ------------
mfull_nolagdv_1 <- plm(finstress_var_lead1yr ~ gdp_growth,
data = comb_pd)
mfull_nolagdv_2 <- plm(finstress_var_lead1yr ~ gdp_growth +
finstress_mean, data = comb_pd)
mfull_nolagdv_3 <- plm(finstress_var_lead1yr ~ finstress_mean +
stock_price_volatility, data = comb_pd)
mfull_nolagdv_4 <- plm(finstress_var_lead1yr ~ log_imploans,
data = comb_pd)
moecd_nolagdv_1 <- plm(finstress_var_lead1yr ~ gdp_growth,
data = comb_high_pd)
moecd_nolagdv_2 <- plm(finstress_var_lead1yr ~ gdp_growth +
finstress_mean, data = comb_high_pd)
moecd_nolagdv_3 <- plm(finstress_var_lead1yr ~ finstress_mean + stock_price_volatility,
data = comb_high_pd)
moecd_nolagdv_4 <- plm(finstress_var_lead1yr ~ log_imploans,
data = comb_high_pd)
## Annual For Paper ------
stargazer(mfull_1, mfull_2, mfull_3, mfull_4,
moecd_1, moecd_2, moecd_3, moecd_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('Var(FinStress)$_{year+0}$',
'GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)') ,
column.labels = c(rep('Full Sample', 4), rep('OECD', 4)),
add.lines = list(c('Fixed Effects', rep('y', 8))),
label = 'annual_reg',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data',
out = 'results_tables/annual_regressions.tex',
font.size = 'tiny',
omit.stat = 'f'
)
## Annual No Lagged DV For Paper ------
stargazer(mfull_nolagdv_1, mfull_nolagdv_2, mfull_nolagdv_3, mfull_nolagdv_4,
moecd_nolagdv_1, moecd_nolagdv_2, moecd_nolagdv_3, moecd_nolagdv_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)') ,
column.labels = c(rep('Full Sample', 4), rep('OECD', 4)),
add.lines = list(c('Fixed Effects', rep('y', 8))),
label = 'annual_reg_nolag_dv',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data (without lagged dependent variable)',
out = 'results_tables/annual_noDVlag_regressions.tex',
font.size = 'tiny',
omit.stat = 'f'
)
## Annual For Presentation ------
stargazer(mfull_1, mfull_2, mfull_3, mfull_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('Var(FinStress)$_{year+0}$',
'GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)') ,
column.labels = c(rep('Full Sample', 4)),
add.lines = list(c('Fixed Effects', rep('y', 4))),
label = 'annual_reg',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data (Full Sample)',
out = 'results_tables/annual_regressions_all.tex',
font.size = 'tiny',
omit.stat = 'f'
)
## Annual For Paper ------
stargazer(moecd_1, moecd_2, moecd_3, moecd_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('Var(FinStress)$_{year+0}$',
'GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)', 'Liquid Assets Ratio') ,
column.labels = rep('OECD', 4),
add.lines = list(c('Fixed Effects', rep('y', 4))),
label = 'annual_reg',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data (OECD Sample)',
out = 'results_tables/annual_regressions_oecd.tex',
font.size = 'tiny',
omit.stat = 'f'
)
#------------------------------------------------------------------------------#
# Quarterly data ----------------------
finstress <- FinStress %>% select(iso2c, date, FinStress) %>%
rename(finstress = FinStress) %>% rename(quarter = date)
finstress$quarter <- quarter(finstress$quarter, with_year = T)
# Quarterly mean
finstress_qt_mean <- finstress %>% group_by(iso2c, quarter) %>%
summarise(finstress_mean = mean(finstress, na.rm = T))
# Quarterly variance
finstress_qt <- finstress %>% group_by(iso2c, quarter) %>%
summarise(finstress_var = var(finstress, na.rm = T))
finstress_qt <- merge(finstress_qt_mean, finstress_qt,
by = c('iso2c', 'quarter'), all = T)
# rescale to make coefficients more easily interpretable
finstress_qt$finstress_var <- finstress_qt$finstress_var * 1000
finstress_qt <- finstress_qt %>% arrange(iso2c, quarter)
FindDups(finstress_qt, Vars = c('iso2c', 'quarter'))
finstress_qt <- slide(finstress_qt, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lead1qt', slideBy = 1)
finstress_qt <- slide(finstress_qt, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lag1qt', slideBy = -1)
# Load quarterly gdp growth (seasonally adjusted): Originally downloaded from:
# https://stats.oecd.org
oecd <- import('raw_data/QNA_06112015123914289.csv')
oecd <- oecd %>%
filter(Measure == "Growth rate compared to the same quarter of previous year, seasonally adjusted" &
Subject == 'Gross domestic product - expenditure approach')
oecd <- oecd[, c(2, 9, 17)]
names(oecd) <- c('country', 'quarter', 'gdp_growth_oecd')
oecd$quarter <- gsub('-Q', '\\.', oecd$quarter)
oecd$iso2c <- countrycode(oecd$country, origin = 'country.name',
destination = 'iso2c')
oecd <- oecd %>% select(-country)
FindDups(oecd, c('iso2c', 'quarter'))
# Merge together
comb_qt <- merge(oecd, finstress_qt, by = c('iso2c', 'quarter'))
comb_qt <- DropNA(comb_qt, 'iso2c') # NA is Euro area
# Quarterly regressions -------
comb_qt_pd <- pdata.frame(comb_qt, index = c('iso2c', 'quarter'))
mqt_1 <- plm(finstress_var_lead1qt ~ finstress_var + gdp_growth_oecd,
data = comb_qt_pd)
mqt_2 <- plm(finstress_var_lead1qt ~ finstress_var + gdp_growth_oecd +
finstress_mean,
data = comb_qt_pd)
stargazer(mqt_1, mqt_2, type = 'latex',
dep.var.labels = 'Var(FinStress)$_{quarter+1}$',
covariate.labels = c('Var(FinStress)$_{quarter + 0}$',
'GDP Growth (\\%)',
'FinStress Mean$_{quarter + 0}$'),
add.lines = list(c('Fixed Effects', 'y', 'y')),
label = 'quarterly_reg',
title = 'Regression result from predicting FinStress Variance using quarterly explanatory variable data (OECD only)',
out = 'results_tables/quarterly_regressions.tex',
font.size = 'small',
omit.stat = 'f'
)
| /B_analysts_sources_github/christophergandrud/predicting_finstress/finstress_predict_v1.R | no_license | Irbis3/crantasticScrapper | R | false | false | 14,546 | r | # ---------------------------------------------------------------------------- #
# Merge FinStress, WDI, OECD, Run basic regressions on FinStress Variance
# Christopher Gandrud
# MIT LICENSE
# ---------------------------------------------------------------------------- #
# Load required packages
library(simpleSetup)
pkgs <- c('rio', 'dplyr', 'lubridate', 'DataCombine',
'countrycode', 'WDI', 'plm', 'stargazer', 'tseries')
library_install(pkgs)
# Set working directory
possibles <- c('/git_repositories/predicting_finstress/analysis_data')
set_valid_wd(possibles)
# Load FinStress -------------------------------------------------
FinStress <- rio::import(
"https://raw.githubusercontent.com/christophergandrud/EIUCrisesMeasure/master/data/FinStress.csv")
# Annual data --------
FinStress$year <- year(FinStress$date)
finstress <- FinStress %>% select(iso3c, date, year, FinStress) %>%
rename(finstress = FinStress)
finstress$iso2c <- countrycode(finstress$iso3c, origin = 'iso3c',
destination = 'iso2c', warn = TRUE)
# Annual mean
finstress_yr_mean <- finstress %>% group_by(iso2c, year) %>%
summarise(finstress_mean = mean(finstress, na.rm = T))
# Annual variance
finstress_yr_var <- finstress %>% group_by(iso2c, year) %>%
summarise(finstress_var = var(finstress, na.rm = T))
# Annual standard deviation
finstress_yr_sd <- finstress %>% group_by(iso2c, year) %>%
summarise(finstress_sd = sd(finstress, na.rm = T))
finstress_yr <- merge(finstress_yr_mean, finstress_yr_var,
by = c('iso2c', 'year'), all = T)
finstress_yr <- merge(finstress_yr, finstress_yr_sd,
by = c('iso2c', 'year'), all = T)
# rescale to make coefficients more easily interpretable
finstress_yr$finstress_var <- finstress_yr$finstress_var * 1000
finstress_yr <- finstress_yr %>% arrange(iso2c, year)
FindDups(finstress_yr, Vars = c('iso2c', 'year'))
# Check stationarity of FinStress Var
for (i in unique(finstress_yr$iso2c)) {
message(i)
sub <- subset(finstress_yr, iso2c == i)
sub <- sub[complete.cases(sub), ]
print(adf.test(sub$finstress_var))
}
# Lags and leads
finstress_yr <- slide(finstress_yr, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lead1yr', slideBy = 1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_sd', GroupVar = 'iso2c',
NewVar = 'finstress_sd_lead1yr', slideBy = 1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lag1yr', slideBy = -1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_sd', GroupVar = 'iso2c',
NewVar = 'finstress_sd_lag1yr', slideBy = -1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_mean', GroupVar = 'iso2c',
NewVar = 'finstress_mean_lead1yr', slideBy = 1)
finstress_yr <- slide(finstress_yr, Var = 'finstress_mean', GroupVar = 'iso2c',
NewVar = 'finstress_mean_lag1yr', slideBy = -1)
# Download WDI gdp change & Stock Price Volatility -----------------------------
wdi <- WDI(indicator = c('NY.GDP.MKTP.KD.ZG', 'PA.NUS.FCRF', 'GFDD.SM.01',
'GFDD.OM.02'),
start = 2000, end = 2013, extra = T) %>%
rename(gdp_growth = NY.GDP.MKTP.KD.ZG) %>%
rename(exchange_rate_usd = PA.NUS.FCRF) %>%
rename(stock_price_volatility = GFDD.SM.01) %>%
rename(stock_returns = GFDD.OM.02)
# Drop poorly coded CV
wdi <- wdi %>% filter(iso2c != 'CV')
# Financial Fragility Indicators from Andrianova et al. (2015) -----------------
ff <- import('raw_data/Financial Fragility Database Stata.dta') %>%
select(-countryname, -countryid) %>%
dplyr::rename(iso2c = countrycode)
ff$log_imploans <- log(ff$ImpLoans)
# Laeven and Valencia Banking Crisis Dummy -------------------------------------
lv <- import('https://raw.githubusercontent.com/christophergandrud/EIUCrisesMeasure/master/data/alternative_measures/cleaned/laeven_valencia_banking_crisis.csv')
lv <- slide(lv, Var = 'lv_bank_crisis', GroupVar = 'iso2c',
NewVar = 'lv_lead1yr', slideBy = 1)
# Merge ------------------------------------------------------------------------
comb <- merge(finstress_yr, wdi, by = c('iso2c', 'year'), all.x = T)
comb <- merge(comb, ff, by = c('iso2c', 'year'), all.x = T)
comb <- merge(comb, lv, by = c('iso2c', 'year'), all.x = T)
comb <- FindDups(comb, c('iso2c', 'year'), NotDups = TRUE)
comb <- comb %>% filter(!is.na(iso2c))
# Save basic data ---------
export(comb, file = 'combined_data.csv')
comb <- import('combined_data.csv')
comb_high <- comb %>% filter(income == 'High income: OECD')
# Simple regression model ------------------------------------------------------
# Full sample --
## GDP and FinStress
comb_pd <- pdata.frame(comb, index = c('iso2c', 'year'))
mfull_1 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth,
data = comb_pd)
mfull_2 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth +
finstress_mean, data = comb_pd)
mfull_3 <- plm(finstress_var_lead1yr ~ finstress_var + finstress_mean +
stock_price_volatility, data = comb_pd)
## CAMELS
mfull_4 <- plm(finstress_var_lead1yr ~ finstress_var + log_imploans,
data = comb_pd)
# Using Stand. Dev. instead of Variance
mfull_1_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth,
data = comb_pd)
mfull_2_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth +
finstress_mean, data = comb_pd)
mfull_3_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + finstress_mean +
stock_price_volatility, data = comb_pd)
mfull_4_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + log_imploans,
data = comb_pd)
mfull_5_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + Liquid,
data = comb_pd)
# High Income
## GDP and FinStress
comb_high_pd <- pdata.frame(comb_high, index = c('iso2c', 'year'))
moecd_1 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth,
data = comb_high_pd)
moecd_2 <- plm(finstress_var_lead1yr ~ finstress_var + gdp_growth +
finstress_mean, data = comb_high_pd)
moecd_3 <- plm(finstress_var_lead1yr ~ finstress_var +
finstress_mean + stock_price_volatility,
data = comb_high_pd)
## CAMELS
moecd_4 <- plm(finstress_var_lead1yr ~ finstress_var + log_imploans,
data = comb_high_pd)
# Using Stand. Dev. instead of Variance. NOTE: the assignments below re-use
# the names mfull_1_sd--mfull_3_sd and overwrite the full-sample SD models
# fit earlier (mfull_3_sd now uses log_imploans, not stock_price_volatility).
mfull_1_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth,
data = comb_pd)
mfull_2_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth +
finstress_mean, data = comb_pd)
mfull_3_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + log_imploans,
data = comb_pd)
moecd_1_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth,
data = comb_high_pd)
moecd_2_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + gdp_growth +
finstress_mean, data = comb_high_pd)
moecd_3_sd <- plm(finstress_sd_lead1yr ~ finstress_sd + log_imploans,
data = comb_high_pd)
## No lagged DV ------------
mfull_nolagdv_1 <- plm(finstress_var_lead1yr ~ gdp_growth,
data = comb_pd)
mfull_nolagdv_2 <- plm(finstress_var_lead1yr ~ gdp_growth +
finstress_mean, data = comb_pd)
mfull_nolagdv_3 <- plm(finstress_var_lead1yr ~ finstress_mean +
stock_price_volatility, data = comb_pd)
mfull_nolagdv_4 <- plm(finstress_var_lead1yr ~ log_imploans,
data = comb_pd)
moecd_nolagdv_1 <- plm(finstress_var_lead1yr ~ gdp_growth,
data = comb_high_pd)
moecd_nolagdv_2 <- plm(finstress_var_lead1yr ~ gdp_growth +
finstress_mean, data = comb_high_pd)
moecd_nolagdv_3 <- plm(finstress_var_lead1yr ~ finstress_mean + stock_price_volatility,
data = comb_high_pd)
moecd_nolagdv_4 <- plm(finstress_var_lead1yr ~ log_imploans,
data = comb_high_pd)
## Annual For Paper ------
stargazer(mfull_1, mfull_2, mfull_3, mfull_4,
moecd_1, moecd_2, moecd_3, moecd_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('Var(FinStress)$_{year+0}$',
'GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)') ,
column.labels = c(rep('Full Sample', 4), rep('OECD', 4)),
add.lines = list(c('Fixed Effects', rep('y', 8))),
label = 'annual_reg',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data',
out = 'results_tables/annual_regressions.tex',
font.size = 'tiny',
omit.stat = 'f'
)
## Annual No Lagged DV For Paper ------
stargazer(mfull_nolagdv_1, mfull_nolagdv_2, mfull_nolagdv_3, mfull_nolagdv_4,
moecd_nolagdv_1, moecd_nolagdv_2, moecd_nolagdv_3, moecd_nolagdv_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)') ,
column.labels = c(rep('Full Sample', 4), rep('OECD', 4)),
add.lines = list(c('Fixed Effects', rep('y', 8))),
label = 'annual_reg_nolag_dv',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data (without lagged dependent variable)',
out = 'results_tables/annual_noDVlag_regressions.tex',
font.size = 'tiny',
omit.stat = 'f'
)
## Annual For Presentation ------
stargazer(mfull_1, mfull_2, mfull_3, mfull_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('Var(FinStress)$_{year+0}$',
'GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)') ,
column.labels = c(rep('Full Sample', 4)),
add.lines = list(c('Fixed Effects', rep('y', 4))),
label = 'annual_reg',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data (Full Sample)',
out = 'results_tables/annual_regressions_all.tex',
font.size = 'tiny',
omit.stat = 'f'
)
## Annual For Paper ------
stargazer(moecd_1, moecd_2, moecd_3, moecd_4,
type = 'latex',
dep.var.labels = 'Var(FinStress)$_{year+1}$',
covariate.labels = c('Var(FinStress)$_{year+0}$',
'GDP Growth (\\%)', 'FinStress Mean$_{year}$',
'Stock Price Volatility',
'Impaired Loans (log)', 'Liquid Assets Ratio') ,
column.labels = rep('OECD', 4),
add.lines = list(c('Fixed Effects', rep('y', 4))),
label = 'annual_reg',
title = 'Regression result from predicting FinStress Variance using annual explanatory variable data (OECD Sample)',
out = 'results_tables/annual_regressions_oecd.tex',
font.size = 'tiny',
omit.stat = 'f'
)
#------------------------------------------------------------------------------#
# Quarterly data ----------------------
finstress <- FinStress %>% select(iso2c, date, FinStress) %>%
rename(finstress = FinStress) %>% rename(quarter = date)
finstress$quarter <- quarter(finstress$quarter, with_year = T)
# Quarterly mean
finstress_qt_mean <- finstress %>% group_by(iso2c, quarter) %>%
summarise(finstress_mean = mean(finstress, na.rm = T))
# Quarterly variance
finstress_qt <- finstress %>% group_by(iso2c, quarter) %>%
summarise(finstress_var = var(finstress, na.rm = T))
finstress_qt <- merge(finstress_qt_mean, finstress_qt,
by = c('iso2c', 'quarter'), all = T)
# rescale to make coefficients more easily interpretable
finstress_qt$finstress_var <- finstress_qt$finstress_var * 1000
finstress_qt <- finstress_qt %>% arrange(iso2c, quarter)
FindDups(finstress_qt, Vars = c('iso2c', 'quarter'))
finstress_qt <- slide(finstress_qt, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lead1qt', slideBy = 1)
finstress_qt <- slide(finstress_qt, Var = 'finstress_var', GroupVar = 'iso2c',
NewVar = 'finstress_var_lag1qt', slideBy = -1)
# Load quarterly gdp growth (seasonally adjusted): Originally downloaded from:
# https://stats.oecd.org
oecd <- import('raw_data/QNA_06112015123914289.csv')
oecd <- oecd %>%
filter(Measure == "Growth rate compared to the same quarter of previous year, seasonally adjusted" &
Subject == 'Gross domestic product - expenditure approach')
oecd <- oecd[, c(2, 9, 17)]
names(oecd) <- c('country', 'quarter', 'gdp_growth_oecd')
oecd$quarter <- gsub('-Q', '\\.', oecd$quarter)
oecd$iso2c <- countrycode(oecd$country, origin = 'country.name',
destination = 'iso2c')
oecd <- oecd %>% select(-country)
FindDups(oecd, c('iso2c', 'quarter'))
# Merge together
comb_qt <- merge(oecd, finstress_qt, by = c('iso2c', 'quarter'))
comb_qt <- DropNA(comb_qt, 'iso2c') # NA is Euro area
# Quarterly regressions -------
comb_qt_pd <- pdata.frame(comb_qt, index = c('iso2c', 'quarter'))
mqt_1 <- plm(finstress_var_lead1qt ~ finstress_var + gdp_growth_oecd,
data = comb_qt_pd)
mqt_2 <- plm(finstress_var_lead1qt ~ finstress_var + gdp_growth_oecd +
finstress_mean,
data = comb_qt_pd)
stargazer(mqt_1, mqt_2, type = 'latex',
dep.var.labels = 'Var(FinStress)$_{quarter+1}$',
covariate.labels = c('Var(FinStress)$_{quarter + 0}$',
'GDP Growth (\\%)',
'FinStress Mean$_{quarter + 0}$'),
add.lines = list(c('Fixed Effects', 'y', 'y')),
label = 'quarterly_reg',
title = 'Regression result from predicting FinStress Variance using quarterly explanatory variable data (OECD only)',
out = 'results_tables/quarterly_regressions.tex',
font.size = 'small',
omit.stat = 'f'
)
|
#' Make request to Zillow API GetMonthlyPayments Web Service
#'
#' For a specific loan amount, the GetMonthlyPayments API returns the estimated
#' monthly payment that includes principal and interest based on today's
#' mortgage rate. The API returns the estimated monthly payment per loan type
#' (30-year fixed, 15-year fixed, and 5/1 ARM). If a ZIP code is entered, the
#' estimated taxes and insurance are returned in the result set.
#'
#' @param price The price of the property for which monthly payment data will be
#' calculated. Required.
#' @param down The percentage of the total property price that will be placed as
#' a down payment. If omitted, a 20\% down payment is assumed. If the down
#' payment is less than 20\%, a monthly private mortgage insurance amount is
#' specified for each returned loan type.
#' @param dollarsdown The dollar amount that will be placed as a down payment.
#' This amount will be used for the down payment if the 'down' parameter is
#' omitted. If the down payment is less than 20\% of the purchase price, a
#' monthly private mortgage insurance amount is specified for each returned
#' loan type.
#' @param zip The ZIP code in which the property is located. If omitted, monthly
#' property tax and hazard insurance data will not be returned.
#' @param zws_id The Zillow Web Service Identifier. Required.
#' @param url URL for the GetMonthlyPayments Web Service. Required.
#'
#' @return A named list with the following elements:
#' \describe{
#' \item{\strong{request}}{a list with the request parameters}
#' \item{\strong{message}}{a list of status code(s) and message(s)
#' returned by the API}
#' \item{\strong{response}}{an XMLNode with the API-specific response
#' values. At this time, no further coercion is performed, so you
#' may have to use functions from the \code{XML} package to extract
#' the desired output.}
#' }
#'
#' @export
#' @importFrom RCurl getURL
#'
#' @examples
#' \dontrun{
#' GetMonthlyPayments(price = 300000L)
#' GetMonthlyPayments(price = 300000L, down = 10)
#' GetMonthlyPayments(price = 300000L, dollarsdown = 10000L)
#' GetMonthlyPayments(price = 300000L, zip = 98109)}
GetMonthlyPayments <- function(
price = NULL,
down = NULL, dollarsdown = NULL, zip = NULL,
zws_id = getOption('ZillowR-zws_id'),
url = 'http://www.zillow.com/webservice/GetMonthlyPayments.htm'
) {
# Collect every argument-validation failure up front so the caller sees
# all problems in a single error message instead of one at a time.
validation_errors <- c(
validate_arg(price, required = TRUE, format = '^\\d+$', length_min = 1, length_max = 1),
validate_arg(down, format = '^\\d+$', length_min = 1, length_max = 1, value_min = 0, value_max = 100),
validate_arg(dollarsdown, format = '^\\d+$', length_min = 1, length_max = 1, value_min = 0),
validate_arg(zip, format = '^\\d+$', length_min = 1, length_max = 1),
validate_arg(zws_id, required = TRUE, class = 'character', length_min = 1, length_max = 1),
validate_arg(url, required = TRUE, class = 'character', length_min = 1, length_max = 1)
)
if (length(validation_errors) > 0) {
stop(paste(validation_errors, collapse = '\n'))
}
# Build the GET query string; presumably NULL arguments are dropped by
# url_encode_request so omitted optional parameters never reach the API
# (TODO confirm against that helper).
# NOTE(review): endpoint is plain http -- confirm whether the service
# requires https.
request <- url_encode_request(url,
'price' = price,
'down' = down,
'dollarsdown' = dollarsdown,
'zip' = zip,
'zws-id' = zws_id
)
# Surface transport-level failures with the offending request URL attached.
response <- tryCatch(
RCurl::getURL(request),
error = function(e) {stop(sprintf("Zillow API call with request '%s' failed with %s", request, e))}
)
return(preprocess_response(response))
}
| /ZillowR/R/GetMonthlyPayments.R | no_license | jkosteck/South_Bend_Clustering | R | false | false | 3,592 | r |
#' Make request to Zillow API GetMonthlyPayments Web Service
#'
#' For a specific loan amount, the GetMonthlyPayments API returns the estimated
#' monthly payment that includes principal and interest based on today's
#' mortgage rate. The API returns the estimated monthly payment per loan type
#' (30-year fixed, 15-year fixed, and 5/1 ARM). If a ZIP code is entered, the
#' estimated taxes and insurance are returned in the result set.
#'
#' @param price The price of the property for which monthly payment data will be
#' calculated. Required.
#' @param down The percentage of the total property price that will be placed as
#' a down payment. If omitted, a 20\% down payment is assumed. If the down
#' payment is less than 20\%, a monthly private mortgage insurance amount is
#' specified for each returned loan type.
#' @param dollarsdown The dollar amount that will be placed as a down payment.
#' This amount will be used for the down payment if the 'down' parameter is
#' omitted. If the down payment is less than 20\% of the purchase price, a
#' monthly private mortgage insurance amount is specified for each returned
#' loan type.
#' @param zip The ZIP code in which the property is located. If omitted, monthly
#' property tax and hazard insurance data will not be returned.
#' @param zws_id The Zillow Web Service Identifier. Required.
#' @param url URL for the GetMonthlyPayments Web Service. Required.
#'
#' @return A named list with the following elements:
#' \describe{
#' \item{\strong{request}}{a list with the request parameters}
#' \item{\strong{message}}{a list of status code(s) and message(s)
#' returned by the API}
#' \item{\strong{response}}{an XMLNode with the API-specific response
#' values. At this time, no further coercion is performed, so you
#' may have to use functions from the \code{XML} package to extract
#' the desired output.}
#' }
#'
#' @export
#' @importFrom RCurl getURL
#'
#' @examples
#' \dontrun{
#' GetMonthlyPayments(price = 300000L)
#' GetMonthlyPayments(price = 300000L, down = 10)
#' GetMonthlyPayments(price = 300000L, dollarsdown = 10000L)
#' GetMonthlyPayments(price = 300000L, zip = 98109)}
GetMonthlyPayments <- function(
price = NULL,
down = NULL, dollarsdown = NULL, zip = NULL,
zws_id = getOption('ZillowR-zws_id'),
url = 'http://www.zillow.com/webservice/GetMonthlyPayments.htm'
) {
# Collect every argument-validation failure up front so the caller sees
# all problems in a single error message instead of one at a time.
validation_errors <- c(
validate_arg(price, required = TRUE, format = '^\\d+$', length_min = 1, length_max = 1),
validate_arg(down, format = '^\\d+$', length_min = 1, length_max = 1, value_min = 0, value_max = 100),
validate_arg(dollarsdown, format = '^\\d+$', length_min = 1, length_max = 1, value_min = 0),
validate_arg(zip, format = '^\\d+$', length_min = 1, length_max = 1),
validate_arg(zws_id, required = TRUE, class = 'character', length_min = 1, length_max = 1),
validate_arg(url, required = TRUE, class = 'character', length_min = 1, length_max = 1)
)
if (length(validation_errors) > 0) {
stop(paste(validation_errors, collapse = '\n'))
}
# Build the GET query string; presumably NULL arguments are dropped by
# url_encode_request so omitted optional parameters never reach the API
# (TODO confirm against that helper).
# NOTE(review): endpoint is plain http -- confirm whether the service
# requires https.
request <- url_encode_request(url,
'price' = price,
'down' = down,
'dollarsdown' = dollarsdown,
'zip' = zip,
'zws-id' = zws_id
)
# Surface transport-level failures with the offending request URL attached.
response <- tryCatch(
RCurl::getURL(request),
error = function(e) {stop(sprintf("Zillow API call with request '%s' failed with %s", request, e))}
)
return(preprocess_response(response))
}
|
#!/bin/Rscript

## Differential-expression analysis of host (Arabidopsis) genes.
## For every count matrix in `inputDir`, run DESeq2 with a fixed
## 3 Mock vs 3 Fungus design and write shrunken result tables plus an
## MA plot into `outputDir`.
inputDir  <- 'readCounts/'
outputDir <- 'DESeqOutput/'

suppressMessages(library(DESeq2))
suppressMessages(library(apeglm))

for (i in list.files(inputDir)) {
    ## Count matrix: genes in rows (row.names = 1), samples in columns.
    ## `counts` avoids shadowing base::file().
    counts <- read.csv(file = paste(inputDir, i, sep = ''),
                       header = TRUE, sep = ",", row.names = 1)

    ## Fixed sample layout: columns 1-3 are Mock, columns 4-6 are Fungus.
    ## TODO(review): confirm every input file follows this column order.
    sampleTable <- data.frame(
        condition = factor(c("Mock", "Mock", "Mock", "Fungus", "Fungus", "Fungus")))

    ## Keep only Arabidopsis gene IDs, which start with "AT".
    ## BUG FIX: the original pattern "AT*" means 'an A followed by zero or
    ## more Ts', so it matched any row name merely containing an "A";
    ## "^AT" anchors the intended prefix match.
    counts <- counts[grep("^AT", rownames(counts)), ]

    dds <- DESeqDataSetFromMatrix(countData = counts,
                                  colData = sampleTable,
                                  design = ~condition)
    dds$condition <- relevel(dds$condition, ref = "Mock")  # Mock is the baseline

    analysis <- DESeq(dds)

    ## apeglm shrinkage of the Fungus-vs-Mock log2 fold changes.
    results.all <- lfcShrink(analysis, coef = "condition_Fungus_vs_Mock", type = "apeglm")

    ## NOTE: write.table() with its default sep = " " writes space-separated
    ## files despite the .csv extension; kept as-is so downstream readers of
    ## the existing output format keep working.
    write.table(results.all, paste(outputDir, i, '.all.csv', sep = ''))

    results.de <- subset(results.all, padj < 0.05)                       # all significant genes
    write.table(results.de, paste(outputDir, i, '.de.csv', sep = ''))
    results.oe <- subset(results.all, padj < 0.05 & log2FoldChange > 0)  # up-regulated
    write.table(results.oe, paste(outputDir, i, '.oe.csv', sep = ''))
    results.ue <- subset(results.all, padj < 0.05 & log2FoldChange < 0)  # down-regulated
    write.table(results.ue, paste(outputDir, i, '.ue.csv', sep = ''))

    ## NOTE(review): despite the file name this is an MA plot (plotMA), not a
    ## volcano plot; the name is kept to avoid breaking downstream paths.
    pdf(paste(outputDir, i, '.volcano.pdf', sep = ''))
    plotMA(results.all, ylim = c(-2, 2), alpha = 0.05)
    dev.off()
}
| /transcriptomics/7_DESeq_Ath.R | no_license | fantin-mesny/Scripts-from-Mesny-et-al.-2021 | R | false | false | 1,214 | r | #!/bin/Rscript
## Per-file differential-expression analysis with DESeq2: for each count
## matrix in inputDir, test Fungus vs Mock and write shrunken result tables
## plus an MA plot into outputDir.
inputDir <-'readCounts/'
outputDir <- 'DESeqOutput/'
suppressMessages(library(DESeq2))
suppressMessages(library(apeglm))
for (i in list.files(inputDir)){
## Count matrix: genes in rows (row.names = 1), samples in columns.
file<-read.csv(file=paste(inputDir,i,sep=''), header=TRUE, sep=",", row.names=1)
## Fixed design: columns 1-3 Mock, 4-6 Fungus -- TODO(review): confirm every
## input file follows this column order.
sampleTable <- data.frame(condition = factor(c("Mock","Mock","Mock","Fungus","Fungus","Fungus")))
## NOTE(review): the regex "AT*" means 'A followed by zero or more Ts', so
## this keeps any row whose name contains an "A"; "^AT" was probably intended
## (Arabidopsis gene IDs) -- confirm before relying on this filter.
file<-file[grep("AT*",rownames(file)),]
dds<-DESeqDataSetFromMatrix(countData = file,colData = sampleTable,design = ~condition)
dds$condition <- relevel(dds$condition, ref = "Mock")  ## Mock is the reference level
analysis <- DESeq(dds)
res <- results(analysis)  ## NOTE(review): unused; results.all below is what gets written
## apeglm-shrunken log2 fold changes for Fungus vs Mock.
results.all <- lfcShrink(analysis, coef="condition_Fungus_vs_Mock", type="apeglm")
## NOTE: write.table() defaults to space separation despite the .csv suffix.
write.table(results.all, paste(outputDir,i,'.all.csv',sep=''))
## Significant genes (padj < 0.05), then split by direction of change.
results.de <- subset(results.all, padj < 0.05)
write.table(results.de, paste(outputDir,i,'.de.csv',sep=''))
results.oe <- subset(results.all, padj < 0.05 & log2FoldChange > 0)
write.table(results.oe, paste(outputDir,i,'.oe.csv',sep=''))
results.ue <- subset(results.all, padj < 0.05 & log2FoldChange < 0)
write.table(results.ue, paste(outputDir,i,'.ue.csv',sep=''))
## NOTE(review): output is named .volcano.pdf but plotMA() draws an MA plot.
pdf(paste(outputDir,i,'.volcano.pdf',sep=''))
plotMA(results.all, ylim=c(-2,2), alpha=0.05)
dev.off()
}
|
## ----setup, include = FALSE---------------------------------------------------
## Slide-deck setup (this file is knitr::purl output of an Rmd/xaringan deck).
## The xaringanExtra add-ons are only enabled when xaringan is actually
## loaded, so the script also runs outside a slide build.
if ("xaringan" %in% loadedNamespaces()) {
options(htmltools.dir.version = FALSE)
knitr::opts_chunk$set(fig.height = 5, fig.width = 6)
xaringanExtra::use_tile_view()
xaringanExtra::use_clipboard()
xaringanExtra::use_search(show_icon = TRUE)
}
## Global chunk options and a shared ggplot2 theme used by all figures below.
knitr::opts_chunk$set(collapse = TRUE, warning = FALSE)
library(ggplot2)
theme_set(theme_minimal() +
theme(text = element_text(size = 16)) +
theme(panel.border = element_rect(color = "grey30", fill = NA)))
## Resolve a path relative to the project root: return `path` unchanged when
## it exists from the current working directory; otherwise assume we are one
## level down (e.g. rendering from a subdirectory) and look in the parent.
here_rel <- function(path) {
    if (file.exists(path)) {
        path
    } else {
        file.path("..", path)
    }
}
## .content-box-blue { background-color: lightblue; }
## .small-font { font-size: 70%; }
## .width-20 { width: 20% }
## .width-30 { width: 30% }
## .width-60 { width: 60% }
## .width-70 { width: 70% }
## .note {
## padding: 15px;
## margin-bottom: 20px;
## border: 1px solid transparent;
## border-radius: 4px;
## background-color: #d9edf7;
## border-color: #bce8f1;
## color: #31708f;
## }
## ---- message = FALSE---------------------------------------------------------
library(tidyverse)
## ---- include = FALSE---------------------------------------------------------
tutorial <- here_rel("tutorial/penguins.Rmd")
## ---- prompt = TRUE, comment = ""---------------------------------------------
1 + 2
## ----prompt = TRUE, comment = ""----------------------------------------------
c(2, 4, 6)
## ----prompt = TRUE, comment = ""----------------------------------------------
1 : 4
## ---- prompt = TRUE, comment = ""---------------------------------------------
x <- c(2, 4, 6)
## ----prompt = TRUE, comment = ""----------------------------------------------
x + 1
## ----prompt = TRUE, comment = ""----------------------------------------------
x + x
## ----prompt = TRUE, comment = ""----------------------------------------------
log(x)
## ----prompt = TRUE, comment = ""----------------------------------------------
x[[1]]
## ----prompt = TRUE, comment = ""----------------------------------------------
x[1 : 2]
## ---- prompt = TRUE, comment = ""---------------------------------------------
x[x > 2]
## ---- prompt = TRUE, comment = ""---------------------------------------------
d <- data.frame(x, y = log(x))
d
## ---- prompt = TRUE, comment = ""---------------------------------------------
d$x
## ---- include = FALSE---------------------------------------------------------
library(nomnoml)
## #padding: 25
## #fontsize: 18
## #fill: #E1DAFF; #D4A9FF
## #stroke: #8515C7
## #linewidth: 2
##
## [Import] -> [Understand]
## [Understand |
## [Wrangle] -> [Visualize]
## [Visualize] -> [Model]
## [Model] -> [Wrangle]
## ]
## [Understand] -> [Communicate]
## -----------------------------------------------------------------------------
data(geyser, package = "MASS")
dim(geyser)
head(geyser, 4)
## ----geyser-hist, echo = FALSE------------------------------------------------
ggplot(geyser) +
geom_histogram(aes(x = duration),
bins = 15,
color = "black",
fill = "grey")
## ----geyser-hist, eval = FALSE------------------------------------------------
## ggplot(geyser) +
## geom_histogram(aes(x = duration),
## bins = 15,
## color = "black",
## fill = "grey")
## ----geyser-scatter, echo = FALSE---------------------------------------------
ggplot(geyser) +
geom_point(aes(x = lag(duration),
y = waiting))
## ----geyser-scatter, eval = FALSE---------------------------------------------
## ggplot(geyser) +
## geom_point(aes(x = lag(duration),
## y = waiting))
## ----geyser-hist-narrow, echo = FALSE-----------------------------------------
p <- ggplot(geyser) +
geom_histogram(aes(x = duration,
y = stat(density)),
fill = "grey",
color = "black",
binwidth = 0.1)
p
## ----geyser-hist-narrow, eval = FALSE-----------------------------------------
## p <- ggplot(geyser) +
## geom_histogram(aes(x = duration,
## y = stat(density)),
## fill = "grey",
## color = "black",
## binwidth = 0.1)
## p
## -----------------------------------------------------------------------------
d <- geyser$duration
d_short <- d[d < 3]
d_long <- d[d >= 3]
## -----------------------------------------------------------------------------
mean(d_short)
sd(d_short)
mean(d_long)
sd(d_long)
mean(d >= 3)
## -----------------------------------------------------------------------------
geyser <- mutate(geyser, type = ifelse(duration < 3, "short", "long"))
## -----------------------------------------------------------------------------
sgd <- summarize(group_by(geyser, type),
mean = mean(duration),
sd = sd(duration),
n = n())
(sgd <- mutate(sgd, prop = n / sum(n)))
## -----------------------------------------------------------------------------
sgd <-
group_by(geyser, type) %>%
summarize(mean = mean(duration),
sd = sd(duration),
n = n()) %>%
ungroup() %>%
mutate(prop = n / sum(n))
sgd
## ----geyser-hist-dens, echo = FALSE-------------------------------------------
f1 <- function(x)
sgd$prop[1] * dnorm(x, sgd$mean[1], sgd$sd[1])
f2 <- function(x)
sgd$prop[2] * dnorm(x, sgd$mean[2], sgd$sd[2])
p <- p +
stat_function(color = "red", fun = f1) +
stat_function(color = "blue", fun = f2)
p
## ----geyser-hist-dens, eval = FALSE-------------------------------------------
## f1 <- function(x)
## sgd$prop[1] * dnorm(x, sgd$mean[1], sgd$sd[1])
## f2 <- function(x)
## sgd$prop[2] * dnorm(x, sgd$mean[2], sgd$sd[2])
## p <- p +
## stat_function(color = "red", fun = f1) +
## stat_function(color = "blue", fun = f2)
## p
## -----------------------------------------------------------------------------
geyser2 <- filter(geyser, duration != 2, duration != 4)
sgd2 <-
group_by(geyser2, type) %>%
summarize(mean = mean(duration),
sd = sd(duration),
n = n()) %>%
ungroup() %>%
mutate(prop = n / sum(n))
sgd2
## ----geyser-hist-dens-2, echo = FALSE-----------------------------------------
f1_2 <- function(x)
sgd2$prop[1] * dnorm(x, sgd2$mean[1], sgd2$sd[1])
f2_2 <- function(x)
sgd2$prop[2] * dnorm(x, sgd2$mean[2], sgd2$sd[2])
p <- p +
stat_function(color = "red",
linetype = 2,
fun = f1_2) +
stat_function(color = "blue",
linetype = 2,
fun = f2_2)
p
## ----geyser-hist-dens-2, eval = FALSE-----------------------------------------
## f1_2 <- function(x)
## sgd2$prop[1] * dnorm(x, sgd2$mean[1], sgd2$sd[1])
## f2_2 <- function(x)
## sgd2$prop[2] * dnorm(x, sgd2$mean[2], sgd2$sd[2])
## p <- p +
## stat_function(color = "red",
## linetype = 2,
## fun = f1_2) +
## stat_function(color = "blue",
## linetype = 2,
## fun = f2_2)
## p
## ---- eval = FALSE, echo = FALSE----------------------------------------------
## ## Fancier version that gets a color legend.
## ## Could also get a line type legend.
## p <- ggplot(geyser) +
## geom_histogram(aes(x = duration, y = stat(density)),
## fill = "grey", color = "black", bins = 50)
## p <- p +
## stat_function(aes(color = type),
## data = filter(sgd, type == "long"),
## fun = function(x)
## sgd$prop[1] * dnorm(x, sgd$mean[1], sgd$sd[1])) +
## stat_function(aes(color = type),
## data = filter(sgd, type == "short"),
## fun = function(x)
## sgd$prop[2] * dnorm(x, sgd$mean[2], sgd$sd[2]))
## p
##
## p <- p +
## stat_function(aes(color = type),
## data = filter(sgd2, type == "long"),
## linetype = 2,
## fun = function(x)
## sgd2$prop[1] * dnorm(x, sgd2$mean[1], sgd2$sd[1])) +
## stat_function(aes(color = type),
## data = filter(sgd2, type == "short"),
## linetype = 2,
## fun = function(x)
## sgd2$prop[2] * dnorm(x, sgd2$mean[2], sgd2$sd[2]))
## p
## -----------------------------------------------------------------------------
data(barley, package = "lattice")
head(barley)
## ---- fig.width = 10----------------------------------------------------------
p1 <- ggplot(barley) + geom_point(aes(x = yield, y = variety))
p2 <- ggplot(barley) + geom_point(aes(x = yield, y = site))
cowplot::plot_grid(p1, p2)
## ---- fig.width = 12----------------------------------------------------------
p1 <- ggplot(barley) + geom_point(aes(x = yield, y = variety, color = year))
p2 <- ggplot(barley) + geom_point(aes(x = yield, y = site, color = year))
cowplot::plot_grid(p1, p2)
## ----barley-color-sym, echo = FALSE, fig.width = 7----------------------------
ggplot(barley) +
geom_point(aes(x = yield,
y = variety,
color = year,
shape = site))
## ----barley-color-sym, eval = FALSE-------------------------------------------
## ggplot(barley) +
## geom_point(aes(x = yield,
## y = variety,
## color = year,
## shape = site))
## ----barley-color-sym-2, echo = FALSE, fig.width = 7--------------------------
ggplot(barley) +
geom_point(aes(x = yield,
y = variety,
color = year,
shape = site),
size = 2.5)
## ----barley-color-sym-2, eval = FALSE-----------------------------------------
## ggplot(barley) +
## geom_point(aes(x = yield,
## y = variety,
## color = year,
## shape = site),
## size = 2.5)
## ----barley-color-sym-3, echo = FALSE, fig.width = 7--------------------------
ggplot(barley) +
geom_point(aes(x = yield,
y = variety,
color = year,
shape = site),
size = 2.5,
position =
position_jitter(
height = 0.15,
width = 0))
## ----barley-color-sym-3, eval = FALSE-----------------------------------------
## ggplot(barley) +
## geom_point(aes(x = yield,
## y = variety,
## color = year,
## shape = site),
## size = 2.5,
## position =
## position_jitter(
## height = 0.15,
## width = 0))
## ----barley-facet, eval = FALSE-----------------------------------------------
## ggplot(barley) +
## geom_point(aes(x = yield,
## y = variety,
## color = year)) +
## facet_wrap(~site, ncol = 2)
## ----barley-facet, echo = FALSE, fig.width = 7, fig.height = 7----------------
ggplot(barley) +
geom_point(aes(x = yield,
y = variety,
color = year)) +
facet_wrap(~site, ncol = 2)
## ----barley-avg-dot, echo = FALSE, message = FALSE, fig.width = 7-------------
barley_site_year <-
group_by(barley, site, year) %>%
summarize(yield = mean(yield)) %>%
ungroup()
ggplot(barley_site_year) +
geom_point(aes(y = site,
x = yield,
color = year),
size = 3)
## ----barley-avg-dot, eval = FALSE---------------------------------------------
## barley_site_year <-
## group_by(barley, site, year) %>%
## summarize(yield = mean(yield)) %>%
## ungroup()
##
## ggplot(barley_site_year) +
## geom_point(aes(y = site,
## x = yield,
## color = year),
## size = 3)
## ----barley-avg-dot-2, echo = FALSE, message = FALSE, fig.width = 7-----------
barley_site_year <-
group_by(barley, site, year) %>%
summarize(yield = mean(yield)) %>%
ungroup()
ggplot(barley_site_year) +
geom_line(aes(y = site,
x = yield,
group = site),
color = "darkgrey",
size = 2) +
geom_point(aes(y = site,
x = yield,
color = year),
size = 4)
## ----barley-avg-dot-2, eval = FALSE-------------------------------------------
## barley_site_year <-
## group_by(barley, site, year) %>%
## summarize(yield = mean(yield)) %>%
## ungroup()
##
## ggplot(barley_site_year) +
## geom_line(aes(y = site,
## x = yield,
## group = site),
## color = "darkgrey",
## size = 2) +
## geom_point(aes(y = site,
## x = yield,
## color = year),
## size = 4)
## ---- class.source = "fold-hide"----------------------------------------------
library(ggrepel)
barley_site_year <-
mutate(barley_site_year, year = fct_rev(year))
barley_site_year_1932 <-
filter(barley_site_year, year == "1932")
ggplot(barley_site_year,
aes(x = year, y = yield, group = site)) +
geom_line() +
geom_text_repel(aes(label = site),
data = barley_site_year_1932,
hjust = "left",
direction = "y") +
scale_x_discrete(expand = expansion(mult = c(0.1, .25)),
position = "top") +
labs(x = NULL, y = "Average Yield")
## ----barley-avg-bar, echo = FALSE, message = FALSE, fig.width = 7-------------
ggplot(barley_site_year) +
geom_col(aes(x = yield,
y = site,
fill = year),
size = 3,
position = "dodge",
width = .4)
## ----barley-avg-bar, eval = FALSE---------------------------------------------
## ggplot(barley_site_year) +
## geom_col(aes(x = yield,
## y = site,
## fill = year),
## size = 3,
## position = "dodge",
## width = .4)
## -----------------------------------------------------------------------------
HairEyeDF <- as.data.frame(HairEyeColor)
head(HairEyeDF)
## ----eye-bar, echo = FALSE----------------------------------------------------
eye <-
group_by(HairEyeDF, Eye) %>%
summarize(Freq = sum(Freq)) %>%
ungroup()
ggplot(eye) +
geom_col(aes(x = Eye,
y = Freq),
position = "dodge")
## ----eye-bar, eval = FALSE----------------------------------------------------
## eye <-
## group_by(HairEyeDF, Eye) %>%
## summarize(Freq = sum(Freq)) %>%
## ungroup()
##
## ggplot(eye) +
## geom_col(aes(x = Eye,
## y = Freq),
## position = "dodge")
## ----eye-bar-2, echo = FALSE--------------------------------------------------
eye <-
group_by(HairEyeDF, Eye) %>%
summarize(Freq = sum(Freq)) %>%
ungroup()
ggplot(eye) +
geom_col(aes(x = Eye,
y = Freq,
fill = Eye),
position = "dodge")
## ----eye-bar-2, eval = FALSE--------------------------------------------------
## eye <-
## group_by(HairEyeDF, Eye) %>%
## summarize(Freq = sum(Freq)) %>%
## ungroup()
##
## ggplot(eye) +
## geom_col(aes(x = Eye,
## y = Freq,
## fill = Eye),
## position = "dodge")
## ----eye-bar-3, echo = FALSE--------------------------------------------------
hazel_rgb <-
col2rgb("brown") * 0.75 + col2rgb("green") * 0.25
hazel <-
do.call(rgb, as.list(hazel_rgb / 255))
cols <-
c(Blue = colorspace::lighten(colorspace::desaturate("blue", 0.3), 0.3),
Green = colorspace::lighten("forestgreen", 0.1),
Brown = colorspace::lighten("brown", 0.0001), ## 0.3?
Hazel = colorspace::lighten(hazel, 0.3))
pb <- ggplot(eye) +
geom_col(aes(x = Eye,
y = Freq,
fill = Eye),
position = "dodge") +
scale_fill_manual(values = cols)
pb
## ----eye-bar-3, eval = FALSE--------------------------------------------------
## hazel_rgb <-
## col2rgb("brown") * 0.75 + col2rgb("green") * 0.25
## hazel <-
## do.call(rgb, as.list(hazel_rgb / 255))
##
## cols <-
## c(Blue = colorspace::lighten(colorspace::desaturate("blue", 0.3), 0.3),
## Green = colorspace::lighten("forestgreen", 0.1),
## Brown = colorspace::lighten("brown", 0.0001), ## 0.3?
## Hazel = colorspace::lighten(hazel, 0.3))
##
## pb <- ggplot(eye) +
## geom_col(aes(x = Eye,
## y = Freq,
## fill = Eye),
## position = "dodge") +
## scale_fill_manual(values = cols)
## pb
## ----eye-bar-stacked, echo = FALSE--------------------------------------------
psb <- ggplot(eye) +
geom_col(aes(x = "", y = Freq, fill = Eye), color = "lightgrey") +
scale_fill_manual(values = cols)
psb
## ----eye-bar-stacked, eval = FALSE--------------------------------------------
## psb <- ggplot(eye) +
## geom_col(aes(x = "", y = Freq, fill = Eye), color = "lightgrey") +
## scale_fill_manual(values = cols)
## psb
## ----eye-pie, echo = FALSE----------------------------------------------------
(pp <- psb + coord_polar("y"))
## ----eye-pie, eval = FALSE----------------------------------------------------
## (pp <- psb + coord_polar("y"))
## ----eye-pie-2, echo = FALSE--------------------------------------------------
(pp <- pp + theme_void())
## ----eye-pie-2, eval = FALSE--------------------------------------------------
## (pp <- pp + theme_void())
## ---- echo = FALSE, fig.height = 4, fig.width = 8-----------------------------
cowplot::plot_grid(pb, pp)
## ---- fig.width = 14, fig.height = 6, class.source = "fold-hide"--------------
eye_hairsex <-
group_by(HairEyeDF, Hair, Sex) %>%
mutate(Prop = Freq / sum(Freq)) %>%
ungroup()
p1 <- ggplot(eye_hairsex) +
geom_col(aes(x = Eye, y = Prop, fill = Eye)) +
scale_fill_manual(values = cols) +
facet_grid(Hair ~ Sex)
p2 <- ggplot(eye_hairsex) +
geom_col(aes(x = "", y = Prop, fill = Eye)) +
scale_fill_manual(values = cols) +
coord_polar("y") +
facet_grid(Hair ~ Sex) +
theme_void()
cowplot::plot_grid(p1, p2)
## ---- fig.width = 8, class.source = "fold-hide"-------------------------------
library(ggplot2)
river <- scan(here::here("data/river.dat"))
rd <- data.frame(flow = river, month = seq_along(river))
(pp <- ggplot(rd) + geom_point(aes(x = month, y = flow)))
## ---- fig.width = 12, fig.height = 4, class.source = "fold-hide"--------------
pp + coord_fixed(3.5)
## ---- fig.width = 12, fig.height = 4, class.source = "fold-hide"--------------
pl <- ggplot(rd) + geom_line(aes(x = month, y = flow))
pl + coord_fixed(3.5)
## ---- fig.width = 8, class.source = "fold-hide"-------------------------------
pl
## -----------------------------------------------------------------------------
wind_turbines <- read.csv(here::here("data/us_wind.csv"), comment = "#")
## -----------------------------------------------------------------------------
wt_IA <- filter(wind_turbines, t_fips %/% 1000 == 19)
## -----------------------------------------------------------------------------
wt_IA <- filter(wt_IA, ! is.na(xlong), ! is.na(ylat))
## -----------------------------------------------------------------------------
wt_IA <- mutate(wt_IA, p_year = replace(p_year, p_year < 0, NA))
## ----iowa_sf_map, eval = FALSE------------------------------------------------
## iowa_sf <-
## sf::st_as_sf(maps::map("county", "iowa",
## plot = FALSE,
## fill = TRUE))
##
## p <- ggplot() +
## geom_sf(data = iowa_sf) +
## ggthemes::theme_map()
## p
## ----iowa_sf_map, echo = FALSE, fig.width = 8---------------------------------
iowa_sf <-
sf::st_as_sf(maps::map("county", "iowa",
plot = FALSE,
fill = TRUE))
p <- ggplot() +
geom_sf(data = iowa_sf) +
ggthemes::theme_map()
p
## ----wt-IA-all, eval = FALSE--------------------------------------------------
## p + geom_point(aes(xlong, ylat),
## data = wt_IA)
## ----wt-IA-all, echo = FALSE, fig.width = 8-----------------------------------
p + geom_point(aes(xlong, ylat),
data = wt_IA)
## ----wt-IA-color, eval = FALSE------------------------------------------------
## year_brk <- c(0, 2005, 2010, 2015, 2020)
## year_lab <- c("before 2005",
## "2005-2009",
## "2010-2014",
## "2015-2020")
## wt_IA <-
## mutate(wt_IA,
## year = cut(p_year,
## breaks = year_brk,
## labels = year_lab,
## right = FALSE))
## p + geom_point(aes(xlong,
## ylat,
## color = year),
## data = wt_IA,
## size = 3)
## ----wt-IA-color, echo = FALSE, fig.width = 8---------------------------------
year_brk <- c(0, 2005, 2010, 2015, 2020)
year_lab <- c("before 2005",
"2005-2009",
"2010-2014",
"2015-2020")
wt_IA <-
mutate(wt_IA,
year = cut(p_year,
breaks = year_brk,
labels = year_lab,
right = FALSE))
p + geom_point(aes(xlong,
ylat,
color = year),
data = wt_IA,
size = 3)
## ----eval = FALSE, echo = FALSE-----------------------------------------------
## library(tidyverse)
## p <- ggplot() + geom_sf(data = iowa_sf) + ggthemes::theme_map()
## p + geom_point(aes(xlong, ylat), data = wt_IA)
##
## wt_IA_sf <- sf::st_as_sf(wt_IA, coords = c("xlong", "ylat"), crs = 4326)
##
## p + geom_sf(data = filter(wt_IA_sf, year <= 2020))
##
## library(gganimate)
## pa <- p + geom_sf(data = wt_IA_sf) +
## transition_manual(year, cumulative = TRUE) +
## labs(title = "Wind turbines in Iowa",
## subtitle = "Year = {current_frame}")
## anim_save("foo.gif", animate(pa, fps = 10, nframes = 100))
## ---- include = FALSE---------------------------------------------------------
cancer_data_file <- here_rel("data/Invasive-Cancer-Incidence-Rates-by-County-in-Iowa-Lung-and-Bronchus-2011.csv")
## ---- message = FALSE---------------------------------------------------------
fname <- here::here("data/Invasive-Cancer-Incidence-Rates-by-County-in-Iowa-Lung-and-Bronchus-2011.csv")
d <- read_csv(fname, skip = 2)
head(d)
## -----------------------------------------------------------------------------
d <- select(d, county = 1, population = 2, count = 3)
## -----------------------------------------------------------------------------
tail(d)
## -----------------------------------------------------------------------------
d <- filter(d, ! is.na(population))
d <- filter(d, county != "STATE")
## -----------------------------------------------------------------------------
d <- mutate(d, count = as.numeric(count))
## -----------------------------------------------------------------------------
count(d, count == 0)
## -----------------------------------------------------------------------------
any(d$count == 0, na.rm = TRUE)
## -----------------------------------------------------------------------------
d <- replace_na(d, list(count = 0))
## -----------------------------------------------------------------------------
d$county[1]
iowa_sf$ID[1]
## -----------------------------------------------------------------------------
d <- mutate(d, cname = county, county = tolower(county))
iowa_sf <- mutate(iowa_sf, ID = sub("iowa,", "", ID))
iowa_sf <- rename(iowa_sf, county = ID)
## -----------------------------------------------------------------------------
setdiff(d$county, iowa_sf$county)
setdiff(iowa_sf$county, d$county)
## -----------------------------------------------------------------------------
d <- mutate(d, county = sub("'", "", county))
setdiff(d$county, iowa_sf$county)
setdiff(iowa_sf$county, d$county)
## -----------------------------------------------------------------------------
d <- mutate(d, rate1K = 1000 * (count / population))
md <- left_join(iowa_sf, d, "county")
head(md)
## ----cancer-map-1, eval = FALSE-----------------------------------------------
## ggplot(md) +
## geom_sf(aes(fill = rate1K))
## ----cancer-map-1, echo = FALSE, fig.width = 8--------------------------------
ggplot(md) +
geom_sf(aes(fill = rate1K))
## ----cancer-map-2, eval = FALSE-----------------------------------------------
## library(ggthemes)
## library(viridis)
## ggplot(md) +
## geom_sf(aes(fill = rate1K),
## color = "grey") +
## scale_fill_viridis(name = "Rate per 1000") +
## theme_map()
## ----cancer-map-2, echo = FALSE, fig.width = 8, message = FALSE---------------
library(ggthemes)
library(viridis)
ggplot(md) +
geom_sf(aes(fill = rate1K),
color = "grey") +
scale_fill_viridis(name = "Rate per 1000") +
theme_map()
## ----cancer-map-plotly, eval = FALSE------------------------------------------
## mdl <- mutate(md,
## label = paste(cname,
## round(rate1K, 1),
## population,
## sep = "\n"))
## p <- ggplot(mdl) +
## geom_sf(aes(fill = rate1K,
## text = label),
## color = "grey") +
## scale_fill_viridis(name = "Rate per 1000") +
## theme_map()
##
## plotly::ggplotly(p, tooltip = "text")
## ----cancer-map-plotly, echo = FALSE, fig.width = 8---------------------------
mdl <- mutate(md,
label = paste(cname,
round(rate1K, 1),
population,
sep = "\n"))
p <- ggplot(mdl) +
geom_sf(aes(fill = rate1K,
text = label),
color = "grey") +
scale_fill_viridis(name = "Rate per 1000") +
theme_map()
plotly::ggplotly(p, tooltip = "text")
## ---- fig.height = 6.5, fig.width = 9, class.source = "fold-hide"-------------
library(leaflet)
pal <- colorNumeric(palette = "viridis", domain = md$rate1K)
lab <- lapply(paste0(md$cname, "<BR>",
"Rate: ", round(md$rate1K, 1), "<BR>",
"Pop: ", scales::comma(md$population,
accuracy = 1)),
htmltools::HTML)
leaflet(sf::st_transform(md, 4326)) %>%
addPolygons(weight = 2,
color = "grey",
fillColor = ~ pal(rate1K),
fillOpacity = 1,
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
label = lab) %>%
addLegend(pal = pal, values = ~ rate1K, opacity = 1)
## -----------------------------------------------------------------------------
lausURL <- here::here("data/laucntycur14-2020.txt")
lausUS <- read.table(lausURL,
col.names = c("LAUSAreaCode", "State", "County",
"Title", "Period",
"LaborForce", "Employed",
"Unemployed", "UnempRate"),
quote = '"', sep = "|", skip = 6,
stringsAsFactors = FALSE, strip.white = TRUE,
fill = TRUE)
footstart <- grep("------", lausUS$LAUSAreaCode)
lausUS <- lausUS[1 : (footstart - 1), ]
## -----------------------------------------------------------------------------
lausUS <- separate(lausUS, Title, c("cname", "scode"),
sep = ", ", fill = "right")
## -----------------------------------------------------------------------------
sapply(lausUS, class)
## -----------------------------------------------------------------------------
lausUS <- mutate(lausUS, UnempRate = as.numeric(UnempRate))
## -----------------------------------------------------------------------------
select_if(lausUS, anyNA) %>% names()
## -----------------------------------------------------------------------------
select(lausUS, cname, scode) %>%
filter(is.na(scode)) %>%
unique()
## -----------------------------------------------------------------------------
select(lausUS, scode, Period, UnempRate) %>%
filter(is.na(UnempRate)) %>%
unique()
## -----------------------------------------------------------------------------
lausUS <- mutate(lausUS,
Period = fct_inorder(Period),
LaborForce = as.numeric(gsub(",", "", LaborForce)),
Unemployed = as.numeric(gsub(",", "", Unemployed)))
## ---- fig.width = 10, class.source = "fold-hide"------------------------------
group_by(lausUS, Period) %>%
summarize(Unemployed = sum(Unemployed, na.rm = TRUE),
LaborForce = sum(LaborForce, na.rm = TRUE),
UnempRate = 100 * (Unemployed / LaborForce)) %>%
ggplot(aes(Period, UnempRate, group = 1)) +
geom_line()
## -----------------------------------------------------------------------------
lausUS <- mutate(lausUS, fips = State * 1000 + County)
## -----------------------------------------------------------------------------
counties_sf <- sf::st_as_sf(maps::map("county", plot = FALSE, fill = TRUE))
county.fips <-
mutate(maps::county.fips, polyname = sub(":.*", "", polyname)) %>%
unique()
counties_sf <- left_join(counties_sf, county.fips, c("ID" = "polyname"))
states_sf <- sf::st_as_sf(maps::map("state", plot = FALSE, fill = TRUE))
## -----------------------------------------------------------------------------
summaryUS <- group_by(lausUS, County, State, fips) %>%
summarize(avg_unemp = mean(UnempRate, na.rm = TRUE),
max_unemp = max(UnempRate, na.rm = TRUE),
apr_unemp = UnempRate[Period == "Apr-20"]) %>%
ungroup()
head(summaryUS)
## ---- fig.width = 9, fig.height = 6, class.source = "fold-hide"---------------
left_join(counties_sf, summaryUS, "fips") %>%
ggplot() +
geom_sf(aes(fill = apr_unemp)) +
scale_fill_viridis(name = "Rate", na.value = "red") +
theme_map() +
geom_sf(data = states_sf, col = "grey", fill = NA)
## -----------------------------------------------------------------------------
anti_join(counties_sf, summaryUS, "fips")
## -----------------------------------------------------------------------------
counties_sf <- mutate(counties_sf, fips = replace(fips, fips == 46113, 46102))
## ---- fig.width = 9, fig.height = 6, class.source = "fold-hide"---------------
left_join(counties_sf, summaryUS, "fips") %>%
ggplot() +
geom_sf(aes(fill = apr_unemp)) +
scale_fill_viridis(name = "Rate", na.value = "red") +
theme_map() +
geom_sf(data = states_sf, col = "grey", fill = NA)
## ---- echo = FALSE, eval = FALSE----------------------------------------------
## ggpoly2sf <- function(poly, coords = c("long", "lat"),
## id = "group", region = "region", crs = 4326) {
## sf::st_as_sf(poly, coords = coords, crs = crs) %>%
## group_by(!! as.name(id), !! as.name(region)) %>%
## summarize(do_union = FALSE) %>%
## sf::st_cast("POLYGON") %>%
## ungroup() %>%
## group_by(!! as.name(region)) %>%
## summarize(do_union = FALSE) %>%
## ungroup()
## }
## m_sf <- ggpoly2sf(socviz::county_map, c("long", "lat"), "group", "id")
## m_sf <- mutate(m_sf, fips = as.numeric(id))
## m_sf <- mutate(m_sf, fips = replace(fips, fips == 46113, 46102))
## ggplot(m_sf) + geom_sf()
## au <- group_by(lausUS, fips) %>%
## summarize(avg_ur = mean(UnempRate, na.rm = TRUE))
## mu <- group_by(lausUS, fips) %>%
## summarize(max_ur = max(UnempRate, na.rm = TRUE))
## da <- left_join(m_sf, au, "fips")
## dm <- left_join(m_sf, mu, "fips")
## ggplot(da, aes(fill = avg_ur)) +
## geom_sf(size = 0.1) +
## scale_fill_viridis(name = "Rate", na.value = "red")
## ggplot(dm, aes(fill = max_ur)) +
## geom_sf(size = 0.1) +
## scale_fill_viridis(name = "Rate", na.value = "red")
## ggplot(left_join(m_sf, filter(lausUS, Period == "Apr-20"), "fips"),
## aes(fill = UnempRate)) +
## geom_sf(size = 0.1) +
## scale_fill_viridis(name = "Rate", na.value = "red")
## -----------------------------------------------------------------------------
library(readxl)
gcm <- read_excel(here::here("data/gapminder-under5mortality.xlsx"))
## -----------------------------------------------------------------------------
head(gcm, 3)
## -----------------------------------------------------------------------------
names(gcm)[1] <- "country"
## -----------------------------------------------------------------------------
tgcm <-
pivot_longer(gcm, -1, names_to = "year", values_to = "u5mort") %>%
mutate(year = as.numeric(year))
head(tgcm, 3)
## ----u5-1, eval = FALSE-------------------------------------------------------
## p <- ggplot(tgcm) +
## geom_line(aes(year,
## u5mort,
## group = country),
## alpha = 0.3)
## plotly::ggplotly(p)
## ----u5-1, echo = FALSE, fig.height = 6, fig.width = 8------------------------
p <- ggplot(tgcm) +
geom_line(aes(year,
u5mort,
group = country),
alpha = 0.3)
plotly::ggplotly(p)
## ----u5-2, eval = FALSE-------------------------------------------------------
## countries <- c("United States",
## "United Kingdom",
## "Germany",
## "China",
## "Egypt")
## filter(tgcm, country %in% countries) %>%
## ggplot() +
## geom_line(aes(x = year,
## y = u5mort,
## color = country))
## ----u5-2, echo = FALSE, fig.height = 6, fig.width = 8------------------------
countries <- c("United States",
"United Kingdom",
"Germany",
"China",
"Egypt")
filter(tgcm, country %in% countries) %>%
ggplot() +
geom_line(aes(x = year,
y = u5mort,
color = country))
## ----u5-3, eval = FALSE-------------------------------------------------------
## tgcm_miss <-
## group_by(tgcm, country) %>%
## summarize(anyNA = anyNA(u5mort)) %>%
## filter(anyNA) %>%
## pull(country)
##
## p <- filter(tgcm,
## country %in% tgcm_miss) %>%
## ggplot(aes(x = year,
## y = u5mort,
## group = country)) +
## geom_line(na.rm = TRUE) +
## xlim(c(1940, 2020))
## plotly::ggplotly(p)
## ----u5-3, echo = FALSE, fig.height = 6, fig.width = 7------------------------
tgcm_miss <-
group_by(tgcm, country) %>%
summarize(anyNA = anyNA(u5mort)) %>%
filter(anyNA) %>%
pull(country)
p <- filter(tgcm,
country %in% tgcm_miss) %>%
ggplot(aes(x = year,
y = u5mort,
group = country)) +
geom_line(na.rm = TRUE) +
xlim(c(1940, 2020))
plotly::ggplotly(p)
| /sibs.R | no_license | ltierney/SIBS-WV-2021 | R | false | false | 35,499 | r | ## ----setup, include = FALSE---------------------------------------------------
# Slide-deck setup: apply knitr/xaringan options only when rendering inside
# xaringan, then install a global ggplot2 theme used by every later figure.
if ("xaringan" %in% loadedNamespaces()) {
    options(htmltools.dir.version = FALSE)
    knitr::opts_chunk$set(fig.height = 5, fig.width = 6)
    xaringanExtra::use_tile_view()
    xaringanExtra::use_clipboard()
    xaringanExtra::use_search(show_icon = TRUE)
}
knitr::opts_chunk$set(collapse = TRUE, warning = FALSE)
library(ggplot2)
# Minimal theme with larger text and a grey panel border for all plots.
theme_set(theme_minimal() +
          theme(text = element_text(size = 16)) +
          theme(panel.border = element_rect(color = "grey30", fill = NA)))
# Resolve a resource path relative to the rendering directory: return `path`
# unchanged when it exists here, otherwise fall back to the same path one
# directory up ("../path"), since slides may be rendered from a subdirectory.
here_rel <- function(path) {
    if (file.exists(path)) {
        path
    } else {
        file.path("..", path)
    }
}
## .content-box-blue { background-color: lightblue; }
## .small-font { font-size: 70%; }
## .width-20 { width: 20% }
## .width-30 { width: 30% }
## .width-60 { width: 60% }
## .width-70 { width: 70% }
## .note {
## padding: 15px;
## margin-bottom: 20px;
## border: 1px solid transparent;
## border-radius: 4px;
## background-color: #d9edf7;
## border-color: #bce8f1;
## color: #31708f;
## }
## ---- message = FALSE---------------------------------------------------------
# Attach the tidyverse meta-package (dplyr, tidyr, ggplot2, forcats, ...).
library(tidyverse)
## ---- include = FALSE---------------------------------------------------------
# Path to the companion hands-on tutorial, resolved via here_rel() above.
tutorial <- here_rel("tutorial/penguins.Rmd")
## ---- prompt = TRUE, comment = ""---------------------------------------------
# R-basics demos shown on the slides: arithmetic, vectors, and subsetting.
1 + 2
## ----prompt = TRUE, comment = ""----------------------------------------------
c(2, 4, 6)
## ----prompt = TRUE, comment = ""----------------------------------------------
1 : 4
## ---- prompt = TRUE, comment = ""---------------------------------------------
x <- c(2, 4, 6)
## ----prompt = TRUE, comment = ""----------------------------------------------
# Arithmetic and math functions are vectorized element-wise.
x + 1
## ----prompt = TRUE, comment = ""----------------------------------------------
x + x
## ----prompt = TRUE, comment = ""----------------------------------------------
log(x)
## ----prompt = TRUE, comment = ""----------------------------------------------
# [[ extracts a single element; [ returns a (sub)vector.
x[[1]]
## ----prompt = TRUE, comment = ""----------------------------------------------
x[1 : 2]
## ---- prompt = TRUE, comment = ""---------------------------------------------
# Logical subsetting keeps the elements where the condition is TRUE.
x[x > 2]
## ---- prompt = TRUE, comment = ""---------------------------------------------
d <- data.frame(x, y = log(x))
d
## ---- prompt = TRUE, comment = ""---------------------------------------------
# $ extracts a data frame column as a plain vector.
d$x
## ---- include = FALSE---------------------------------------------------------
library(nomnoml)
## #padding: 25
## #fontsize: 18
## #fill: #E1DAFF; #D4A9FF
## #stroke: #8515C7
## #linewidth: 2
##
## [Import] -> [Understand]
## [Understand |
## [Wrangle] -> [Visualize]
## [Visualize] -> [Model]
## [Model] -> [Wrangle]
## ]
## [Understand] -> [Communicate]
## -----------------------------------------------------------------------------
# Old Faithful geyser data: eruption duration and waiting time to the next one.
data(geyser, package = "MASS")
dim(geyser)
head(geyser, 4)
## ----geyser-hist, echo = FALSE------------------------------------------------
ggplot(geyser) +
    geom_histogram(aes(x = duration),
                   bins = 15,
                   color = "black",
                   fill = "grey")
## ----geyser-hist, eval = FALSE------------------------------------------------
## ggplot(geyser) +
##     geom_histogram(aes(x = duration),
##                    bins = 15,
##                    color = "black",
##                    fill = "grey")
## ----geyser-scatter, echo = FALSE---------------------------------------------
# Waiting time plotted against the *previous* eruption's duration (lag()).
ggplot(geyser) +
    geom_point(aes(x = lag(duration),
                   y = waiting))
## ----geyser-scatter, eval = FALSE---------------------------------------------
## ggplot(geyser) +
##     geom_point(aes(x = lag(duration),
##                    y = waiting))
## ----geyser-hist-narrow, echo = FALSE-----------------------------------------
# Density-scaled histogram of durations with a narrow binwidth; `p` is reused
# by the later chunks that overlay fitted normal densities.
# Fix: `stat(density)` was deprecated in ggplot2 3.4.0. `after_stat(density)`
# (available since ggplot2 3.3.0, the same release that added `expansion()`
# used later in this deck) computes the same quantity without the warning.
p <- ggplot(geyser) +
    geom_histogram(aes(x = duration,
                       y = after_stat(density)),
                   fill = "grey",
                   color = "black",
                   binwidth = 0.1)
p
## ----geyser-hist-narrow, eval = FALSE-----------------------------------------
## p <- ggplot(geyser) +
##     geom_histogram(aes(x = duration,
##                        y = after_stat(density)),
##                    fill = "grey",
##                    color = "black",
##                    binwidth = 0.1)
## p
## -----------------------------------------------------------------------------
# Split durations at 3 minutes into the two apparent clusters.
d <- geyser$duration
d_short <- d[d < 3]
d_long <- d[d >= 3]
## -----------------------------------------------------------------------------
# Per-cluster mean/sd, and the proportion of long eruptions.
mean(d_short)
sd(d_short)
mean(d_long)
sd(d_long)
mean(d >= 3)
## -----------------------------------------------------------------------------
geyser <- mutate(geyser, type = ifelse(duration < 3, "short", "long"))
## -----------------------------------------------------------------------------
# Cluster summaries (mean, sd, count, proportion) -- nested-call style.
sgd <- summarize(group_by(geyser, type),
                 mean = mean(duration),
                 sd = sd(duration),
                 n = n())
(sgd <- mutate(sgd, prop = n / sum(n)))
## -----------------------------------------------------------------------------
# The same summaries written as a pipe.
sgd <-
    group_by(geyser, type) %>%
    summarize(mean = mean(duration),
              sd = sd(duration),
              n = n()) %>%
    ungroup() %>%
    mutate(prop = n / sum(n))
sgd
## ----geyser-hist-dens, echo = FALSE-------------------------------------------
# Overlay the two-component normal mixture implied by sgd on histogram `p`.
f1 <- function(x)
    sgd$prop[1] * dnorm(x, sgd$mean[1], sgd$sd[1])
f2 <- function(x)
    sgd$prop[2] * dnorm(x, sgd$mean[2], sgd$sd[2])
p <- p +
    stat_function(color = "red", fun = f1) +
    stat_function(color = "blue", fun = f2)
p
## ----geyser-hist-dens, eval = FALSE-------------------------------------------
## f1 <- function(x)
##     sgd$prop[1] * dnorm(x, sgd$mean[1], sgd$sd[1])
## f2 <- function(x)
##     sgd$prop[2] * dnorm(x, sgd$mean[2], sgd$sd[2])
## p <- p +
##     stat_function(color = "red", fun = f1) +
##     stat_function(color = "blue", fun = f2)
## p
## -----------------------------------------------------------------------------
# Refit without durations of exactly 2 and 4 -- presumably rounded/imputed
# values in this data set (confirm with the MASS::geyser documentation).
geyser2 <- filter(geyser, duration != 2, duration != 4)
sgd2 <-
    group_by(geyser2, type) %>%
    summarize(mean = mean(duration),
              sd = sd(duration),
              n = n()) %>%
    ungroup() %>%
    mutate(prop = n / sum(n))
sgd2
## ----geyser-hist-dens-2, echo = FALSE-----------------------------------------
# Dashed curves: the mixture refitted without the suspect values.
f1_2 <- function(x)
    sgd2$prop[1] * dnorm(x, sgd2$mean[1], sgd2$sd[1])
f2_2 <- function(x)
    sgd2$prop[2] * dnorm(x, sgd2$mean[2], sgd2$sd[2])
p <- p +
    stat_function(color = "red",
                  linetype = 2,
                  fun = f1_2) +
    stat_function(color = "blue",
                  linetype = 2,
                  fun = f2_2)
p
## ----geyser-hist-dens-2, eval = FALSE-----------------------------------------
## f1_2 <- function(x)
##     sgd2$prop[1] * dnorm(x, sgd2$mean[1], sgd2$sd[1])
## f2_2 <- function(x)
##     sgd2$prop[2] * dnorm(x, sgd2$mean[2], sgd2$sd[2])
## p <- p +
##     stat_function(color = "red",
##                   linetype = 2,
##                   fun = f1_2) +
##     stat_function(color = "blue",
##                   linetype = 2,
##                   fun = f2_2)
## p
## ---- eval = FALSE, echo = FALSE----------------------------------------------
## ## Fancier version that gets a color legend.
## ## Could also get a line type legend.
## p <- ggplot(geyser) +
## geom_histogram(aes(x = duration, y = stat(density)),
## fill = "grey", color = "black", bins = 50)
## p <- p +
## stat_function(aes(color = type),
## data = filter(sgd, type == "long"),
## fun = function(x)
## sgd$prop[1] * dnorm(x, sgd$mean[1], sgd$sd[1])) +
## stat_function(aes(color = type),
## data = filter(sgd, type == "short"),
## fun = function(x)
## sgd$prop[2] * dnorm(x, sgd$mean[2], sgd$sd[2]))
## p
##
## p <- p +
## stat_function(aes(color = type),
## data = filter(sgd2, type == "long"),
## linetype = 2,
## fun = function(x)
## sgd2$prop[1] * dnorm(x, sgd2$mean[1], sgd2$sd[1])) +
## stat_function(aes(color = type),
## data = filter(sgd2, type == "short"),
## linetype = 2,
## fun = function(x)
## sgd2$prop[2] * dnorm(x, sgd2$mean[2], sgd2$sd[2]))
## p
## -----------------------------------------------------------------------------
# Minnesota barley yields: varieties x sites x two years (1931, 1932).
data(barley, package = "lattice")
head(barley)
## ---- fig.width = 10----------------------------------------------------------
p1 <- ggplot(barley) + geom_point(aes(x = yield, y = variety))
p2 <- ggplot(barley) + geom_point(aes(x = yield, y = site))
cowplot::plot_grid(p1, p2)
## ---- fig.width = 12----------------------------------------------------------
p1 <- ggplot(barley) + geom_point(aes(x = yield, y = variety, color = year))
p2 <- ggplot(barley) + geom_point(aes(x = yield, y = site, color = year))
cowplot::plot_grid(p1, p2)
## ----barley-color-sym, echo = FALSE, fig.width = 7----------------------------
# Encode year as color and site as plotting symbol on one panel.
ggplot(barley) +
    geom_point(aes(x = yield,
                   y = variety,
                   color = year,
                   shape = site))
## ----barley-color-sym, eval = FALSE-------------------------------------------
## ggplot(barley) +
##     geom_point(aes(x = yield,
##                    y = variety,
##                    color = year,
##                    shape = site))
## ----barley-color-sym-2, echo = FALSE, fig.width = 7--------------------------
ggplot(barley) +
    geom_point(aes(x = yield,
                   y = variety,
                   color = year,
                   shape = site),
               size = 2.5)
## ----barley-color-sym-2, eval = FALSE-----------------------------------------
## ggplot(barley) +
##     geom_point(aes(x = yield,
##                    y = variety,
##                    color = year,
##                    shape = site),
##                size = 2.5)
## ----barley-color-sym-3, echo = FALSE, fig.width = 7--------------------------
# Vertical jitter only (width = 0) so yields stay exact on the x axis.
ggplot(barley) +
    geom_point(aes(x = yield,
                   y = variety,
                   color = year,
                   shape = site),
               size = 2.5,
               position =
                   position_jitter(
                       height = 0.15,
                       width = 0))
## ----barley-color-sym-3, eval = FALSE-----------------------------------------
## ggplot(barley) +
##     geom_point(aes(x = yield,
##                    y = variety,
##                    color = year,
##                    shape = site),
##                size = 2.5,
##                position =
##                    position_jitter(
##                        height = 0.15,
##                        width = 0))
## ----barley-facet, eval = FALSE-----------------------------------------------
## ggplot(barley) +
##     geom_point(aes(x = yield,
##                    y = variety,
##                    color = year)) +
##     facet_wrap(~site, ncol = 2)
## ----barley-facet, echo = FALSE, fig.width = 7, fig.height = 7----------------
# Small multiples: one panel per site instead of a shape encoding.
ggplot(barley) +
    geom_point(aes(x = yield,
                   y = variety,
                   color = year)) +
    facet_wrap(~site, ncol = 2)
## ----barley-avg-dot, echo = FALSE, message = FALSE, fig.width = 7-------------
# Site-by-year average yields, shown as a Cleveland-style dot plot.
barley_site_year <-
    group_by(barley, site, year) %>%
    summarize(yield = mean(yield)) %>%
    ungroup()
ggplot(barley_site_year) +
    geom_point(aes(y = site,
                   x = yield,
                   color = year),
               size = 3)
## ----barley-avg-dot, eval = FALSE---------------------------------------------
## barley_site_year <-
##     group_by(barley, site, year) %>%
##     summarize(yield = mean(yield)) %>%
##     ungroup()
##
## ggplot(barley_site_year) +
##     geom_point(aes(y = site,
##                    x = yield,
##                    color = year),
##                size = 3)
## ----barley-avg-dot-2, echo = FALSE, message = FALSE, fig.width = 7-----------
# Grey segments connect the two years within each site; highlights Morris,
# where 1931/1932 are reversed relative to every other site.
# NOTE(review): `size` for line geoms was renamed `linewidth` in ggplot2
# 3.4.0; this still works but warns there -- confirm the version in use.
barley_site_year <-
    group_by(barley, site, year) %>%
    summarize(yield = mean(yield)) %>%
    ungroup()
ggplot(barley_site_year) +
    geom_line(aes(y = site,
                  x = yield,
                  group = site),
              color = "darkgrey",
              size = 2) +
    geom_point(aes(y = site,
                   x = yield,
                   color = year),
               size = 4)
## ----barley-avg-dot-2, eval = FALSE-------------------------------------------
## barley_site_year <-
##     group_by(barley, site, year) %>%
##     summarize(yield = mean(yield)) %>%
##     ungroup()
##
## ggplot(barley_site_year) +
##     geom_line(aes(y = site,
##                   x = yield,
##                   group = site),
##               color = "darkgrey",
##               size = 2) +
##     geom_point(aes(y = site,
##                    x = yield,
##                    color = year),
##                size = 4)
## ---- class.source = "fold-hide"----------------------------------------------
# Slopegraph: one line per site across the two years, labeled at the right
# edge with ggrepel so the site names do not overlap.
library(ggrepel)
barley_site_year <-
    mutate(barley_site_year, year = fct_rev(year))
barley_site_year_1932 <-
    filter(barley_site_year, year == "1932")
ggplot(barley_site_year,
       aes(x = year, y = yield, group = site)) +
    geom_line() +
    geom_text_repel(aes(label = site),
                    data = barley_site_year_1932,
                    hjust = "left",
                    direction = "y") +
    scale_x_discrete(expand = expansion(mult = c(0.1, .25)),
                     position = "top") +
    labs(x = NULL, y = "Average Yield")
## ----barley-avg-bar, echo = FALSE, message = FALSE, fig.width = 7-------------
# The same averages as a dodged bar chart, for comparison with the dot plot.
ggplot(barley_site_year) +
    geom_col(aes(x = yield,
                 y = site,
                 fill = year),
             size = 3,
             position = "dodge",
             width = .4)
## ----barley-avg-bar, eval = FALSE---------------------------------------------
## ggplot(barley_site_year) +
##     geom_col(aes(x = yield,
##                  y = site,
##                  fill = year),
##              size = 3,
##              position = "dodge",
##              width = .4)
## -----------------------------------------------------------------------------
# Hair/eye color contingency table, flattened to a data frame of counts.
HairEyeDF <- as.data.frame(HairEyeColor)
head(HairEyeDF)
## ----eye-bar, echo = FALSE----------------------------------------------------
# Marginal eye-color frequencies, summed over hair color and sex.
eye <-
    group_by(HairEyeDF, Eye) %>%
    summarize(Freq = sum(Freq)) %>%
    ungroup()
ggplot(eye) +
    geom_col(aes(x = Eye,
                 y = Freq),
             position = "dodge")
## ----eye-bar, eval = FALSE----------------------------------------------------
## eye <-
##     group_by(HairEyeDF, Eye) %>%
##     summarize(Freq = sum(Freq)) %>%
##     ungroup()
##
## ggplot(eye) +
##     geom_col(aes(x = Eye,
##                  y = Freq),
##              position = "dodge")
## ----eye-bar-2, echo = FALSE--------------------------------------------------
# Redundantly map fill to Eye (default palette; custom colors come next).
eye <-
    group_by(HairEyeDF, Eye) %>%
    summarize(Freq = sum(Freq)) %>%
    ungroup()
ggplot(eye) +
    geom_col(aes(x = Eye,
                 y = Freq,
                 fill = Eye),
             position = "dodge")
## ----eye-bar-2, eval = FALSE--------------------------------------------------
## eye <-
##     group_by(HairEyeDF, Eye) %>%
##     summarize(Freq = sum(Freq)) %>%
##     ungroup()
##
## ggplot(eye) +
##     geom_col(aes(x = Eye,
##                  y = Freq,
##                  fill = Eye),
##              position = "dodge")
## ----eye-bar-3, echo = FALSE--------------------------------------------------
# Hand-built palette that roughly matches real eye colors; "hazel" is a
# 75/25 brown/green RGB blend rescaled to [0, 1] for rgb().
hazel_rgb <-
    col2rgb("brown") * 0.75 + col2rgb("green") * 0.25
hazel <-
    do.call(rgb, as.list(hazel_rgb / 255))
cols <-
    c(Blue = colorspace::lighten(colorspace::desaturate("blue", 0.3), 0.3),
      Green = colorspace::lighten("forestgreen", 0.1),
      Brown = colorspace::lighten("brown", 0.0001), ## 0.3?
      Hazel = colorspace::lighten(hazel, 0.3))
pb <- ggplot(eye) +
    geom_col(aes(x = Eye,
                 y = Freq,
                 fill = Eye),
             position = "dodge") +
    scale_fill_manual(values = cols)
pb
## ----eye-bar-3, eval = FALSE--------------------------------------------------
## hazel_rgb <-
##     col2rgb("brown") * 0.75 + col2rgb("green") * 0.25
## hazel <-
##     do.call(rgb, as.list(hazel_rgb / 255))
##
## cols <-
##     c(Blue = colorspace::lighten(colorspace::desaturate("blue", 0.3), 0.3),
##       Green = colorspace::lighten("forestgreen", 0.1),
##       Brown = colorspace::lighten("brown", 0.0001), ## 0.3?
##       Hazel = colorspace::lighten(hazel, 0.3))
##
## pb <- ggplot(eye) +
##     geom_col(aes(x = Eye,
##                  y = Freq,
##                  fill = Eye),
##              position = "dodge") +
##     scale_fill_manual(values = cols)
## pb
## ----eye-bar-stacked, echo = FALSE--------------------------------------------
# Single stacked bar (x = "" collapses everything into one column).
psb <- ggplot(eye) +
    geom_col(aes(x = "", y = Freq, fill = Eye), color = "lightgrey") +
    scale_fill_manual(values = cols)
psb
## ----eye-bar-stacked, eval = FALSE--------------------------------------------
## psb <- ggplot(eye) +
##     geom_col(aes(x = "", y = Freq, fill = Eye), color = "lightgrey") +
##     scale_fill_manual(values = cols)
## psb
## ----eye-pie, echo = FALSE----------------------------------------------------
# A pie chart is just the stacked bar in polar coordinates.
(pp <- psb + coord_polar("y"))
## ----eye-pie, eval = FALSE----------------------------------------------------
## (pp <- psb + coord_polar("y"))
## ----eye-pie-2, echo = FALSE--------------------------------------------------
(pp <- pp + theme_void())
## ----eye-pie-2, eval = FALSE--------------------------------------------------
## (pp <- pp + theme_void())
## ---- echo = FALSE, fig.height = 4, fig.width = 8-----------------------------
cowplot::plot_grid(pb, pp)
## ---- fig.width = 14, fig.height = 6, class.source = "fold-hide"--------------
# Within each hair-color/sex stratum, eye-color proportions as bars vs pies.
eye_hairsex <-
    group_by(HairEyeDF, Hair, Sex) %>%
    mutate(Prop = Freq / sum(Freq)) %>%
    ungroup()
p1 <- ggplot(eye_hairsex) +
    geom_col(aes(x = Eye, y = Prop, fill = Eye)) +
    scale_fill_manual(values = cols) +
    facet_grid(Hair ~ Sex)
p2 <- ggplot(eye_hairsex) +
    geom_col(aes(x = "", y = Prop, fill = Eye)) +
    scale_fill_manual(values = cols) +
    coord_polar("y") +
    facet_grid(Hair ~ Sex) +
    theme_void()
cowplot::plot_grid(p1, p2)
## ---- fig.width = 8, class.source = "fold-hide"-------------------------------
# Monthly river flow series; compares points vs lines and aspect ratios.
library(ggplot2)
river <- scan(here::here("data/river.dat"))
rd <- data.frame(flow = river, month = seq_along(river))
(pp <- ggplot(rd) + geom_point(aes(x = month, y = flow)))
## ---- fig.width = 12, fig.height = 4, class.source = "fold-hide"--------------
pp + coord_fixed(3.5)
## ---- fig.width = 12, fig.height = 4, class.source = "fold-hide"--------------
pl <- ggplot(rd) + geom_line(aes(x = month, y = flow))
pl + coord_fixed(3.5)
## ---- fig.width = 8, class.source = "fold-hide"-------------------------------
pl
## -----------------------------------------------------------------------------
# US wind-turbine registry; '#' lines in the CSV are comments.
wind_turbines <- read.csv(here::here("data/us_wind.csv"), comment = "#")
## -----------------------------------------------------------------------------
# Keep Iowa only: county FIPS = state * 1000 + county, Iowa's state code
# is 19, so integer division by 1000 recovers the state.
wt_IA <- filter(wind_turbines, t_fips %/% 1000 == 19)
## -----------------------------------------------------------------------------
wt_IA <- filter(wt_IA, ! is.na(xlong), ! is.na(ylat))
## -----------------------------------------------------------------------------
# Negative project years act as a missing-data code in this file.
wt_IA <- mutate(wt_IA, p_year = replace(p_year, p_year < 0, NA))
## ----iowa_sf_map, eval = FALSE------------------------------------------------
## iowa_sf <-
##     sf::st_as_sf(maps::map("county", "iowa",
##                            plot = FALSE,
##                            fill = TRUE))
##
## p <- ggplot() +
##     geom_sf(data = iowa_sf) +
##     ggthemes::theme_map()
## p
## ----iowa_sf_map, echo = FALSE, fig.width = 8---------------------------------
# Iowa county outlines as an sf object; `p` is the base map reused below.
iowa_sf <-
    sf::st_as_sf(maps::map("county", "iowa",
                           plot = FALSE,
                           fill = TRUE))
p <- ggplot() +
    geom_sf(data = iowa_sf) +
    ggthemes::theme_map()
p
## ----wt-IA-all, eval = FALSE--------------------------------------------------
## p + geom_point(aes(xlong, ylat),
##                data = wt_IA)
## ----wt-IA-all, echo = FALSE, fig.width = 8-----------------------------------
p + geom_point(aes(xlong, ylat),
               data = wt_IA)
## ----wt-IA-color, eval = FALSE------------------------------------------------
## year_brk <- c(0, 2005, 2010, 2015, 2020)
## year_lab <- c("before 2005",
##               "2005-2009",
##               "2010-2014",
##               "2015-2020")
## wt_IA <-
##     mutate(wt_IA,
##            year = cut(p_year,
##                       breaks = year_brk,
##                       labels = year_lab,
##                       right = FALSE))
## p + geom_point(aes(xlong,
##                    ylat,
##                    color = year),
##                data = wt_IA,
##                size = 3)
## ----wt-IA-color, echo = FALSE, fig.width = 8---------------------------------
# Bin project year into four eras; right = FALSE makes intervals [a, b).
year_brk <- c(0, 2005, 2010, 2015, 2020)
year_lab <- c("before 2005",
              "2005-2009",
              "2010-2014",
              "2015-2020")
wt_IA <-
    mutate(wt_IA,
           year = cut(p_year,
                      breaks = year_brk,
                      labels = year_lab,
                      right = FALSE))
p + geom_point(aes(xlong,
                   ylat,
                   color = year),
               data = wt_IA,
               size = 3)
## ----eval = FALSE, echo = FALSE-----------------------------------------------
## library(tidyverse)
## p <- ggplot() + geom_sf(data = iowa_sf) + ggthemes::theme_map()
## p + geom_point(aes(xlong, ylat), data = wt_IA)
##
## wt_IA_sf <- sf::st_as_sf(wt_IA, coords = c("xlong", "ylat"), crs = 4326)
##
## p + geom_sf(data = filter(wt_IA_sf, year <= 2020))
##
## library(gganimate)
## pa <- p + geom_sf(data = wt_IA_sf) +
## transition_manual(year, cumulative = TRUE) +
## labs(title = "Wind turbines in Iowa",
## subtitle = "Year = {current_frame}")
## anim_save("foo.gif", animate(pa, fps = 10, nframes = 100))
## ---- include = FALSE---------------------------------------------------------
cancer_data_file <- here_rel("data/Invasive-Cancer-Incidence-Rates-by-County-in-Iowa-Lung-and-Bronchus-2011.csv")
## ---- message = FALSE---------------------------------------------------------
# Iowa lung/bronchus cancer incidence by county; first two lines are titles.
fname <- here::here("data/Invasive-Cancer-Incidence-Rates-by-County-in-Iowa-Lung-and-Bronchus-2011.csv")
d <- read_csv(fname, skip = 2)
head(d)
## -----------------------------------------------------------------------------
# Keep and rename the first three columns by position.
d <- select(d, county = 1, population = 2, count = 3)
## -----------------------------------------------------------------------------
tail(d)
## -----------------------------------------------------------------------------
# Drop footnote rows (no population) and the statewide total row.
d <- filter(d, ! is.na(population))
d <- filter(d, county != "STATE")
## -----------------------------------------------------------------------------
# Counts were read as character (suppressed entries); coerce to numeric,
# which turns the non-numeric codes into NA.
d <- mutate(d, count = as.numeric(count))
## -----------------------------------------------------------------------------
count(d, count == 0)
## -----------------------------------------------------------------------------
any(d$count == 0, na.rm = TRUE)
## -----------------------------------------------------------------------------
# No true zero counts exist, so NA (suppressed) counts are recoded as zero.
d <- replace_na(d, list(count = 0))
## -----------------------------------------------------------------------------
# Compare name formats before joining the two tables.
d$county[1]
iowa_sf$ID[1]
## -----------------------------------------------------------------------------
# Normalize county names: keep the original in cname, lowercase for the key,
# and strip the "iowa," prefix from the map IDs.
d <- mutate(d, cname = county, county = tolower(county))
iowa_sf <- mutate(iowa_sf, ID = sub("iowa,", "", ID))
iowa_sf <- rename(iowa_sf, county = ID)
## -----------------------------------------------------------------------------
setdiff(d$county, iowa_sf$county)
setdiff(iowa_sf$county, d$county)
## -----------------------------------------------------------------------------
# The only mismatch involves an apostrophe; remove it and re-check.
d <- mutate(d, county = sub("'", "", county))
setdiff(d$county, iowa_sf$county)
setdiff(iowa_sf$county, d$county)
## -----------------------------------------------------------------------------
# Incidence per 1000 residents, then attach to the county polygons.
d <- mutate(d, rate1K = 1000 * (count / population))
md <- left_join(iowa_sf, d, "county")
head(md)
## ----cancer-map-1, eval = FALSE-----------------------------------------------
## ggplot(md) +
##     geom_sf(aes(fill = rate1K))
## ----cancer-map-1, echo = FALSE, fig.width = 8--------------------------------
ggplot(md) +
    geom_sf(aes(fill = rate1K))
## ----cancer-map-2, eval = FALSE-----------------------------------------------
## library(ggthemes)
## library(viridis)
## ggplot(md) +
##     geom_sf(aes(fill = rate1K),
##             color = "grey") +
##     scale_fill_viridis(name = "Rate per 1000") +
##     theme_map()
## ----cancer-map-2, echo = FALSE, fig.width = 8, message = FALSE---------------
# Choropleth with a perceptually uniform (viridis) fill scale.
library(ggthemes)
library(viridis)
ggplot(md) +
    geom_sf(aes(fill = rate1K),
            color = "grey") +
    scale_fill_viridis(name = "Rate per 1000") +
    theme_map()
## ----cancer-map-plotly, eval = FALSE------------------------------------------
## mdl <- mutate(md,
##               label = paste(cname,
##                             round(rate1K, 1),
##                             population,
##                             sep = "\n"))
## p <- ggplot(mdl) +
##     geom_sf(aes(fill = rate1K,
##                 text = label),
##             color = "grey") +
##     scale_fill_viridis(name = "Rate per 1000") +
##     theme_map()
##
## plotly::ggplotly(p, tooltip = "text")
## ----cancer-map-plotly, echo = FALSE, fig.width = 8---------------------------
# Interactive version: the unofficial `text` aesthetic feeds plotly tooltips.
mdl <- mutate(md,
              label = paste(cname,
                            round(rate1K, 1),
                            population,
                            sep = "\n"))
p <- ggplot(mdl) +
    geom_sf(aes(fill = rate1K,
                text = label),
            color = "grey") +
    scale_fill_viridis(name = "Rate per 1000") +
    theme_map()
plotly::ggplotly(p, tooltip = "text")
## ---- fig.height = 6.5, fig.width = 9, class.source = "fold-hide"-------------
# leaflet choropleth with HTML hover labels and a matching color legend.
library(leaflet)
pal <- colorNumeric(palette = "viridis", domain = md$rate1K)
lab <- lapply(paste0(md$cname, "<BR>",
                     "Rate: ", round(md$rate1K, 1), "<BR>",
                     "Pop: ", scales::comma(md$population,
                                            accuracy = 1)),
              htmltools::HTML)
leaflet(sf::st_transform(md, 4326)) %>%
    addPolygons(weight = 2,
                color = "grey",
                fillColor = ~ pal(rate1K),
                fillOpacity = 1,
                highlightOptions = highlightOptions(color = "white",
                                                    weight = 2,
                                                    bringToFront = TRUE),
                label = lab) %>%
    addLegend(pal = pal, values = ~ rate1K, opacity = 1)
## -----------------------------------------------------------------------------
# BLS local-area unemployment (LAUS), monthly by county; pipe-delimited with
# a 6-line header and a dashed footer that must be trimmed off.
lausURL <- here::here("data/laucntycur14-2020.txt")
lausUS <- read.table(lausURL,
                     col.names = c("LAUSAreaCode", "State", "County",
                                   "Title", "Period",
                                   "LaborForce", "Employed",
                                   "Unemployed", "UnempRate"),
                     quote = '"', sep = "|", skip = 6,
                     stringsAsFactors = FALSE, strip.white = TRUE,
                     fill = TRUE)
# Footer begins at the first row of dashes in the area-code column.
footstart <- grep("------", lausUS$LAUSAreaCode)
lausUS <- lausUS[1 : (footstart - 1), ]
## -----------------------------------------------------------------------------
# Split "County, ST" titles; fill = "right" leaves scode NA when absent.
lausUS <- separate(lausUS, Title, c("cname", "scode"),
                   sep = ", ", fill = "right")
## -----------------------------------------------------------------------------
sapply(lausUS, class)
## -----------------------------------------------------------------------------
# Non-numeric rate entries become NA under coercion.
lausUS <- mutate(lausUS, UnempRate = as.numeric(UnempRate))
## -----------------------------------------------------------------------------
select_if(lausUS, anyNA) %>% names()
## -----------------------------------------------------------------------------
select(lausUS, cname, scode) %>%
    filter(is.na(scode)) %>%
    unique()
## -----------------------------------------------------------------------------
select(lausUS, scode, Period, UnempRate) %>%
    filter(is.na(UnempRate)) %>%
    unique()
## -----------------------------------------------------------------------------
# Keep Period in file (chronological) order; strip thousands separators.
lausUS <- mutate(lausUS,
                 Period = fct_inorder(Period),
                 LaborForce = as.numeric(gsub(",", "", LaborForce)),
                 Unemployed = as.numeric(gsub(",", "", Unemployed)))
## ---- fig.width = 10, class.source = "fold-hide"------------------------------
# National rate per month, computed from totals (not a mean of county rates).
group_by(lausUS, Period) %>%
    summarize(Unemployed = sum(Unemployed, na.rm = TRUE),
              LaborForce = sum(LaborForce, na.rm = TRUE),
              UnempRate = 100 * (Unemployed / LaborForce)) %>%
    ggplot(aes(Period, UnempRate, group = 1)) +
    geom_line()
## -----------------------------------------------------------------------------
lausUS <- mutate(lausUS, fips = State * 1000 + County)
## -----------------------------------------------------------------------------
# County polygons plus a FIPS lookup; multi-polygon names ("county:part")
# are collapsed to the bare county name before deduplicating.
counties_sf <- sf::st_as_sf(maps::map("county", plot = FALSE, fill = TRUE))
county.fips <-
    mutate(maps::county.fips, polyname = sub(":.*", "", polyname)) %>%
    unique()
counties_sf <- left_join(counties_sf, county.fips, c("ID" = "polyname"))
states_sf <- sf::st_as_sf(maps::map("state", plot = FALSE, fill = TRUE))
## -----------------------------------------------------------------------------
# Per-county average, maximum, and April-2020 unemployment rates.
summaryUS <- group_by(lausUS, County, State, fips) %>%
    summarize(avg_unemp = mean(UnempRate, na.rm = TRUE),
              max_unemp = max(UnempRate, na.rm = TRUE),
              apr_unemp = UnempRate[Period == "Apr-20"]) %>%
    ungroup()
head(summaryUS)
## ---- fig.width = 9, fig.height = 6, class.source = "fold-hide"---------------
left_join(counties_sf, summaryUS, "fips") %>%
    ggplot() +
    geom_sf(aes(fill = apr_unemp)) +
    scale_fill_viridis(name = "Rate", na.value = "red") +
    theme_map() +
    geom_sf(data = states_sf, col = "grey", fill = NA)
## -----------------------------------------------------------------------------
# One county polygon fails to match; presumably the SD county FIPS
# renumbering 46113 -> 46102 -- hence the recode below.
anti_join(counties_sf, summaryUS, "fips")
## -----------------------------------------------------------------------------
counties_sf <- mutate(counties_sf, fips = replace(fips, fips == 46113, 46102))
## ---- fig.width = 9, fig.height = 6, class.source = "fold-hide"---------------
# Redraw the map after the recode; no red (NA) county should remain.
left_join(counties_sf, summaryUS, "fips") %>%
    ggplot() +
    geom_sf(aes(fill = apr_unemp)) +
    scale_fill_viridis(name = "Rate", na.value = "red") +
    theme_map() +
    geom_sf(data = states_sf, col = "grey", fill = NA)
## ---- echo = FALSE, eval = FALSE----------------------------------------------
## ggpoly2sf <- function(poly, coords = c("long", "lat"),
## id = "group", region = "region", crs = 4326) {
## sf::st_as_sf(poly, coords = coords, crs = crs) %>%
## group_by(!! as.name(id), !! as.name(region)) %>%
## summarize(do_union = FALSE) %>%
## sf::st_cast("POLYGON") %>%
## ungroup() %>%
## group_by(!! as.name(region)) %>%
## summarize(do_union = FALSE) %>%
## ungroup()
## }
## m_sf <- ggpoly2sf(socviz::county_map, c("long", "lat"), "group", "id")
## m_sf <- mutate(m_sf, fips = as.numeric(id))
## m_sf <- mutate(m_sf, fips = replace(fips, fips == 46113, 46102))
## ggplot(m_sf) + geom_sf()
## au <- group_by(lausUS, fips) %>%
## summarize(avg_ur = mean(UnempRate, na.rm = TRUE))
## mu <- group_by(lausUS, fips) %>%
## summarize(max_ur = max(UnempRate, na.rm = TRUE))
## da <- left_join(m_sf, au, "fips")
## dm <- left_join(m_sf, mu, "fips")
## ggplot(da, aes(fill = avg_ur)) +
## geom_sf(size = 0.1) +
## scale_fill_viridis(name = "Rate", na.value = "red")
## ggplot(dm, aes(fill = max_ur)) +
## geom_sf(size = 0.1) +
## scale_fill_viridis(name = "Rate", na.value = "red")
## ggplot(left_join(m_sf, filter(lausUS, Period == "Apr-20"), "fips"),
## aes(fill = UnempRate)) +
## geom_sf(size = 0.1) +
## scale_fill_viridis(name = "Rate", na.value = "red")
## -----------------------------------------------------------------------------
# Gapminder under-5 mortality: a wide Excel sheet with one column per year.
library(readxl)
gcm <- read_excel(here::here("data/gapminder-under5mortality.xlsx"))
## -----------------------------------------------------------------------------
head(gcm, 3)
## -----------------------------------------------------------------------------
# The first column holds the country name; give it a usable header.
names(gcm)[1] <- "country"
## -----------------------------------------------------------------------------
# Wide -> long: one (country, year, u5mort) row per cell; year headers are
# strings, so convert them to numbers.
tgcm <-
    pivot_longer(gcm, -1, names_to = "year", values_to = "u5mort") %>%
    mutate(year = as.numeric(year))
head(tgcm, 3)
## ----u5-1, eval = FALSE-------------------------------------------------------
## p <- ggplot(tgcm) +
##     geom_line(aes(year,
##                   u5mort,
##                   group = country),
##               alpha = 0.3)
## plotly::ggplotly(p)
## ----u5-1, echo = FALSE, fig.height = 6, fig.width = 8------------------------
# Spaghetti plot of every country; alpha keeps overplotting readable.
p <- ggplot(tgcm) +
    geom_line(aes(year,
                  u5mort,
                  group = country),
              alpha = 0.3)
plotly::ggplotly(p)
## ----u5-2, eval = FALSE-------------------------------------------------------
## countries <- c("United States",
##                "United Kingdom",
##                "Germany",
##                "China",
##                "Egypt")
## filter(tgcm, country %in% countries) %>%
##     ggplot() +
##     geom_line(aes(x = year,
##                   y = u5mort,
##                   color = country))
## ----u5-2, echo = FALSE, fig.height = 6, fig.width = 8------------------------
# Highlight a handful of countries with color instead of showing all lines.
countries <- c("United States",
               "United Kingdom",
               "Germany",
               "China",
               "Egypt")
filter(tgcm, country %in% countries) %>%
    ggplot() +
    geom_line(aes(x = year,
                  y = u5mort,
                  color = country))
## ----u5-3, eval = FALSE-------------------------------------------------------
## tgcm_miss <-
##     group_by(tgcm, country) %>%
##     summarize(anyNA = anyNA(u5mort)) %>%
##     filter(anyNA) %>%
##     pull(country)
##
## p <- filter(tgcm,
##             country %in% tgcm_miss) %>%
##     ggplot(aes(x = year,
##                y = u5mort,
##                group = country)) +
##     geom_line(na.rm = TRUE) +
##     xlim(c(1940, 2020))
## plotly::ggplotly(p)
## ----u5-3, echo = FALSE, fig.height = 6, fig.width = 7------------------------
# Show only countries with any missing values, zoomed to 1940-2020.
tgcm_miss <-
    group_by(tgcm, country) %>%
    summarize(anyNA = anyNA(u5mort)) %>%
    filter(anyNA) %>%
    pull(country)
p <- filter(tgcm,
            country %in% tgcm_miss) %>%
    ggplot(aes(x = year,
               y = u5mort,
               group = country)) +
    geom_line(na.rm = TRUE) +
    xlim(c(1940, 2020))
plotly::ggplotly(p)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OutcomeModels.R
\name{fitOutcomeModel}
\alias{fitOutcomeModel}
\title{Create an outcome model, and compute the relative risk}
\usage{
fitOutcomeModel(
population,
cohortMethodData = NULL,
modelType = "logistic",
stratified = FALSE,
useCovariates = FALSE,
inversePtWeighting = FALSE,
interactionCovariateIds = c(),
excludeCovariateIds = c(),
includeCovariateIds = c(),
prior = createPrior("laplace", useCrossValidation = TRUE),
control = createControl(cvType = "auto", seed = 1, startingVariance = 0.01, tolerance
= 2e-07, cvRepetitions = 10, noiseLevel = "quiet")
)
}
\arguments{
\item{population}{A population object generated by \code{\link[=createStudyPopulation]{createStudyPopulation()}},
potentially filtered by other functions.}
\item{cohortMethodData}{An object of type \link{CohortMethodData} as generated using
\code{\link[=getDbCohortMethodData]{getDbCohortMethodData()}}. Can be omitted if not using covariates and
not using interaction terms.}
\item{modelType}{The type of outcome model that will be used. Possible values are
"logistic", "poisson", or "cox".}
\item{stratified}{Should the regression be conditioned on the strata defined in the
population object (e.g. by matching or stratifying on propensity
scores)?}
\item{useCovariates}{Whether to use the covariates in the \code{cohortMethodData}
object in the outcome model.}
\item{inversePtWeighting}{Use inverse probability of treatment weighting (IPTW)? See details.}
\item{interactionCovariateIds}{An optional vector of covariate IDs to use to estimate interactions
with the main treatment effect.}
\item{excludeCovariateIds}{Exclude these covariates from the outcome model.}
\item{includeCovariateIds}{Include only these covariates in the outcome model.}
\item{prior}{The prior used to fit the model. See \code{\link[Cyclops:createPrior]{Cyclops::createPrior()}}
for details.}
\item{control}{The control object used to control the cross-validation used to
determine the hyperparameters of the prior (if applicable). See
\code{\link[Cyclops:createControl]{Cyclops::createControl()}} for details.}
}
\value{
An object of class \code{OutcomeModel}. Generic function \code{print}, \code{coef}, and
\code{confint} are available.
}
\description{
Create an outcome model, and compute the relative risk
}
\details{
IPTW estimates the average treatment effect using stabilized inverse propensity scores (Xu et al. 2010).
}
\references{
Xu S, Ross C, Raebel MA, Shetterly S, Blanchette C, Smith D. Use of stabilized inverse propensity scores
as weights to directly estimate relative risk and its confidence intervals. Value Health.
2010;13(2):273-277. doi:10.1111/j.1524-4733.2009.00671.x
}
| /man/fitOutcomeModel.Rd | permissive | estone96/CohortMethod | R | false | true | 2,769 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OutcomeModels.R
\name{fitOutcomeModel}
\alias{fitOutcomeModel}
\title{Create an outcome model, and compute the relative risk}
\usage{
fitOutcomeModel(
population,
cohortMethodData = NULL,
modelType = "logistic",
stratified = FALSE,
useCovariates = FALSE,
inversePtWeighting = FALSE,
interactionCovariateIds = c(),
excludeCovariateIds = c(),
includeCovariateIds = c(),
prior = createPrior("laplace", useCrossValidation = TRUE),
control = createControl(cvType = "auto", seed = 1, startingVariance = 0.01, tolerance
= 2e-07, cvRepetitions = 10, noiseLevel = "quiet")
)
}
\arguments{
\item{population}{A population object generated by \code{\link[=createStudyPopulation]{createStudyPopulation()}},
potentially filtered by other functions.}
\item{cohortMethodData}{An object of type \link{CohortMethodData} as generated using
\code{\link[=getDbCohortMethodData]{getDbCohortMethodData()}}. Can be omitted if not using covariates and
not using interaction terms.}
\item{modelType}{The type of outcome model that will be used. Possible values are
"logistic", "poisson", or "cox".}
\item{stratified}{Should the regression be conditioned on the strata defined in the
population object (e.g. by matching or stratifying on propensity
scores)?}
\item{useCovariates}{Whether to use the covariates in the \code{cohortMethodData}
object in the outcome model.}
\item{inversePtWeighting}{Use inverse probability of treatment weighting (IPTW)? See details.}
\item{interactionCovariateIds}{An optional vector of covariate IDs to use to estimate interactions
with the main treatment effect.}
\item{excludeCovariateIds}{Exclude these covariates from the outcome model.}
\item{includeCovariateIds}{Include only these covariates in the outcome model.}
\item{prior}{The prior used to fit the model. See \code{\link[Cyclops:createPrior]{Cyclops::createPrior()}}
for details.}
\item{control}{The control object used to control the cross-validation used to
determine the hyperparameters of the prior (if applicable). See
\code{\link[Cyclops:createControl]{Cyclops::createControl()}} for details.}
}
\value{
An object of class \code{OutcomeModel}. Generic function \code{print}, \code{coef}, and
\code{confint} are available.
}
\description{
Create an outcome model, and compute the relative risk
}
\details{
IPTW estimates the average treatment effect using stabilized inverse propensity scores (Xu et al. 2010).
}
\references{
Xu S, Ross C, Raebel MA, Shetterly S, Blanchette C, Smith D. Use of stabilized inverse propensity scores
as weights to directly estimate relative risk and its confidence intervals. Value Health.
2010;13(2):273-277. doi:10.1111/j.1524-4733.2009.00671.x
}
|
# Bootstrap-ensemble neural-network wind-speed prediction.
# data1 holds the numerical weather prediction (NWP) input, data2 the
# observed/experimental (EXP) wind speed; both are single-column tables.
library(nnet)
data1=read.table("T6 NWP.txt" )
data2=read.table("T6 EXP.txt")
x1=data1[,1]
x2=data2[,1]
length(x1)
length(x2)
#x2=2*x2;
#df=data.frame(cbind(x1, x2))
rep1=30 # number of ensemble components
nn=240 # sample size of training data
nn2=384-nn #nn2=84 -- NOTE(review): comment says 84 but 384-240 = 144; confirm intended split
nn_bootstrap=sample(1:nn, nn,replace=T) ##a sequence for extracting training data from the whole data set
nn1=seq(1:nn)
df=cbind(data1,data2)
colnames(df)=c("y1","y2")
number=rep1*nn2
predict1_out=matrix(seq(1:number), ncol=nn2) # the whole matrix for the out-of-sample prediction
number=rep1*nn
predict1_in=matrix(seq(1:number), ncol=nn) # the whole matrix for the in-sample-prediction
# First ensemble member: a 6-hidden-unit net fit on one bootstrap resample.
cheese1=df[nn_bootstrap,]
#data_train=df[nn_bootstrap,]
nnfit<-nnet(y2~y1, data=cheese1, size=6,skip=F,trace=F, decay = 5e-4,maxit = 1000,linout=TRUE)
# data_=df[nn1,]
# cheese2=cheese[-nn,c(3,4,5)]
# data_pred_in=matrix(c(12.9, 13.4, 23,25), nrow=2)
# dd=data.frame(data_pred_in)
#
# colnames(dd)=c("y1")
#
# y.fit=predict(nnfit, dd)
#
# In-sample predictions on the full (un-resampled) training rows.
data_pred_in=df[nn1,1]
data_pred_in=data.frame(data_pred_in)
colnames(data_pred_in)=c("y1")
y.fit=predict(nnfit, data_pred_in)
# y.fit=predict(nnfit, predict1_in)
#
write.table(matrix(y.fit, ncol=length(y.fit)), "wind prediction in.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# Out-of-sample predictions on the held-out rows.
data_pred_out=df[-nn1,1]
data_pred_out=data.frame(data_pred_out)
colnames(data_pred_out)=c("y1")
#
# is.data.frame(data_pred_out)
y.fit1=predict(nnfit, data_pred_out)
write.table(matrix(y.fit1, ncol=length(y.fit1)), "wind prediction_out.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# net.results_out1=net.results_out
#par(mfrow=c(2,1))
#plot(data2[-nn,], type="l", col="blue" )
#lines(net.results_out1$net.result, col="red", ylim=c(min(data2$exp[-nn], net.results_out1$net.result)-1,1+ max(data2$exp[-nn], net.results_out$net.result)) , xlim=c(1, nn2) )
# Store the first member's predictions in row 1 of the ensemble matrices.
numb=1
predict1_in[numb,]=matrix(y.fit, ncol=length(y.fit))
predict1_out[numb,]=matrix(y.fit1, ncol=length(y.fit1))
# Fit the remaining ensemble members: networks of size 7..rep1+5 on fresh
# bootstrap resamples; member i goes into row i-5 (rows 2..rep1).
# NOTE(review): the outer j loop repeats the whole inner loop 5 times; each
# pass OVERWRITES rows 2..rep1 of predict1_in/predict1_out (only the last
# pass survives) while the two CSVs accumulate all 5 passes via append=T.
# Confirm this is intended and not a leftover experiment.
for( j in(1:5))
{
mm=rep1+5
for (i in (7:mm))
{
nn_bootstrap=sample(1:nn, nn,replace = TRUE) ##a sequence for extracting training data from the whole data set
data_train=df[nn_bootstrap,]
print (i)
nnfit<-nnet(y2~y1, data=data_train, size=i,skip=F,decay = 5e-4, trace=F,maxit=1000,linout=TRUE)
# data_pred_in=df[nn1,]
# data_pred_in=df[nn1,c(1)]
#
y.fit=predict(nnfit, data_pred_in)
write.table(matrix(y.fit, ncol=length(y.fit)), "wind prediction in.csv",append = T, sep=",", row.names=FALSE, col.names=FALSE)
# data_pred_out=df[-nn1,]
y.fit1=predict(nnfit, data_pred_out)
write.table(matrix(y.fit1, ncol=length(y.fit1)), "wind prediction_out.csv",append = T, sep=",", row.names=FALSE, col.names=FALSE)
# net.results_out1=net.results_out
#par(mfrow=c(2,1))
#plot(data2[-nn,], type="l", col="blue" )
#lines(net.results_out1$net.result, col="red", ylim=c(min(data2$exp[-nn], net.results_out1$net.result)-1,1+ max(data2$exp[-nn], net.results_out$net.result)) , xlim=c(1, nn2) )
numb=i-5
predict1_in[numb,]=matrix(y.fit, ncol=length(y.fit))
predict1_out[numb,]=matrix(y.fit1, ncol=length(y.fit1))
}
}
# Simple (unweighted) ensemble mean and per-hour 95% bands (mean +/- 1.96 sd
# across the rep1 members), written out for in-sample data.
nn1=seq(1:nn)
y1_in=x2[1:nn]
y1_out=x2[(nn+1):384]
par(mfrow=c(2,2))
pred_ave_in=colMeans(predict1_in)
pred_ave_out=colMeans(predict1_out)
write.table(matrix(pred_ave_out, ncol=length(pred_ave_out)), "wind prediction_out_ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(pred_ave_in, ncol=length(pred_ave_in)), "wind prediction_in_ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
library(matrixStats)
predict_in_var=colVars(predict1_in)
predict_in_std=colSds(predict1_in)
predict_in_std_high=pred_ave_in +predict_in_std*1.96
predict_in_std_low =pred_ave_in -predict_in_std*1.96
write.table(matrix(predict_in_std_high, ncol=length(predict_in_std_high)), "wind prediction_in_ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(predict_in_std_low, ncol=length(predict_in_std_low)), "wind prediction_in_ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# Out-of-sample ensemble spread and 95% bands for the simple mean.
predict_out_var=colVars(predict1_out)
predict_out_std=colSds(predict1_out)
predict_out_std_high=pred_ave_out +predict_out_std*1.96
predict_out_std_low=pred_ave_out -predict_out_std*1.96
write.table(matrix(predict_out_std_high, ncol=length(predict_out_std_high)), "wind prediction_out_ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# BUG FIX: the lower-bound file previously received predict_out_std_high.
write.table(matrix(predict_out_std_low, ncol=length(predict_out_std_low)), "wind prediction_out_ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# ---- Simple-mean plots: observed (blue), ensemble mean (red), 95% band
# ---- (green); each figure is drawn once on screen and once into a PDF.
ylim_low= min(data2$V1[nn1], y1_out,predict_out_std_low)-1
ylim_high= max(data2$V1[nn1],y1_out,predict_out_std_high)+1
nn1_out=seq(1:nn2)
plot(nn1_out,y1_out,type="l", col="blue" ,lwd = 2,ylim=c(ylim_low, ylim_high), ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "Out-of-sample prediction (simple mean)", col.main="red")
lines(pred_ave_out, col="red",lwd = 2 )
lines(predict_out_std_low, col="green",lwd = 2 )
lines(predict_out_std_high, col="green",lwd = 2)
pdf("out ofsample mean.pdf")
plot(nn1_out,y1_out,type="l", col="blue" ,lwd = 2,ylim=c(ylim_low, ylim_high), ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "Out-of-sample prediction (simple mean)", col.main="red")
lines(pred_ave_out, col="red",lwd = 2 )
lines(predict_out_std_low, col="green",lwd = 2 )
lines(predict_out_std_high, col="green",lwd = 2)
dev.off()
#pred_ave_in=colMeans(predict1_in)
# In-sample version of the same figure.
ylim_low= min(data2$V1[nn1], y1_in,predict_in_std_low)-1
ylim_high= max(data2$V1[nn1],y1_in,predict_in_std_high)+1
plot(nn1,y1_in, ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "In-sample prediction (simple mean)",col.main="red")
lines(pred_ave_in, col="red",lwd = 2 )
lines(predict_in_std_low, col="green",lwd = 2 )
lines(predict_in_std_high, col="green",lwd = 2)
pdf("in sample simple mean.pdf")
plot(nn1,y1_in, ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "In-sample prediction (simple mean)",col.main="red")
lines(pred_ave_in, col="red",lwd = 2 )
lines(predict_in_std_low, col="green",lwd = 2 )
lines(predict_in_std_high, col="green",lwd = 2)
dev.off()
# ---- Weighted ensemble mean: choose component weights by minimising the
# ---- in-sample deviation from the simple mean via quadratic programming
# ---- (weights sum to 1, each weight >= 0).
# pred_ave_in=colMeans(predict1_in)
# pred_ave_out=colMeans(predict1_out)
#
# write.table(matrix(pred_ave_out, ncol=length(pred_ave_out)), "wind prediction_out_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
#
# write.table(matrix(pred_ave_in, ncol=length(pred_ave_in)), "wind prediction_in_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
#
mean_matrix_in=t(matrix(rep(pred_ave_in, rep1), nrow=nn))
temp=(predict1_in-mean_matrix_in)
# Gram matrix of deviations; + diag(rep1) keeps it strictly positive
# definite so solve.QP does not fail on a singular quadratic term.
P=temp%*%t(temp)+diag(rep1)
library(quadprog)
d=matrix(0, nrow=1, ncol=rep1)
A=diag(rep1)
a1=matrix(1, nrow=rep1)
A=cbind(a1, A)
b=matrix(0, ncol=rep1+1)
b[1]=1
# meq=1: the first constraint (weights sum to 1) is an equality; the rest
# (identity columns) enforce non-negativity.
sol = solve.QP (P, -d, A, b,meq=1)
weight=sol$solution
pred_out_ave=weight%*%predict1_out
pred_in_ave=weight%*%predict1_in
# BUG FIX: the "weight ave" files previously received the simple ensemble
# means (pred_ave_out / pred_ave_in) instead of the weighted means.
write.table(matrix(pred_out_ave, ncol=length(pred_out_ave)), "wind prediction_out_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(pred_in_ave, ncol=length(pred_in_ave)), "wind prediction_in_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# calculate confidence intervals
# Weighted out-of-sample spread around the weighted mean; the 1 - sum(w^2)
# denominator is the standard correction for a weighted variance.
predict_out_matrix=t(matrix(rep(pred_out_ave[1,],rep1),nrow=nn2, ncol=rep1))
predict_out_std=sqrt( weight%*%(predict_out_matrix-predict1_out)^2/(1-sum(weight^2)))
predict_out_std_high=pred_out_ave[1,] +predict_out_std[1,]*1.96
predict_out_std_low=pred_out_ave[1,] -predict_out_std[1,]*1.96
write.table(matrix(predict_out_std_high, ncol=length(predict_out_std_high)), "wind prediction_out_weight ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# BUG FIX: the lower-bound file previously received predict_out_std_high.
write.table(matrix(predict_out_std_low, ncol=length(predict_out_std_low)), "wind prediction_out_weight ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# ---- Weighted-mean out-of-sample plot: observed (blue), weighted mean
# ---- (red), 95% band (green); drawn on screen and again into a PDF.
ylim_low= min( data2[-nn1,],predict_out_std_low)-1
ylim_high= max(data2[-nn1,],predict_out_std_high)+1
plot(nn1_out, data2[-nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn2),type="l", col="blue" ,lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "Out-of-sample prediction (weighted mean)",col.main="red")
lines(pred_out_ave[1,], col="red",lwd = 2 )
lines(predict_out_std_low, lwd=2, col="green")
lines(predict_out_std_high, lwd=2,col="green")
pdf("out of sample weighted mean.pdf")
plot(nn1_out, data2[-nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn2),type="l", col="blue" ,lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "Out-of-sample prediction (weighted mean)",col.main="red")
lines(pred_out_ave[1,], col="red",lwd = 2 )
lines(predict_out_std_low, lwd=2, col="green")
lines(predict_out_std_high, lwd=2,col="green")
dev.off()
# Weighted in-sample spread around the weighted mean (same construction as
# the out-of-sample block above).
predict_in_matrix=t(matrix(rep(pred_in_ave[1,],rep1),nrow=nn, ncol=rep1))
predict_in_std=sqrt( weight%*%(predict_in_matrix-predict1_in)^2/(1-sum(weight^2)))
predict_in_std_high=pred_in_ave[1,] +predict_in_std[1,]*1.96
predict_in_std_low=pred_in_ave[1,] -predict_in_std[1,]*1.96
# BUG FIX: the y-axis limits were previously computed BEFORE the weighted
# high/low bands were assigned, so they silently reused the stale
# simple-mean bands from earlier in the script.
ylim_low= min( data2[nn1,],predict_in_std_low)-1
ylim_high= max(data2[nn1,],predict_in_std_high)+1
write.table(matrix(predict_in_std_high, ncol=length(predict_in_std_high)), "wind prediction_in_weight ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(predict_in_std_low, ncol=length(predict_in_std_low)), "wind prediction_in_weight ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# ---- Weighted-mean in-sample plot: observed vs weighted ensemble mean with
# ---- 95% band; drawn on screen and again into a PDF.
plot(nn1, data2[nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "In-sample prediction (weighted mean)",col.main="red")
lines(pred_in_ave[1,], col="red",lwd = 2)
lines(predict_in_std_low, col="green",lwd = 2)
lines(predict_in_std_high, col="green",lwd = 2)
pdf("in sample weighted mean.pdf")
plot(nn1, data2[nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "In-sample prediction (weighted mean)",col.main="red")
lines(pred_in_ave[1,], col="red",lwd = 2)
lines(predict_in_std_low, col="green",lwd = 2)
lines(predict_in_std_high, col="green",lwd = 2)
dev.off()
#
#
# pred_in=weight%*%predict1_in
# plot(data2[nn1,], type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue",cex.lab=1.5 )
# title(main = "In sample prediction",col.main="red",cex.main= 1.5)
#
# lines(pred_in[1,], col="red",lwd = 2 , ylim=c(min(data2$exp[nn], pred_in[1,])-1,1+ max(data2$exp[nn],pred_in) , xlim=c(1, 300) ))
#
#
#
# lines(predict_in_std_low, col="green",lwd = 2 , ylim=c(min(data2$exp[nn], pred_in[1,])-1,1+ max(data2$exp[nn],pred_in) , xlim=c(1, 300) ))
#
# lines(predict_in_std_high, col="green",lwd = 2 , ylim=c(min(data2$exp[nn], pred_in[1,])-1,1+ max(data2$exp[nn],pred_in) , xlim=c(1, 300) ))
#
#library(matrixStats)
#predict_in_var=colVars(predict1_in)
#predict_in_std=sqrt()
| /WATCFD_software/Rwindspeed_nnet/ws_nnet_bootstrap.R | permissive | younfor/windPredictSystem | R | false | false | 12,047 | r |
library(nnet)
data1=read.table("T6 NWP.txt" )
data2=read.table("T6 EXP.txt")
x1=data1[,1]
x2=data2[,1]
length(x1)
length(x2)
#x2=2*x2;
#df=data.frame(cbind(x1, x2))
rep1=30 # number of ensemble components
nn=240 # sample size if training data
nn2=384-nn #nn2=84
nn_bootstrap=sample(1:nn, nn,replace=T) ##a sequence for extracting training data from the whole data set
nn1=seq(1:nn)
df=cbind(data1,data2)
colnames(df)=c("y1","y2")
number=rep1*nn2
predict1_out=matrix(seq(1:number), ncol=nn2) # the whole matrix for the out-of-sample prediction
number=rep1*nn
predict1_in=matrix(seq(1:number), ncol=nn) # the whole matrix for the in-sample-prediction
cheese1=df[nn_bootstrap,]
#data_train=df[nn_bootstrap,]
nnfit<-nnet(y2~y1, data=cheese1, size=6,skip=F,trace=F, decay = 5e-4,maxit = 1000,linout=TRUE)
# data_=df[nn1,]
# cheese2=cheese[-nn,c(3,4,5)]
# data_pred_in=matrix(c(12.9, 13.4, 23,25), nrow=2)
# dd=data.frame(data_pred_in)
#
# colnames(dd)=c("y1")
#
# y.fit=predict(nnfit, dd)
#
data_pred_in=df[nn1,1]
data_pred_in=data.frame(data_pred_in)
colnames(data_pred_in)=c("y1")
y.fit=predict(nnfit, data_pred_in)
# y.fit=predict(nnfit, predict1_in)
#
write.table(matrix(y.fit, ncol=length(y.fit)), "wind prediction in.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
data_pred_out=df[-nn1,1]
data_pred_out=data.frame(data_pred_out)
colnames(data_pred_out)=c("y1")
#
# is.data.frame(data_pred_out)
y.fit1=predict(nnfit, data_pred_out)
write.table(matrix(y.fit1, ncol=length(y.fit1)), "wind prediction_out.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# net.results_out1=net.results_out
#par(mfrow=c(2,1))
#plot(data2[-nn,], type="l", col="blue" )
#lines(net.results_out1$net.result, col="red", ylim=c(min(data2$exp[-nn], net.results_out1$net.result)-1,1+ max(data2$exp[-nn], net.results_out$net.result)) , xlim=c(1, nn2) )
numb=1
predict1_in[numb,]=matrix(y.fit, ncol=length(y.fit))
predict1_out[numb,]=matrix(y.fit1, ncol=length(y.fit1))
for( j in(1:5))
{
mm=rep1+5
for (i in (7:mm))
{
nn_bootstrap=sample(1:nn, nn,replace = TRUE) ##a sequence for extracting training data from the whole data set
data_train=df[nn_bootstrap,]
print (i)
nnfit<-nnet(y2~y1, data=data_train, size=i,skip=F,decay = 5e-4, trace=F,maxit=1000,linout=TRUE)
# data_pred_in=df[nn1,]
# data_pred_in=df[nn1,c(1)]
#
y.fit=predict(nnfit, data_pred_in)
write.table(matrix(y.fit, ncol=length(y.fit)), "wind prediction in.csv",append = T, sep=",", row.names=FALSE, col.names=FALSE)
# data_pred_out=df[-nn1,]
y.fit1=predict(nnfit, data_pred_out)
write.table(matrix(y.fit1, ncol=length(y.fit1)), "wind prediction_out.csv",append = T, sep=",", row.names=FALSE, col.names=FALSE)
# net.results_out1=net.results_out
#par(mfrow=c(2,1))
#plot(data2[-nn,], type="l", col="blue" )
#lines(net.results_out1$net.result, col="red", ylim=c(min(data2$exp[-nn], net.results_out1$net.result)-1,1+ max(data2$exp[-nn], net.results_out$net.result)) , xlim=c(1, nn2) )
numb=i-5
predict1_in[numb,]=matrix(y.fit, ncol=length(y.fit))
predict1_out[numb,]=matrix(y.fit1, ncol=length(y.fit1))
}
}
nn1=seq(1:nn)
y1_in=x2[1:nn]
y1_out=x2[(nn+1):384]
par(mfrow=c(2,2))
pred_ave_in=colMeans(predict1_in)
pred_ave_out=colMeans(predict1_out)
write.table(matrix(pred_ave_out, ncol=length(pred_ave_out)), "wind prediction_out_ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(pred_ave_in, ncol=length(pred_ave_in)), "wind prediction_in_ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
library(matrixStats)
predict_in_var=colVars(predict1_in)
predict_in_std=colSds(predict1_in)
predict_in_std_high=pred_ave_in +predict_in_std*1.96
predict_in_std_low =pred_ave_in -predict_in_std*1.96
write.table(matrix(predict_in_std_high, ncol=length(predict_in_std_high)), "wind prediction_in_ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(predict_in_std_low, ncol=length(predict_in_std_low)), "wind prediction_in_ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# Out-of-sample ensemble spread and 95% bands for the simple mean.
predict_out_var=colVars(predict1_out)
predict_out_std=colSds(predict1_out)
predict_out_std_high=pred_ave_out +predict_out_std*1.96
predict_out_std_low=pred_ave_out -predict_out_std*1.96
write.table(matrix(predict_out_std_high, ncol=length(predict_out_std_high)), "wind prediction_out_ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# BUG FIX: the lower-bound file previously received predict_out_std_high.
write.table(matrix(predict_out_std_low, ncol=length(predict_out_std_low)), "wind prediction_out_ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
ylim_low= min(data2$V1[nn1], y1_out,predict_out_std_low)-1
ylim_high= max(data2$V1[nn1],y1_out,predict_out_std_high)+1
nn1_out=seq(1:nn2)
plot(nn1_out,y1_out,type="l", col="blue" ,lwd = 2,ylim=c(ylim_low, ylim_high), ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "Out-of-sample prediction (simple mean)", col.main="red")
lines(pred_ave_out, col="red",lwd = 2 )
lines(predict_out_std_low, col="green",lwd = 2 )
lines(predict_out_std_high, col="green",lwd = 2)
pdf("out ofsample mean.pdf")
plot(nn1_out,y1_out,type="l", col="blue" ,lwd = 2,ylim=c(ylim_low, ylim_high), ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "Out-of-sample prediction (simple mean)", col.main="red")
lines(pred_ave_out, col="red",lwd = 2 )
lines(predict_out_std_low, col="green",lwd = 2 )
lines(predict_out_std_high, col="green",lwd = 2)
dev.off()
#pred_ave_in=colMeans(predict1_in)
ylim_low= min(data2$V1[nn1], y1_in,predict_in_std_low)-1
ylim_high= max(data2$V1[nn1],y1_in,predict_in_std_high)+1
plot(nn1,y1_in, ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "In-sample prediction (simple mean)",col.main="red")
lines(pred_ave_in, col="red",lwd = 2 )
lines(predict_in_std_low, col="green",lwd = 2 )
lines(predict_in_std_high, col="green",lwd = 2)
pdf("in sample simple mean.pdf")
plot(nn1,y1_in, ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue" )
title(main = "In-sample prediction (simple mean)",col.main="red")
lines(pred_ave_in, col="red",lwd = 2 )
lines(predict_in_std_low, col="green",lwd = 2 )
lines(predict_in_std_high, col="green",lwd = 2)
dev.off()
# pred_ave_in=colMeans(predict1_in)
# pred_ave_out=colMeans(predict1_out)
#
# write.table(matrix(pred_ave_out, ncol=length(pred_ave_out)), "wind prediction_out_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
#
# write.table(matrix(pred_ave_in, ncol=length(pred_ave_in)), "wind prediction_in_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
#
mean_matrix_in=t(matrix(rep(pred_ave_in, rep1), nrow=nn))
temp=(predict1_in-mean_matrix_in)
P=temp%*%t(temp)+diag(rep1)
library(quadprog)
d=matrix(0, nrow=1, ncol=rep1)
A=diag(rep1)
a1=matrix(1, nrow=rep1)
A=cbind(a1, A)
b=matrix(0, ncol=rep1+1)
b[1]=1
sol = solve.QP (P, -d, A, b,meq=1)
weight=sol$solution
pred_out_ave=weight%*%predict1_out
pred_in_ave=weight%*%predict1_in
# BUG FIX: the "weight ave" files previously received the simple ensemble
# means (pred_ave_out / pred_ave_in) instead of the weighted means.
write.table(matrix(pred_out_ave, ncol=length(pred_out_ave)), "wind prediction_out_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(pred_in_ave, ncol=length(pred_in_ave)), "wind prediction_in_weight ave.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# calculate confidence intervals
# Weighted out-of-sample spread around the weighted mean; the 1 - sum(w^2)
# denominator is the standard correction for a weighted variance.
predict_out_matrix=t(matrix(rep(pred_out_ave[1,],rep1),nrow=nn2, ncol=rep1))
predict_out_std=sqrt( weight%*%(predict_out_matrix-predict1_out)^2/(1-sum(weight^2)))
predict_out_std_high=pred_out_ave[1,] +predict_out_std[1,]*1.96
predict_out_std_low=pred_out_ave[1,] -predict_out_std[1,]*1.96
write.table(matrix(predict_out_std_high, ncol=length(predict_out_std_high)), "wind prediction_out_weight ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
# BUG FIX: the lower-bound file previously received predict_out_std_high.
write.table(matrix(predict_out_std_low, ncol=length(predict_out_std_low)), "wind prediction_out_weight ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
ylim_low= min( data2[-nn1,],predict_out_std_low)-1
ylim_high= max(data2[-nn1,],predict_out_std_high)+1
plot(nn1_out, data2[-nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn2),type="l", col="blue" ,lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "Out-of-sample prediction (weighted mean)",col.main="red")
lines(pred_out_ave[1,], col="red",lwd = 2 )
lines(predict_out_std_low, lwd=2, col="green")
lines(predict_out_std_high, lwd=2,col="green")
pdf("out of sample weighted mean.pdf")
plot(nn1_out, data2[-nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn2),type="l", col="blue" ,lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "Out-of-sample prediction (weighted mean)",col.main="red")
lines(pred_out_ave[1,], col="red",lwd = 2 )
lines(predict_out_std_low, lwd=2, col="green")
lines(predict_out_std_high, lwd=2,col="green")
dev.off()
# Weighted in-sample spread around the weighted mean (same construction as
# the out-of-sample block above).
predict_in_matrix=t(matrix(rep(pred_in_ave[1,],rep1),nrow=nn, ncol=rep1))
predict_in_std=sqrt( weight%*%(predict_in_matrix-predict1_in)^2/(1-sum(weight^2)))
predict_in_std_high=pred_in_ave[1,] +predict_in_std[1,]*1.96
predict_in_std_low=pred_in_ave[1,] -predict_in_std[1,]*1.96
# BUG FIX: the y-axis limits were previously computed BEFORE the weighted
# high/low bands were assigned, so they silently reused the stale
# simple-mean bands from earlier in the script.
ylim_low= min( data2[nn1,],predict_in_std_low)-1
ylim_high= max(data2[nn1,],predict_in_std_high)+1
write.table(matrix(predict_in_std_high, ncol=length(predict_in_std_high)), "wind prediction_in_weight ave high.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
write.table(matrix(predict_in_std_low, ncol=length(predict_in_std_low)), "wind prediction_in_weight ave low.csv",append = FALSE, sep=",", row.names=FALSE, col.names=FALSE)
plot(nn1, data2[nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "In-sample prediction (weighted mean)",col.main="red")
lines(pred_in_ave[1,], col="red",lwd = 2)
lines(predict_in_std_low, col="green",lwd = 2)
lines(predict_in_std_high, col="green",lwd = 2)
pdf("in sample weighted mean.pdf")
plot(nn1, data2[nn1,], ylim=c(ylim_low, ylim_high) , xlim=c(1, nn), type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue")
title(main = "In-sample prediction (weighted mean)",col.main="red")
lines(pred_in_ave[1,], col="red",lwd = 2)
lines(predict_in_std_low, col="green",lwd = 2)
lines(predict_in_std_high, col="green",lwd = 2)
dev.off()
#
#
# pred_in=weight%*%predict1_in
# plot(data2[nn1,], type="l", col="blue", lwd = 2 ,ylab="Wind speed" , xlab="Hour", col.lab="blue",cex.lab=1.5 )
# title(main = "In sample prediction",col.main="red",cex.main= 1.5)
#
# lines(pred_in[1,], col="red",lwd = 2 , ylim=c(min(data2$exp[nn], pred_in[1,])-1,1+ max(data2$exp[nn],pred_in) , xlim=c(1, 300) ))
#
#
#
# lines(predict_in_std_low, col="green",lwd = 2 , ylim=c(min(data2$exp[nn], pred_in[1,])-1,1+ max(data2$exp[nn],pred_in) , xlim=c(1, 300) ))
#
# lines(predict_in_std_high, col="green",lwd = 2 , ylim=c(min(data2$exp[nn], pred_in[1,])-1,1+ max(data2$exp[nn],pred_in) , xlim=c(1, 300) ))
#
#library(matrixStats)
#predict_in_var=colVars(predict1_in)
#predict_in_std=sqrt()
|
# 1. Natural logarithm of 5
# 2. Square root of 121
# 3. Absolute value of 10 and -11
# 4. 8 x 7 x 6 x 5 x 4 x 3 x 2 x 1 (the factorial of 8)
# 5. Round pi (3.141593) to 3 decimal places
# 6. The logarithm of 100 to the base 10
# 7. The exponential of 4
# 8. Print your name using the message function
| /ex12.R | no_license | Yuyu19/data-science-for-demography- | R | false | false | 326 | r | # 1. Natural logarithm of 5
# 2. Square root of 121
# 3. Absolute value of 10 and -11
# 4. 8 x 7 x 6 x 5 x 4 x 3 x 2 x 1 (the factorial of 8)
# 5. Round pi (3.141593) to 3 decimal places
# 6. The logarithm of 100 to the base 10
# 7. The exponential of 4
# 8. Print your name using the message function
|
library(reshape)
library(ggplot2)
library(plyr)
library(gtools)
#source('~/Documents/Rscripts/120704-sortDataFrame.R')
# Initialise the package at the end by building a list containing all the functions in this script
# Transpose the 384 well map (tab text) using a python script and output into another file
# Convert a 384-well plate map (tab-delimited) to a linear well->sample table.
# Shells out to an external python script that writes `linearMapFile`, then
# reads that file back as a two-column data frame (location, sample).
transposeLinear = function(well384Map, linearMapFile='output.txt') {
  cmd = paste('~/Documents/Eclipseworkspace/Bioinformatics/Filtering/transposeLinear.py -i', well384Map, '>', linearMapFile, sep=' ')
  system(cmd)
  linearMap = read.delim(linearMapFile, header=F)
  colnames(linearMap) = c('location', 'sample')
  linearMap
}
#split sample names by whitespace. Takes a dataFrame and splits anything with whitespace into 2 columns
# Split the whitespace-delimited 'sample' column of a plate map into two new
# columns, 'origin' and 'gene', and append them to the input data frame.
splitSampleName = function(plateMap) {
  originGene = colsplit.factor(plateMap[['sample']], split = " ", names = c('origin', 'gene'))
  cbind(plateMap, originGene)
}
# Join the plate map (sample labels) onto columns 3, 4, 5 and 8 of the raw
# Ct data, matching well positions ('location' in the map, 'Pos' in the
# data). Returns the merged data frame.
buildDataFrameFromddCT = function(plateMap, CtData) {
  keepCols = c(3, 4, 5, 8)
  merge.data.frame(plateMap, CtData[, keepCols], by.x = 'location', by.y = 'Pos')
}
# Pair up duplicate qPCR wells: odd-numbered samples are replicate 1, even-
# numbered samples replicate 2. Merges the two replicate sets by sample name
# and appends the per-sample mean and sd of the two Cp values.
# Returns list(rep1, rep2, usefulData).
# NOTE(review): the `indexes` parameter is immediately overwritten with
# 1:384 below, so the argument is ignored — confirm whether callers were
# ever meant to supply their own index set.
# NOTE(review): the computed columns are named meanCP / stdDevCP (capital P),
# while ddCTcalculate() below reads a 'meanCp' column — verify the intended
# column name; as written the two functions do not line up.
extractReplicates <- function (indexes, ctData) {
# Retrieve the indexes of the 384 wellplate
indexes = c(1:384)
# Keep only cases with data in them as the merge function doesn't work with NAs
CtData = ctData[complete.cases(ctData[,3]),]
# Subset each Cp into its replicates. Takes a vector with the indexes to subset and then takes the
# even entries and odd entries separately from the dataframe containing cp values
even = as.character(indexes[indexes%%2 == 0])
odd = as.character(indexes[indexes%%2 == 1])
even = paste('Sample', even, sep=' ')
odd = paste('Sample', odd, sep=' ')
#rep1 = CtData[odd, c(1:6)]
rep1 = CtData[CtData$Name %in% odd, c(1:6)]
#rep1 = rep1[complete.cases(rep1$sample),]
#rep2 = CtData[even, c(1:6)]
rep2 = CtData[CtData$Name %in% even, c(1:6)]
#rep2 = rep2[complete.cases(rep2$sample),]
boundData = merge(rep1, rep2, by.x='sample', by.y='sample')
################ Remove columns that do not add information
usefulData = boundData[,c(1,2,3,4,6,7,11)]
# Compute the mean and the standard deviation of the replicates
usefulData$meanCP = rowMeans(cbind(usefulData$Cp.x, usefulData$Cp.y), na.rm=T)
usefulData$stdDevCP = apply(cbind(usefulData$Cp.x, usefulData$Cp.y), 1, sd, na.rm=T)
# Package the output in a list
result = list(rep1, rep2, usefulData)
return (result)
}
# ddCt of geneOfInterest in sampleOfInterest relative to referenceSample.
# `data` is keyed by "<sample> <gene>" row names and must carry a 'meanCp'
# column; dCt here is housekeeping Cp minus gene Cp, and ddCt is the sample
# dCt minus the reference-sample dCt.
# NOTE(review): extractReplicates() writes 'meanCP' (capital P) — confirm
# the column name expected here matches the upstream data frame.
ddCTcalculate = function(geneOfInterest, sampleOfInterest='020_N', houseKeepingGene='GAPDH', referenceSample='020_N', data=rawData)
{
  houseKey = unique(paste(sampleOfInterest, houseKeepingGene))
  geneKey = paste(sampleOfInterest, geneOfInterest)
  # dCt for the sample of interest
  sample_dCt = data[houseKey, 'meanCp'] - data[geneKey, 'meanCp']
  # dCt for the reference sample
  refHouseKey = paste(referenceSample, houseKeepingGene)
  refGeneKey = paste(referenceSample, geneOfInterest)
  reference_dCt = data[refHouseKey, 'meanCp'] - data[refGeneKey, 'meanCp']
  # ddCt = sample dCt minus reference dCt
  sample_dCt - reference_dCt
}
# Fold change (2^ddCt) of geneOfInterest in sampleOfInterest relative to
# referenceSample. Arguments are identical to ddCTcalculate().
# CONSISTENCY FIX: the body previously duplicated every line of
# ddCTcalculate(); it now delegates so the ddCt formula lives in one place.
foldChangecalculate = function(geneOfInterest, sampleOfInterest='020_N', houseKeepingGene='GAPDH', referenceSample='020_N', data=rawData)
{
  ddCt = ddCTcalculate(geneOfInterest, sampleOfInterest, houseKeepingGene, referenceSample, data)
  # This script's dCt convention (housekeeping Cp minus gene Cp) makes 2^ddCt
  # the fold change directly, matching the original implementation.
  foldChange = 2^ddCt
  return (foldChange)
}
# Grouped barchart of ddCt values, without error bars.
# Expressionformula is e.g. ddCt ~ origin (whatever the bars are grouped by);
# bars within a group are coloured by the data frame's gene.x column.
# Returns a trellis object; printing it draws the plot.
plot_ddCt = function(Expressionformula, dataFrame, graphTitle='A grouped barchart', yaxisLabel='y axis') {
  # FIX: barchart() belongs to lattice, which this script never attaches
  # (only reshape/ggplot2/plyr/gtools are loaded). Qualify the call so the
  # function works without a prior library(lattice).
  p = lattice::barchart(Expressionformula, data = dataFrame, groups = gene.x,
               scales = list(x = list(rot=90,cex=0.8)), main = graphTitle, ylab=yaxisLabel,
               auto.key=list(space="top", columns=3,
                             title="genes", cex.title=1))
  return (p)
}
niceGroupedBarPlot <- function (dataFrame, ddCt='ddCt', sampleOrigin="origin.x", gene="gene.x", graphTitle="A pretty plot") {
  # Grouped barchart of ddCt values: one bar group per sample, filled by gene.
  #
  # Args:
  #   dataFrame: data holding the columns named below.
  #   ddCt, sampleOrigin, gene: column NAMES (strings) for the y value, the
  #     x grouping and the fill grouping respectively.
  #   graphTitle: plot title.
  #
  # Bug fix: the defaults are strings, but the original passed the arguments
  # to aes() as bare symbols, so ggplot mapped the literal strings ("ddCt",
  # "origin.x", "gene.x") as constants instead of looking up the columns.
  # The .data pronoun performs the intended column lookup by name.
  ggplot(data=dataFrame, aes(x=.data[[sampleOrigin]], y=.data[[ddCt]], fill=.data[[gene]])) +
    geom_bar(stat="identity", position=position_dodge(), colour="black") +
    scale_fill_hue(name="Gene") +    # Set legend title
    xlab("Sample") + ylab("ddCt") +  # Set axis labels
    ggtitle(graphTitle) +            # Set title
    theme_bw(base_size=18)
}
niceErrorBarPlot <- function (summarisedData, xAxis=gene.x, yAxis=mean, groupVariable=cd133,
                              title='A title', xLabel='Gene', yLabel='Expression') {
  # ggplot grouped barchart with +/- 1 standard-error bars and significance
  # stars; summarisedData is expected to carry 'se' and 'star' columns
  # (see summariseStatistics_ddCt, which builds N/mean/sd/se).
  # NOTE(review): the xAxis/yAxis/groupVariable defaults are bare symbols
  # (gene.x, mean, cd133) captured lazily by aes(); whether they resolve to
  # the intended columns depends on ggplot2's evaluation environment -
  # fragile, confirm before reusing with non-default arguments.
  p = ggplot(summarisedData, aes(x=xAxis, y=yAxis, fill=groupVariable)) +
    geom_bar(position=position_dodge(), stat="identity") +
    geom_errorbar(aes(ymin=yAxis-se, ymax=yAxis+se),
                  width=.2, # Width of the error bars
                  position=position_dodge(.9)) +
    xlab(xLabel) +
    ylab(yLabel) +
    scale_fill_hue(name="CD133")+#, Legend label, use darker colors
    ggtitle(title) +
    # y-axis breaks at 0, 4, 8, ..., 80
    scale_y_continuous(breaks=0:20*4) +
    # Setting vjust to a negative number moves the asterix up a little bit to make the graph prettier
    geom_text(aes(label=star), colour="black", vjust=-2, size=10) +
    theme_bw(base_size=16)
  return (p)
}
build_ddCTmatrix = function(ddCtFile, originColumn=2, geneColumn=3, ddCtColumn=9, output='matrix.txt') {
  # A function to coerce ddCT values and genes into a double matrix for
  # statistical analysis, via an external python helper script.
  # origin is the name of the sample eg #020 CD133 negative
  #
  # Args:
  #   ddCtFile: input file handed to the python script.
  #   originColumn, geneColumn, ddCtColumn: column indices handed to the script.
  #   output: file the script's stdout is redirected to, then read back in.
  pythonCall = paste('./buildNumericMatrix_ddCt.py', '-i', ddCtFile,
                     '-o', originColumn, '-g', geneColumn, '-d', ddCtColumn,
                     '>', output, sep=' ')
  system(pythonCall)
  # Bug fix: read back the file named by `output`; the original hard-coded
  # 'matrix.txt', which read stale or wrong data whenever a caller supplied a
  # different output path.
  f = read.delim(output, header=T)
  # sort the dataframe by its first column
  g = f[with(f, order(f[,1], decreasing=F)),]
  # use the first column as row names (NB: the column itself is kept)
  row.names(g) = g[,1]
  return (g)
}
summariseStatistics_ddCt <- function (dataFrame, groupVariableA='cd133', gropVariableB='gene.x') {
  # Generate N, mean, sd and se statistics of the ddCt column of `dataFrame`,
  # grouped by the two column names given as strings.
  # Bug fix: the original called ddply on the global `cd133negPos`, silently
  # ignoring the dataFrame argument.
  cData = ddply(dataFrame, c(groupVariableA, gropVariableB), summarise,
                N = sum(!is.na(ddCt)),
                mean = mean(ddCt, na.rm=TRUE),
                sd = sd(ddCt, na.rm=TRUE),
                se = sd / sqrt(N) )
  # Add a column with stars describing if a test is significant
  # percentData$star <- " "
  # percentData$star[percentData$adjust < .05] = "*"
  # percentData$star[percentData$adjust < .01] <- "**"
  # percentData$star[percentData$adjust < .001] <- "***"
  return (cData)
  # The dataFrame of input should conform to the type below
  # sample location origin gene Cp.x location Cp meanCP stdDevCP ddCt cd133
  #020_N ATP5G3 B2 020_N ATP5G3 25.68 B3 25.11 25.395 0.40305087 1 negative
  #020_N B2M B4 020_N B2M 21.34 B5 21.52 21.430 0.12727922 1 negative
  #020_N CREB1 B6 020_N CREB1 26.47 B7 26.22 26.345 0.17677670 1 negative
}
# # Run this at the end to intialise the package
# package.skeleton(name = 'qPCRcustomFunctions',
# list=c('buildDataFrameForddCT', 'ddCTcalculate','extractReplicates',
# 'plot_ddCt', 'splitSampleName', 'transposeLinear', 'cp', 'map'),
# path='~/Documents/Rscripts/', force=F)
# # path='/Library/Frameworks/R.framework/Versions/3.0/Resources/library/', force=F) | /Templates/qPCRFunctions.R | no_license | dvbrown/Rscripts | R | false | false | 8,821 | r | library(reshape)
library(ggplot2)
library(plyr)
library(gtools)
#source('~/Documents/Rscripts/120704-sortDataFrame.R')
# Intialise the package at the end by building a list containing all the functions in this script
# Transpose the 384 well map (tab text) using a python script and output into another file
transposeLinear = function(well384Map, linearMapFile='output.txt') {
  # Shell out to a helper python script that linearises a 384-well plate map,
  # then read the two-column result back in.
  #
  # Args:
  #   well384Map: path to the tab-delimited 384-well plate map.
  #   linearMapFile: path the transposed map is written to, then read from.
  # Returns: data.frame with columns 'location' and 'sample'.
  # NOTE(review): depends on the hard-coded script path below existing on
  # this machine.
  pythonCall = paste('~/Documents/Eclipseworkspace/Bioinformatics/Filtering/transposeLinear.py -i', well384Map, '>', linearMapFile, sep=' ')
  system(pythonCall)
  sampleLabels = read.delim(linearMapFile, header=F)
  colnames(sampleLabels) = c('location', 'sample')
  return (sampleLabels)
}
#split sample names by whitespace. Takes a dataFrame and splits anything with whitespace into 2 columns
splitSampleName = function(plateMap) {
  # Split the 'sample' column (e.g. "020_N GAPDH") on whitespace into
  # 'origin' and 'gene' columns, appended to plateMap.
  # colsplit.factor comes from the reshape package loaded at the top of
  # this file.
  splitted = colsplit.factor(plateMap[['sample']], split = " ", names = c('origin', 'gene'))
  result = cbind(plateMap, splitted)
  return (result)
}
buildDataFrameFromddCT = function(plateMap, CtData) {
  # Join the plate map (sample labels) onto the raw Cp data by well position,
  # keeping only columns 3, 4, 5 and 8 of the raw export.
  keep_cols <- c(3, 4, 5, 8)
  cp_data <- CtData[, keep_cols]
  merge.data.frame(plateMap, cp_data, by.x = 'location', by.y = 'Pos')
}
extractReplicates <- function (indexes, ctData) {
  # Pair up technical replicates laid out in alternating wells of a 384-well
  # plate: odd "Sample N" rows are replicate 1, even rows replicate 2.
  # Returns list(rep1, rep2, usefulData) where usefulData carries the merged
  # replicates plus meanCP / stdDevCP columns.
  #
  # NOTE(review): the `indexes` parameter is immediately overwritten with
  # 1:384 below, so the argument is effectively ignored - confirm whether it
  # should be honoured or dropped from the signature.
  indexes = c(1:384)
  # Keep only cases with data in them as the merge function doesn't work with NAs
  CtData = ctData[complete.cases(ctData[,3]),]
  # Subset each Cp into its replicates. Takes a vector with the indexes to to subset and then takes the
  # even entries and odd entries separately from the dataframe containing cp values
  even = as.character(indexes[indexes%%2 == 0])
  odd = as.character(indexes[indexes%%2 == 1])
  # Build the "Sample <n>" labels matched against CtData$Name
  even = paste('Sample', even, sep=' ')
  odd = paste('Sample', odd, sep=' ')
  #rep1 = CtData[odd, c(1:6)]
  rep1 = CtData[CtData$Name %in% odd, c(1:6)]
  #rep1 = rep1[complete.cases(rep1$sample),]
  #rep2 = CtData[even, c(1:6)]
  rep2 = CtData[CtData$Name %in% even, c(1:6)]
  #rep2 = rep2[complete.cases(rep2$sample),]
  # Pair replicate rows by their shared sample label
  boundData = merge(rep1, rep2, by.x='sample', by.y='sample')
  ################ Remove columns that do not add information
  usefulData = boundData[,c(1,2,3,4,6,7,11)]
  # Compute the mean and the standard deviation of the replicates
  usefulData$meanCP = rowMeans(cbind(usefulData$Cp.x, usefulData$Cp.y), na.rm=T)
  usefulData$stdDevCP = apply(cbind(usefulData$Cp.x, usefulData$Cp.y), 1, sd, na.rm=T)
  # Package the output in a list
  result = list(rep1, rep2, usefulData)
  return (result)
}
ddCTcalculate = function(geneOfInterest, sampleOfInterest='020_N', houseKeepingGene='GAPDH', referenceSample='020_N', data=rawData)
{
  # ddCt = dCt(sample of interest) - dCt(reference sample), where
  # dCt = Cp(housekeeping gene) - Cp(gene of interest).
  # `data` must have "<sample> <gene>" row names and a 'meanCp' column.
  # NOTE(review): indexes a 'meanCp' column, but extractReplicates() builds
  # 'meanCP' - confirm the expected capitalisation of the input data.
  cp_of <- function(rows) data[rows, 'meanCp']
  # dCt for the sample of interest
  sample_dCt <- cp_of(unique(paste(sampleOfInterest, houseKeepingGene))) -
    cp_of(paste(sampleOfInterest, geneOfInterest))
  # dCt for the reference sample
  reference_dCt <- cp_of(paste(referenceSample, houseKeepingGene)) -
    cp_of(paste(referenceSample, geneOfInterest))
  sample_dCt - reference_dCt
}
foldChangecalculate = function(geneOfInterest, sampleOfInterest='020_N', houseKeepingGene='GAPDH', referenceSample='020_N', data=rawData)
{
  # Fold change (2^ddCt) of geneOfInterest in sampleOfInterest relative to
  # referenceSample, normalised against houseKeepingGene. `data` must have
  # "<sample> <gene>" row names and a 'meanCp' column.
  # NOTE(review): indexes a 'meanCp' column, but extractReplicates() builds
  # 'meanCP' - confirm the expected capitalisation of rawData's column.
  sampleHouse = unique(paste(sampleOfInterest, houseKeepingGene))
  sampleGene = paste(sampleOfInterest, geneOfInterest)
  # Extract the Cp of the housekeeping gene and the gene of interest
  houseCp = data[sampleHouse, 'meanCp']
  geneCp = data[sampleGene, 'meanCp']
  # dCt calculation for the sample of interest
  dCt = houseCp - geneCp
  # Extract the meanCP for the reference sample. First get the index of the housekeeping gene, then the gene of interest
  refDctRowHouse = paste(referenceSample, houseKeepingGene)
  refDctRowGene = paste(referenceSample, geneOfInterest)
  # Calculate dCt for the reference sample
  referenceSample_dCt = data[refDctRowHouse, 'meanCp'] - data[refDctRowGene, 'meanCp']
  # Calculate ddCt, then exponentiate to get the fold change
  ddCt = dCt - referenceSample_dCt
  foldChange = 2^ddCt
  return (foldChange)
}
plot_ddCt = function(Expressionformula, dataFrame, graphTitle='A grouped barchart', yaxisLabel='y axis') {
  # Lattice grouped barchart of expression values, without error bars.
  # Expressionformula is of the type ddCt ~ cellType (whatever you want the
  # bars grouped by); bars are coloured by dataFrame's gene.x column.
  # NOTE(review): barchart() is from the lattice package, which is never
  # loaded in this file - presumably attached by the caller; confirm.
  p = barchart(Expressionformula, data = dataFrame, groups = gene.x,
               scales = list(x = list(rot=90,cex=0.8)), main = graphTitle, ylab=yaxisLabel,
               auto.key=list(space="top", columns=3,
                             title="genes", cex.title=1))
  # Returns a trellis object; it is drawn when printed.
  return (p)
}
niceGroupedBarPlot <- function (dataFrame, ddCt='ddCt', sampleOrigin="origin.x", gene="gene.x", graphTitle="A pretty plot") {
  # Grouped barchart of ddCt values: one bar group per sample, filled by gene.
  # ddCt, sampleOrigin and gene are column NAMES (strings).
  #
  # Bug fix: the defaults are strings, but the original passed the arguments
  # to aes() as bare symbols, so ggplot mapped the literal strings as
  # constants instead of looking up the columns. The .data pronoun performs
  # the intended column lookup by name.
  ggplot(data=dataFrame, aes(x=.data[[sampleOrigin]], y=.data[[ddCt]], fill=.data[[gene]])) +
    geom_bar(stat="identity", position=position_dodge(), colour="black") +
    scale_fill_hue(name="Gene") +    # Set legend title
    xlab("Sample") + ylab("ddCt") +  # Set axis labels
    ggtitle(graphTitle) +            # Set title
    theme_bw(base_size=18)
}
niceErrorBarPlot <- function (summarisedData, xAxis=gene.x, yAxis=mean, groupVariable=cd133,
                              title='A title', xLabel='Gene', yLabel='Expression') {
  # ggplot grouped barchart with +/- 1 standard-error bars and significance
  # stars; summarisedData must carry 'se' and 'star' columns (see
  # summariseStatistics_ddCt).
  # NOTE(review): the xAxis/yAxis/groupVariable defaults are bare symbols
  # captured lazily by aes(); resolution is environment-dependent and
  # fragile - confirm before reusing with non-default arguments.
  p = ggplot(summarisedData, aes(x=xAxis, y=yAxis, fill=groupVariable)) +
    geom_bar(position=position_dodge(), stat="identity") +
    geom_errorbar(aes(ymin=yAxis-se, ymax=yAxis+se),
                  width=.2, # Width of the error bars
                  position=position_dodge(.9)) +
    xlab(xLabel) +
    ylab(yLabel) +
    scale_fill_hue(name="CD133")+#, Legend label, use darker colors
    ggtitle(title) +
    # y-axis breaks at 0, 4, 8, ..., 80
    scale_y_continuous(breaks=0:20*4) +
    # Setting vjust to a negative number moves the asterix up a little bit to make the graph prettier
    geom_text(aes(label=star), colour="black", vjust=-2, size=10) +
    theme_bw(base_size=16)
  return (p)
}
build_ddCTmatrix = function(ddCtFile, originColumn=2, geneColumn=3, ddCtColumn=9, output='matrix.txt') {
  # A function to coerce ddCT values and genes into a double matrix for
  # statistical analysis, via an external python helper script.
  # origin is the name of the sample eg #020 CD133 negative
  pythonCall = paste('./buildNumericMatrix_ddCt.py', '-i', ddCtFile,
                     '-o', originColumn, '-g', geneColumn, '-d', ddCtColumn,
                     '>', output, sep=' ')
  system(pythonCall)
  # Bug fix: read back the file named by `output`; the original hard-coded
  # 'matrix.txt' and so ignored any caller-supplied output path.
  f = read.delim(output, header=T)
  # sort the dataframe by its first column
  g = f[with(f, order(f[,1], decreasing=F)),]
  # use the first column as row names (NB: the column itself is kept)
  row.names(g) = g[,1]
  return (g)
}
summariseStatistics_ddCt <- function (dataFrame, groupVariableA='cd133', gropVariableB='gene.x') {
  # Generate N, mean, sd and se statistics of the ddCt column of `dataFrame`,
  # grouped by the two column names given as strings.
  # Bug fix: the original called ddply on the global `cd133negPos`, silently
  # ignoring the dataFrame argument.
  cData = ddply(dataFrame, c(groupVariableA, gropVariableB), summarise,
                N = sum(!is.na(ddCt)),
                mean = mean(ddCt, na.rm=TRUE),
                sd = sd(ddCt, na.rm=TRUE),
                se = sd / sqrt(N) )
  # Add a column with stars describing if a test is significant
  # percentData$star <- " "
  # percentData$star[percentData$adjust < .05] = "*"
  # percentData$star[percentData$adjust < .01] <- "**"
  # percentData$star[percentData$adjust < .001] <- "***"
  return (cData)
  # The dataFrame of input should conform to the type below
  # sample location origin gene Cp.x location Cp meanCP stdDevCP ddCt cd133
  #020_N ATP5G3 B2 020_N ATP5G3 25.68 B3 25.11 25.395 0.40305087 1 negative
  #020_N B2M B4 020_N B2M 21.34 B5 21.52 21.430 0.12727922 1 negative
  #020_N CREB1 B6 020_N CREB1 26.47 B7 26.22 26.345 0.17677670 1 negative
}
# # Run this at the end to intialise the package
# package.skeleton(name = 'qPCRcustomFunctions',
# list=c('buildDataFrameForddCT', 'ddCTcalculate','extractReplicates',
# 'plot_ddCt', 'splitSampleName', 'transposeLinear', 'cp', 'map'),
# path='~/Documents/Rscripts/', force=F)
# # path='/Library/Frameworks/R.framework/Versions/3.0/Resources/library/', force=F) |
# Day 8 - boot computer with R6
library(tidyverse)
library(R6)
# https://r6.r-lib.org/articles/Introduction.html
# a program is a series of instructions
# A computer runs a program
# An R6 "computer" that executes a boot program (a data frame with columns
# `op` and `arg`) until it either finishes or is about to revisit a line.
Computer <-
  R6Class("Computer",
          public = list(
            # bindings to be initialized
            pgm = NULL,    # the program: data frame of op / arg rows
            line = NULL,   # instruction pointer (1-based row index)
            acc_g = NULL,  # global accumulator
            it = NULL,     # per-line visit counter, used for loop detection
            # called when an instance is created with $new()
            initialize = function(pgm = NA) {
              self$pgm <- pgm
              self$line <- 1
              self$acc_g <- 0
              # decided to make this a state of the computer, not the program.
              # Not sure.
              self$it <- rep(0, times = nrow(self$pgm))
            },
            # called with $run() on the instance; runs until a line is about
            # to execute a second time ("loop") or the pointer moves past the
            # last line ("normal").
            run = function() {
              repeat {
                line <- self$line
                # update state
                self$it[line] <- self$it[line] + 1
                # oh oh, loop: this line is being visited a second time
                if(self$it[line] == 2) {
                  term <- "loop"
                  break()
                }
                # read the program op and execute it
                # note the use of private$ vs. self$
                if(self$pgm$op[line] == "nop") {
                  private$nop()
                } else if(self$pgm$op[line] == "jmp") {
                  private$jmp(self$pgm$arg[line])
                } else {
                  private$acc(self$pgm$arg[line])
                }
                # check if done
                if(self$line > nrow(self$pgm)) {
                  term <- "normal"
                  break() # normal termination
                }
              }
              # NB: c(numeric, character) coerces, so this returns a
              # character vector: c("<accumulator>", "<termination type>")
              return(c(self$acc_g, term))
            },
            print = function() {
              print(self$pgm)
            }
          ),
          # ops are accessed by run method
          private = list(
            nop = function() {
              self$line <- self$line + 1
            },
            jmp = function(arg) {
              self$line <- self$line + arg
            },
            acc = function(arg) {
              self$acc_g <- self$acc_g + arg
              self$line <- self$line + 1
            }
          )
  )
# Test: run the small example program and inspect the (accumulator,
# termination) pair printed by run().
boot_pgm <-
  read_delim(file = "data-naa/input8_test.txt", delim = " ", col_names = c("op", "arg"))
boot_cmp <- Computer$new(pgm = boot_pgm) # create a computer with boot_pgm
boot_cmp$run()
# Part 1: same, on the real puzzle input
boot_pgm <-
  read_delim(file = "data-naa/input8.txt", delim = " ", col_names = c("op", "arg"))
boot_cmp <- Computer$new(pgm = boot_pgm) # create a computer with boot_pgm
boot_cmp$run()
# Part 2
# swap one jmp to nop (or nop to jmp);
# edit program, run, until "normal" termination
edit_pgm <- function(pgm, line) {
  # Toggle the op at `line` between "jmp" and "nop"; "acc" rows are left
  # untouched. Returns the (possibly) edited program.
  op <- pgm$op[line]
  if (op == "jmp") {
    pgm$op[line] <- "nop"
  } else if (op == "nop") {
    pgm$op[line] <- "jmp"
  }
  pgm
}
fix_pgm <- function(pgm) {
  # Flip each jmp/nop line in turn and re-run the program until one edit
  # terminates normally; returns the c(accumulator, termination) pair from
  # Computer$run() for that run (or for the last attempt if none succeed).
  # Idiom fix: seq_len(nrow(pgm)) replaces seq(1:nrow(pgm)), which also
  # misbehaved for an empty program (1:0 yields c(1, 0)).
  for (line in seq_len(nrow(pgm))) {
    pgm_edited <- edit_pgm(pgm, line)
    # Only difference with R6: a fresh computer per candidate program
    boot_cmp <- Computer$new(pgm = pgm_edited)
    termination <- boot_cmp$run()
    if (termination[2] == "normal") break
  }
  termination
}
fix_pgm(boot_pgm)
| /aoc2020_day8_R6.R | no_license | nalsalam/aoc2020 | R | false | false | 3,000 | r | # Day 8 - boot computer with R6
library(tidyverse)
library(R6)
# https://r6.r-lib.org/articles/Introduction.html
# a program is a series of instructions
# A computer runs a program
# An R6 "computer" that executes a boot program (a data frame with columns
# `op` and `arg`) until it either finishes or is about to revisit a line.
Computer <-
  R6Class("Computer",
          public = list(
            # bindings to be initialized
            pgm = NULL,    # the program: data frame of op / arg rows
            line = NULL,   # instruction pointer (1-based row index)
            acc_g = NULL,  # global accumulator
            it = NULL,     # per-line visit counter, used for loop detection
            # called when an instance is created with $new()
            initialize = function(pgm = NA) {
              self$pgm <- pgm
              self$line <- 1
              self$acc_g <- 0
              # decided to make this a state of the computer, not the program.
              # Not sure.
              self$it <- rep(0, times = nrow(self$pgm))
            },
            # called with $run() on the instance; runs until a line is about
            # to execute a second time ("loop") or the pointer moves past the
            # last line ("normal").
            run = function() {
              repeat {
                line <- self$line
                # update state
                self$it[line] <- self$it[line] + 1
                # oh oh, loop: this line is being visited a second time
                if(self$it[line] == 2) {
                  term <- "loop"
                  break()
                }
                # read the program op and execute it
                # note the use of private$ vs. self$
                if(self$pgm$op[line] == "nop") {
                  private$nop()
                } else if(self$pgm$op[line] == "jmp") {
                  private$jmp(self$pgm$arg[line])
                } else {
                  private$acc(self$pgm$arg[line])
                }
                # check if done
                if(self$line > nrow(self$pgm)) {
                  term <- "normal"
                  break() # normal termination
                }
              }
              # NB: c(numeric, character) coerces, so this returns a
              # character vector: c("<accumulator>", "<termination type>")
              return(c(self$acc_g, term))
            },
            print = function() {
              print(self$pgm)
            }
          ),
          # ops are accessed by run method
          private = list(
            nop = function() {
              self$line <- self$line + 1
            },
            jmp = function(arg) {
              self$line <- self$line + arg
            },
            acc = function(arg) {
              self$acc_g <- self$acc_g + arg
              self$line <- self$line + 1
            }
          )
  )
# Test: run the small example program and inspect the (accumulator,
# termination) pair printed by run().
boot_pgm <-
  read_delim(file = "data-naa/input8_test.txt", delim = " ", col_names = c("op", "arg"))
boot_cmp <- Computer$new(pgm = boot_pgm) # create a computer with boot_pgm
boot_cmp$run()
# Part 1: same, on the real puzzle input
boot_pgm <-
  read_delim(file = "data-naa/input8.txt", delim = " ", col_names = c("op", "arg"))
boot_cmp <- Computer$new(pgm = boot_pgm) # create a computer with boot_pgm
boot_cmp$run()
# Part 2
# swap one jmp to nop (or nop to jmp);
# edit program, run, until "normal" termination
edit_pgm <- function(pgm, line) {
  # Toggle the op at `line` between "jmp" and "nop"; "acc" rows are left
  # untouched. Returns the (possibly) edited program.
  current_op <- pgm$op[line]
  if (current_op == "jmp") {
    pgm$op[line] <- "nop"
  } else if (current_op == "nop") {
    pgm$op[line] <- "jmp"
  }
  pgm
}
fix_pgm <- function(pgm) {
  # Flip each jmp/nop line in turn and re-run the program until one edit
  # terminates normally; returns the c(accumulator, termination) pair from
  # Computer$run() for that run (or for the last attempt if none succeed).
  # Idiom fix: seq_len(nrow(pgm)) replaces seq(1:nrow(pgm)), which also
  # misbehaved for an empty program (1:0 yields c(1, 0)).
  for (line in seq_len(nrow(pgm))) {
    pgm_edited <- edit_pgm(pgm, line)
    # Only difference with R6: a fresh computer per candidate program
    boot_cmp <- Computer$new(pgm = pgm_edited)
    termination <- boot_cmp$run()
    if (termination[2] == "normal") break
  }
  termination
}
fix_pgm(boot_pgm)
|
#' Parsimony score of random postorder tree
#'
#' @param nTip number of tips (minimum 3)
#' @template morphyObjParam
#'
#' @return the parsimony score of a random tree, for the given Morphy object.
#'
#' @export
RandomTreeScore <- function (nTip, morphyObj) {
  # Score a random tree against the given Morphy object via compiled code.
  # Below three tips there is only one unrooted topology, so the score is
  # trivially 0 (after the same warning as before).
  if (nTip >= 3) {
    return(.Call('RANDOM_TREE_SCORE', as.integer(nTip), morphyObj))
  }
  warning("nTip < 3 not implemented, as there's only one unrooted topology.")
  0
}
#' Random postorder tree
#'
#' @param nTip Integer specifying the number of tips to include in the tree
#' (minimum 2).
#'
#' @return A list with three elements, each a vector of integers, respectively
#' containing:
#'
#' - The parent of each tip and node, in order
#'
#' - The left child of each node
#'
#' - The right child of each node.
#'
#' @family tree generation functions
#' @export
RandomMorphyTree <- function (nTip) {
  # A tree needs at least two tips; reject degenerate input up front.
  if (nTip >= 2) {
    return(.Call('RANDOM_TREE', as.integer(nTip)))
  }
  stop("nTip < 2 not implemented: a tip is not a tree.")
}
#' @importFrom graphics plot
plot.morphyTree <- function (morphyTree) {
  # Convert the Morphy (parent, left-child, right-child) encoding into an
  # ape-style "phylo" object (0-based indices shifted to 1-based) and plot it.
  parentOf <- morphyTree[[1]]
  left <- morphyTree[[2]]
  right <- morphyTree[[3]]
  nTip <- length(left) + 1L
  # Fix: spell out length.out; the original relied on partial matching of
  # the abbreviated argument name `len`.
  edge <- matrix(c(rep(seq(nTip, length.out = nTip - 1L), 2), right, left), ncol = 2) + 1L
  tree <- structure(list(edge = edge, Nnode = nTip - 1L, tip.label = seq_len(nTip) - 1L),
                    class = 'phylo')
  plot(tree)
}
| /R/RandomTreeScore.R | no_license | gitter-badger/TreeSearch | R | false | false | 1,448 | r | #' Parsimony score of random postorder tree
#'
#' @param nTip number of tips (minimum 3)
#' @template morphyObjParam
#'
#' @return the parsimony score of a random tree, for the given Morphy object.
#'
#' @export
RandomTreeScore <- function (nTip, morphyObj) {
  # Degenerate case: below three tips a single unrooted topology exists,
  # so warn and report a score of 0, as before.
  tooSmall <- nTip < 3
  if (tooSmall) {
    warning("nTip < 3 not implemented, as there's only one unrooted topology.")
    return (0)
  }
  # Delegate tree generation and scoring to compiled code.
  .Call('RANDOM_TREE_SCORE', as.integer(nTip), morphyObj)
}
#' Random postorder tree
#'
#' @param nTip Integer specifying the number of tips to include in the tree
#' (minimum 2).
#'
#' @return A list with three elements, each a vector of integers, respectively
#' containing:
#'
#' - The parent of each tip and node, in order
#'
#' - The left child of each node
#'
#' - The right child of each node.
#'
#' @family tree generation functions
#' @export
RandomMorphyTree <- function (nTip) {
  # Guard: a single tip is not a tree.
  tooSmall <- nTip < 2
  if (tooSmall) {
    stop("nTip < 2 not implemented: a tip is not a tree.")
  }
  # Random postorder tree from compiled code: parent / left / right vectors.
  .Call('RANDOM_TREE', as.integer(nTip))
}
#' @importFrom graphics plot
plot.morphyTree <- function (morphyTree) {
  # Convert the Morphy (parent, left-child, right-child) encoding into an
  # ape-style "phylo" object (0-based indices shifted to 1-based) and plot it.
  parentOf <- morphyTree[[1]]
  left <- morphyTree[[2]]
  right <- morphyTree[[3]]
  nTip <- length(left) + 1L
  # Fix: spell out length.out; the original relied on partial matching of
  # the abbreviated argument name `len`.
  edge <- matrix(c(rep(seq(nTip, length.out = nTip - 1L), 2), right, left), ncol = 2) + 1L
  tree <- structure(list(edge = edge, Nnode = nTip - 1L, tip.label = seq_len(nTip) - 1L),
                    class = 'phylo')
  plot(tree)
}
|
# Given the pooled vehicles as input, compute a target coordinate for each
# vehicle so the vehicles can be moved into position within their grid cell.
# Adds columns: X_intra_relo; Y_intra_relo
IntraZonalPositionCalculator <- function(PoolVehicle,type="intra_s1",time){
  # Args:
  #   PoolVehicle: data frame of idle vehicles with grid_id, X, Y columns.
  #   type: "intra_s1" lays vehicles out on a per-grid pod template
  #         (global PodCoord + grid); "intra_s2" balances vehicles across
  #         the pods recorded in the global PodInfo[[time]].
  #   time: index into PodInfo (only used for "intra_s2").
  # NOTE(review): relies on globals PodCoord, grid, PodInfo - confirm they
  # are loaded before calling.
  if (type=="intra_s1"){
    # Count vehicles per grid cell; the template in PodCoord is picked by
    # that count.
    num_pod_per_grid<-data.frame(table(PoolVehicle$grid_id))
    colnames(num_pod_per_grid)<-c("grid_id","num_pod")
    num_pod_per_grid$grid_id<-as.numeric(as.character(num_pod_per_grid$grid_id))
    PoolVehicle_output<-NULL
    for (k in 1:nrow(num_pod_per_grid)){
      id_tmp<-num_pod_per_grid$grid_id[k]
      PoolVehicle_tmp<- PoolVehicle %>% filter(grid_id==id_tmp)
      # Template coordinates, shifted to this grid cell's lower-left corner.
      destination<-PodCoord[[nrow(PoolVehicle_tmp)]]
      destination$x <-destination$x+ grid %>% filter(dprt_grid==id_tmp) %>% pull(X_lower_left)
      destination$y <-destination$y+ grid %>% filter(dprt_grid==id_tmp) %>% pull(Y_lower_left)
      X_intra_relo<-NULL
      Y_intra_relo<-NULL
      # Greedy assignment: each vehicle takes the (Manhattan-)nearest free
      # destination, which is then removed from the pool.
      for (m in 1:nrow(PoolVehicle_tmp)){
        vehicle_position_match <- which.min(abs(destination$x-PoolVehicle_tmp[m,]$X)+
                                              abs(destination$y-PoolVehicle_tmp[m,]$Y))
        X_intra_tmp <- destination$x[vehicle_position_match]
        Y_intra_tmp <- destination$y[vehicle_position_match]
        destination <- destination[-vehicle_position_match,]
        X_intra_relo<-c(X_intra_relo,X_intra_tmp)
        Y_intra_relo<-c(Y_intra_relo,Y_intra_tmp)
      }
      PoolVehicle_tmp$X_intra_relo<-X_intra_relo
      PoolVehicle_tmp$Y_intra_relo<-Y_intra_relo
      PoolVehicle_output <- rbind(PoolVehicle_output,PoolVehicle_tmp)}
    return(PoolVehicle_output)} else if (type=="intra_s2"){
    # Pod ids exist here and the vehicle count per pod must be monitored,
    # so that vehicle numbers stay balanced across pods.
    num_pod_per_grid<-PodInfo[[time]]
    target_grid<-sort(unique(PoolVehicle$grid_id))
    PoolVehicle_output<-NULL
    pod_info_update<-NULL
    for (k in 1:length(target_grid)){
      id_tmp<-target_grid[k]
      PoolVehicle_tmp<- PoolVehicle %>% filter(grid_id==id_tmp)
      destination<-num_pod_per_grid %>% filter(grid_id==id_tmp)
      X_intra_relo<-NULL
      Y_intra_relo<-NULL
      # Send each car to the pod holding the fewest vehicles, keeping pods
      # balanced; among ties, pick the (Manhattan-)nearest pod.
      for (m in 1:nrow(PoolVehicle_tmp)){
        min_num_pod <- min(destination$num_veh)
        destination_tmp <- destination %>% filter(num_veh==min_num_pod)
        vehicle_position_match <- which.min(abs(destination_tmp$x-PoolVehicle_tmp[m,]$X)+
                                              abs(destination_tmp$y-PoolVehicle_tmp[m,]$Y))
        X_intra_tmp <- destination_tmp$x[vehicle_position_match]
        Y_intra_tmp <- destination_tmp$y[vehicle_position_match]
        pod_id_tmp <- destination_tmp$pod_id[vehicle_position_match]
        # Record the assignment so subsequent vehicles see the updated count.
        destination[destination$pod_id==pod_id_tmp,]$num_veh <- destination[destination$pod_id==pod_id_tmp,]$num_veh+1
        X_intra_relo<-c(X_intra_relo,X_intra_tmp)
        Y_intra_relo<-c(Y_intra_relo,Y_intra_tmp)
      }
      PoolVehicle_tmp$X_intra_relo<-X_intra_relo
      PoolVehicle_tmp$Y_intra_relo<-Y_intra_relo
      PoolVehicle_output <- rbind(PoolVehicle_output,PoolVehicle_tmp)
    }
    return(PoolVehicle_output)
  }
}
| /module/utils/IntraZonalPositionCalculator.R | no_license | jihoyeo/taxi-relocation | R | false | false | 3,569 | r | # input으로 Pooled vehicles이 들어왔을 때 각 차량들의 좌표값을 계산
# 이 좌표값에 따라 차량들이 움직일 수 있도록
# X_intra_relo; Y_intra_relo
IntraZonalPositionCalculator <- function(PoolVehicle,type="intra_s1",time){
if (type=="intra_s1"){
num_pod_per_grid<-data.frame(table(PoolVehicle$grid_id))
colnames(num_pod_per_grid)<-c("grid_id","num_pod")
num_pod_per_grid$grid_id<-as.numeric(as.character(num_pod_per_grid$grid_id))
PoolVehicle_output<-NULL
for (k in 1:nrow(num_pod_per_grid)){
id_tmp<-num_pod_per_grid$grid_id[k]
PoolVehicle_tmp<- PoolVehicle %>% filter(grid_id==id_tmp)
destination<-PodCoord[[nrow(PoolVehicle_tmp)]]
destination$x <-destination$x+ grid %>% filter(dprt_grid==id_tmp) %>% pull(X_lower_left)
destination$y <-destination$y+ grid %>% filter(dprt_grid==id_tmp) %>% pull(Y_lower_left)
X_intra_relo<-NULL
Y_intra_relo<-NULL
for (m in 1:nrow(PoolVehicle_tmp)){
vehicle_position_match <- which.min(abs(destination$x-PoolVehicle_tmp[m,]$X)+
abs(destination$y-PoolVehicle_tmp[m,]$Y))
X_intra_tmp <- destination$x[vehicle_position_match]
Y_intra_tmp <- destination$y[vehicle_position_match]
destination <- destination[-vehicle_position_match,]
X_intra_relo<-c(X_intra_relo,X_intra_tmp)
Y_intra_relo<-c(Y_intra_relo,Y_intra_tmp)
}
PoolVehicle_tmp$X_intra_relo<-X_intra_relo
PoolVehicle_tmp$Y_intra_relo<-Y_intra_relo
PoolVehicle_output <- rbind(PoolVehicle_output,PoolVehicle_tmp)}
return(PoolVehicle_output)} else if (type=="intra_s2"){
# Pod id를 만들고 pod의 차대수를 모니터링 해야함
# Pod사이의 차량수가 균형에 맞도록
num_pod_per_grid<-PodInfo[[time]]
target_grid<-sort(unique(PoolVehicle$grid_id))
PoolVehicle_output<-NULL
pod_info_update<-NULL
for (k in 1:length(target_grid)){
id_tmp<-target_grid[k]
PoolVehicle_tmp<- PoolVehicle %>% filter(grid_id==id_tmp)
destination<-num_pod_per_grid %>% filter(grid_id==id_tmp)
X_intra_relo<-NULL
Y_intra_relo<-NULL
# Pod에 있는 차량의 수가 가장 적은곳으로 차를 보냄 >> pod에 있는 차량의 균형을 유지
# 여러 후보가 있으면 가장 가까운 곳으로 보냄
for (m in 1:nrow(PoolVehicle_tmp)){
min_num_pod <- min(destination$num_veh)
destination_tmp <- destination %>% filter(num_veh==min_num_pod)
vehicle_position_match <- which.min(abs(destination_tmp$x-PoolVehicle_tmp[m,]$X)+
abs(destination_tmp$y-PoolVehicle_tmp[m,]$Y))
X_intra_tmp <- destination_tmp$x[vehicle_position_match]
Y_intra_tmp <- destination_tmp$y[vehicle_position_match]
pod_id_tmp <- destination_tmp$pod_id[vehicle_position_match]
destination[destination$pod_id==pod_id_tmp,]$num_veh <- destination[destination$pod_id==pod_id_tmp,]$num_veh+1
X_intra_relo<-c(X_intra_relo,X_intra_tmp)
Y_intra_relo<-c(Y_intra_relo,Y_intra_tmp)
}
PoolVehicle_tmp$X_intra_relo<-X_intra_relo
PoolVehicle_tmp$Y_intra_relo<-Y_intra_relo
PoolVehicle_output <- rbind(PoolVehicle_output,PoolVehicle_tmp)
}
return(PoolVehicle_output)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate.R
\name{aggregate.stars}
\alias{aggregate.stars}
\title{spatially or temporally aggregate stars object}
\usage{
\method{aggregate}{stars}(x, by, FUN, ..., drop = FALSE,
join = st_intersects, as_points = any(st_dimension(by) == 2, na.rm =
TRUE), rightmost.closed = FALSE, left.open = FALSE)
}
\arguments{
\item{x}{object of class \code{stars} with information to be aggregated}
\item{by}{object of class \code{sf} or \code{sfc} for spatial aggregation, for temporal aggregation a vector with time values (\code{Date}, \code{POSIXct}, or \code{PCICt}) that is interpreted as a sequence of left-closed, right-open time intervals or a string like "months", "5 days" or the like (see \link{cut.POSIXt}); if by is an object of class \code{stars}, it is converted to sfc by \code{st_as_sfc(by, as_points = FALSE)} thus ignoring its time component.}
\item{FUN}{aggregation function, such as \code{mean}}
\item{...}{arguments passed on to \code{FUN}, such as \code{na.rm=TRUE}}
\item{drop}{logical; ignored}
\item{join}{join function to find matches of x to by}
\item{as_points}{see \link[stars]{st_as_sf}: shall raster pixels be taken as points, or small square polygons?}
\item{rightmost.closed}{see \link{findInterval}}
\item{left.open}{logical; used for time intervals, see \link{findInterval} and \link{cut.POSIXt}}
}
\description{
spatially or temporally aggregate stars object, returning a data cube with lower spatial or temporal resolution
}
\examples{
# aggregate time dimension in format Date
tif = system.file("tif/L7_ETMs.tif", package = "stars")
t1 = as.Date("2018-07-31")
x = read_stars(c(tif, tif, tif, tif), along = list(time = c(t1, t1+1, t1+2, t1+3)))[,1:30,1:30]
st_get_dimension_values(x, "time")
x_agg_time = aggregate(x, by = t1 + c(0, 2, 4), FUN = max)
# aggregate time dimension in format Date - interval
by_t = "2 days"
x_agg_time2 = aggregate(x, by = by_t, FUN = max)
st_get_dimension_values(x_agg_time2, "time")
x_agg_time - x_agg_time2
# aggregate time dimension in format POSIXct
x = st_set_dimensions(x, 4, values = as.POSIXct(c("2018-07-31",
"2018-08-01",
"2018-08-02",
"2018-08-03")),
names = "time")
by_t = as.POSIXct(c("2018-07-31", "2018-08-02"))
x_agg_posix = aggregate(x, by = by_t, FUN = max)
st_get_dimension_values(x_agg_posix, "time")
x_agg_time - x_agg_posix
}
| /man/aggregate.stars.Rd | permissive | flahn/stars | R | false | true | 2,575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate.R
\name{aggregate.stars}
\alias{aggregate.stars}
\title{spatially or temporally aggregate stars object}
\usage{
\method{aggregate}{stars}(x, by, FUN, ..., drop = FALSE,
join = st_intersects, as_points = any(st_dimension(by) == 2, na.rm =
TRUE), rightmost.closed = FALSE, left.open = FALSE)
}
\arguments{
\item{x}{object of class \code{stars} with information to be aggregated}
\item{by}{object of class \code{sf} or \code{sfc} for spatial aggregation, for temporal aggregation a vector with time values (\code{Date}, \code{POSIXct}, or \code{PCICt}) that is interpreted as a sequence of left-closed, right-open time intervals or a string like "months", "5 days" or the like (see \link{cut.POSIXt}); if by is an object of class \code{stars}, it is converted to sfc by \code{st_as_sfc(by, as_points = FALSE)} thus ignoring its time component.}
\item{FUN}{aggregation function, such as \code{mean}}
\item{...}{arguments passed on to \code{FUN}, such as \code{na.rm=TRUE}}
\item{drop}{logical; ignored}
\item{join}{join function to find matches of x to by}
\item{as_points}{see \link[stars]{st_as_sf}: shall raster pixels be taken as points, or small square polygons?}
\item{rightmost.closed}{see \link{findInterval}}
\item{left.open}{logical; used for time intervals, see \link{findInterval} and \link{cut.POSIXt}}
}
\description{
spatially or temporally aggregate stars object, returning a data cube with lower spatial or temporal resolution
}
\examples{
# aggregate time dimension in format Date
tif = system.file("tif/L7_ETMs.tif", package = "stars")
t1 = as.Date("2018-07-31")
x = read_stars(c(tif, tif, tif, tif), along = list(time = c(t1, t1+1, t1+2, t1+3)))[,1:30,1:30]
st_get_dimension_values(x, "time")
x_agg_time = aggregate(x, by = t1 + c(0, 2, 4), FUN = max)
# aggregate time dimension in format Date - interval
by_t = "2 days"
x_agg_time2 = aggregate(x, by = by_t, FUN = max)
st_get_dimension_values(x_agg_time2, "time")
x_agg_time - x_agg_time2
# aggregate time dimension in format POSIXct
x = st_set_dimensions(x, 4, values = as.POSIXct(c("2018-07-31",
"2018-08-01",
"2018-08-02",
"2018-08-03")),
names = "time")
by_t = as.POSIXct(c("2018-07-31", "2018-08-02"))
x_agg_posix = aggregate(x, by = by_t, FUN = max)
st_get_dimension_values(x_agg_posix, "time")
x_agg_time - x_agg_posix
}
|
library("testthat")
library("syberiaMungebits2")
test_check("syberiaMungebits2")
| /tests/test-all.R | permissive | syberia/syberiaMungebits2 | R | false | false | 81 | r | library("testthat")
library("syberiaMungebits2")
test_check("syberiaMungebits2")
|
#' Tibble for intervals.
#'
#' Required column names are `chrom`, `start` and `end`.
#'
#' @param x A `data_frame`
#' @param ... params for [tibble::tibble()]
#' @param .validate check valid column names
#'
#' @rdname tbl_interval
#'
#' @examples
#' x <- tibble::tribble(
#' ~chrom, ~start, ~end,
#' 'chr1', 1, 50,
#' 'chr1', 10, 75,
#' 'chr1', 100, 120
#' )
#'
#' is.tbl_interval(x)
#'
#' x <- tbl_interval(x)
#' is.tbl_interval(x)
#'
#' @export
tbl_interval <- function(x, ..., .validate = TRUE) {
  # Coerce to tibble only when needed; tibbles pass through untouched.
  out <- if (tibble::is_tibble(x)) x else tibble::as_tibble(x, ...)
  # Optionally enforce the chrom/start/end column contract.
  if (.validate) out <- check_interval(out)
  # Prepend the interval class without duplicating it.
  class(out) <- union("tbl_ivl", class(out))
  out
}
#' Coerce objects to tbl_intervals.
#'
#' This is an S3 generic. valr includes methods to coerce tbl_df and GRanges
#' objects.
#'
#' @param x object to convert to tbl_interval.
#'
#' @return [tbl_interval()]
#'
#' @examples
#' \dontrun{
#' gr <- GenomicRanges::GRanges(
#' seqnames = S4Vectors::Rle(
#' c("chr1", "chr2", "chr1", "chr3"),
#' c(1, 1, 1, 1)),
#' ranges = IRanges::IRanges(
#' start = c(1, 10, 50, 100),
#' end = c(100, 500, 1000, 2000),
#' names = head(letters, 4)),
#' strand = S4Vectors::Rle(
#' c("-", "+"), c(2, 2))
#' )
#'
#' as.tbl_interval(gr)
#'
#' # There are two ways to convert a tbl_interval to GRanges:
#'
#' gr <- GenomicRanges::GRanges(
#' seqnames = S4Vectors::Rle(x$chrom),
#' ranges = IRanges::IRanges(
#' start = x$start + 1,
#' end = x$end,
#' names = x$name),
#' strand = S4Vectors::Rle(x$strand)
#' )
#' # or:
#'
#' gr <- GenomicRanges::makeGRangesFromDataFrame(dplyr::mutate(x, start = start +1))
#'
#' }
#'
#' @export
as.tbl_interval <- function(x) {
  # S3 generic; see the tbl_df, data.frame and GRanges methods below.
  UseMethod("as.tbl_interval")
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.tbl_df <- function(x) {
  # Thin wrapper: validation and classing happen inside tbl_interval().
  tbl_interval(x)
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.data.frame <- function(x) {
  # Thin wrapper: tbl_interval() handles the tibble coercion and validation.
  tbl_interval(x)
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.GRanges <- function(x) {
  # Convert a Bioconductor GRanges object to a tbl_interval.
  # https://www.biostars.org/p/89341/
  # Starts are shifted by -1 (GRanges 1-based starts -> the 0-based start
  # convention used here); end = start - 1 + width.
  res <- tibble(
    chrom = as.character(x@seqnames),
    start = x@ranges@start - 1,
    end = x@ranges@start - 1 + x@ranges@width,
    name = rep(".", length(x)),
    score = rep(".", length(x)),
    strand = as.character(x@strand)
  )
  # GRanges uses "*" for unstranded ranges; replace with "." here.
  res <- mutate(res, strand = ifelse(strand == "*", ".", strand))
  tbl_interval(res)
}
#' Construct a tbl_interval using tribble formatting.
#'
#' @rdname tbl_interval
#'
#' @return [tbl_interval()]
#
#' @export
trbl_interval <- function(...) {
  # tribble-style row-wise construction, then interval validation/classing.
  as.tbl_interval(tibble::tribble(...))
}
#' Test if the object is a tbl_interval.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the [tbl_interval()] class.
#' @export
is.tbl_interval <- function(x) {
  # inherits() is the idiomatic way to test class membership (and also
  # handles S4 objects correctly), rather than searching class(x) by hand.
  inherits(x, "tbl_ivl")
}
#' Tibble for reference sizes.
#'
#' Equivalent to information in UCSC "chromSizes" files. Required column names are:
#' `chrom` and `size`
#'
#' @param x A `data_frame`
#' @param ... params for [tibble::tibble()]
#' @param .validate check valid column names
#'
#' @rdname tbl_genome
#'
#' @examples
#' genome <- tibble::tribble(
#' ~chrom, ~size,
#' 'chr1', 1e6,
#' 'chr2', 1e7
#' )
#'
#' is.tbl_genome(genome)
#' genome <- tbl_genome(genome)
#' is.tbl_genome(genome)
#'
#' @export
tbl_genome <- function(x, ..., .validate = TRUE) {
  # Coerce to a tibble, optionally validate the required chrom/size
  # columns, then tag the result with the "tbl_gnm" class (prepended
  # without duplicating classes already present).
  res <- tibble::as_tibble(x, ...)
  if (.validate) {
    res <- check_genome(res)
  }
  class(res) <- union("tbl_gnm", class(res))
  res
}
#' Coerce objects to tbl_genome.
#'
#' This is an S3 generic. valr includes methods to coerce tbl_df and data.frame
#' objects.
#'
#' @param x object to convert to tbl_genome.
#'
#' @return [tbl_genome()]
#'
#' @export
as.tbl_genome <- function(x) {
  # S3 generic: dispatch on the class of `x`; see methods below.
  UseMethod("as.tbl_genome")
}
#' @export
#' @rdname as.tbl_genome
as.tbl_genome.tbl_df <- function(x) {
  # Tibbles only need validation and the class tag.
  tbl_genome(x)
}
#' @export
#' @rdname as.tbl_genome
as.tbl_genome.data.frame <- function(x) {
  # Plain data frames are coerced by the tbl_genome() constructor.
  tbl_genome(x)
}
#' Construct a tbl_genome using tribble formatting.
#'
#' @return [tbl_genome()]
#'
#' @rdname tbl_genome
#'
#' @examples
#' trbl_genome(
#' ~chrom, ~size,
#' 'chr1', 1e6
#' )
#'
#' @export
trbl_genome <- function(...) {
  # tribble()-style rows -> tibble -> validated tbl_genome.
  tbl_genome(tibble::tribble(...))
}
#' Test if the object is a tbl_genome.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the [tbl_genome()] class.
#' @export
is.tbl_genome <- function(x) {
  # inherits() is the idiomatic class test; avoids scanning class(x)
  # manually and is also correct for S4 objects.
  inherits(x, "tbl_gnm")
}
# Validity checks ---------------------------------------------------
check_interval <- function(x) {
  # An interval table must carry chrom/start/end columns; check_names()
  # errors if any are absent. Returns `x` unchanged for chaining.
  check_names(x, c("chrom", "start", "end"))
  x
}
check_genome <- function(x) {
  # A genome table needs chrom/size columns ...
  check_names(x, c("chrom", "size"))
  # ... and each reference sequence may appear at most once.
  ref_names <- x[["chrom"]]
  repeated <- duplicated(ref_names)
  if (any(repeated)) {
    stop(sprintf(
      "duplicate chroms in genome: %s",
      paste0(ref_names[repeated], collapse = ", ")
    ))
  }
  x
}
check_names <- function(x, expected) {
  # Verify that every name in `expected` is present in names(x);
  # errors listing the absent names otherwise.
  # `absent` deliberately avoids shadowing base::missing(), which the
  # original local variable name `missing` masked within this scope.
  absent <- setdiff(expected, names(x))
  if (length(absent) > 0) {
    stop(sprintf(
      "expected %d required names, missing: %s",
      length(expected),
      paste0(absent, collapse = ", ")
    ), call. = FALSE)
  }
  # Return the input invisibly so validators can be chained; previously
  # the function returned NULL (callers ignored the result either way).
  invisible(x)
}
| /R/tbls.r | permissive | romainfrancois/valr | R | false | false | 5,501 | r | #' Tibble for intervals.
#'
#' Required column names are `chrom`, `start` and `end`.
#'
#' @param x A `data_frame`
#' @param ... params for [tibble::tibble()]
#' @param .validate check valid column names
#'
#' @rdname tbl_interval
#'
#' @examples
#' x <- tibble::tribble(
#' ~chrom, ~start, ~end,
#' 'chr1', 1, 50,
#' 'chr1', 10, 75,
#' 'chr1', 100, 120
#' )
#'
#' is.tbl_interval(x)
#'
#' x <- tbl_interval(x)
#' is.tbl_interval(x)
#'
#' @export
tbl_interval <- function(x, ..., .validate = TRUE) {
  # Reuse `x` untouched when it is already a tibble; otherwise coerce.
  # (Skipping as_tibble() on tibbles avoids a needless conversion.)
  res <- if (tibble::is_tibble(x)) x else tibble::as_tibble(x, ...)
  if (.validate) {
    res <- check_interval(res)
  }
  # Tag with the "tbl_ivl" class, keeping any existing classes behind it.
  class(res) <- union("tbl_ivl", class(res))
  res
}
#' Coerce objects to tbl_intervals.
#'
#' This is an S3 generic. valr includes methods to coerce tbl_df and GRanges
#' objects.
#'
#' @param x object to convert to tbl_interval.
#'
#' @return [tbl_interval()]
#'
#' @examples
#' \dontrun{
#' gr <- GenomicRanges::GRanges(
#' seqnames = S4Vectors::Rle(
#' c("chr1", "chr2", "chr1", "chr3"),
#' c(1, 1, 1, 1)),
#' ranges = IRanges::IRanges(
#' start = c(1, 10, 50, 100),
#' end = c(100, 500, 1000, 2000),
#' names = head(letters, 4)),
#' strand = S4Vectors::Rle(
#' c("-", "+"), c(2, 2))
#' )
#'
#' as.tbl_interval(gr)
#'
#' # There are two ways to convert a tbl_interval to GRanges:
#'
#' gr <- GenomicRanges::GRanges(
#' seqnames = S4Vectors::Rle(x$chrom),
#' ranges = IRanges::IRanges(
#' start = x$start + 1,
#' end = x$end,
#' names = x$name),
#' strand = S4Vectors::Rle(x$strand)
#' )
#' # or:
#'
#' gr <- GenomicRanges::makeGRangesFromDataFrame(dplyr::mutate(x, start = start +1))
#'
#' }
#'
#' @export
as.tbl_interval <- function(x) {
  # S3 generic: dispatch on the class of `x`; methods below cover
  # tibbles, plain data frames and Bioconductor GRanges objects.
  UseMethod("as.tbl_interval")
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.tbl_df <- function(x) {
  # A tibble already has the right shape; just validate and tag the class.
  tbl_interval(x)
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.data.frame <- function(x) {
  # Plain data frames go through the same constructor/validation path.
  tbl_interval(x)
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.GRanges <- function(x) {
  # Conversion recipe adapted from https://www.biostars.org/p/89341/
  # NOTE(review): reaches into GRanges internals via `@` slots rather
  # than accessor functions (seqnames()/start()/width()/strand()) --
  # assumes the seqnames/ranges/strand slot layout; confirm against the
  # installed GenomicRanges version.
  res <- tibble(
    chrom = as.character(x@seqnames),
    # GRanges coordinates are 1-based inclusive; BED-style intervals are
    # 0-based half-open, hence the start - 1 shift.
    start = x@ranges@start - 1,
    end = x@ranges@start - 1 + x@ranges@width,
    # name/score are not carried over; fill with the BED placeholder ".".
    name = rep(".", length(x)),
    score = rep(".", length(x)),
    strand = as.character(x@strand)
  )
  # BED uses "." for unstranded features where GRanges uses "*".
  res <- mutate(res, strand = ifelse(strand == "*", ".", strand))
  tbl_interval(res)
}
#' Construct a tbl_interval using tribble formatting.
#'
#' @rdname tbl_interval
#'
#' @return [tbl_interval()]
#
#' @export
trbl_interval <- function(...) {
  # Build a tibble from tribble-style row input, then run it through the
  # usual validation/classing step.
  as.tbl_interval(tibble::tribble(...))
}
#' Test if the object is a tbl_interval.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the [tbl_interval()] class.
#' @export
is.tbl_interval <- function(x) {
  # inherits() is the idiomatic way to test class membership (and also
  # handles S4 objects correctly), rather than searching class(x) by hand.
  inherits(x, "tbl_ivl")
}
#' Tibble for reference sizes.
#'
#' Equivalent to information in UCSC "chromSizes" files. Required column names are:
#' `chrom` and `size`
#'
#' @param x A `data_frame`
#' @param ... params for [tibble::tibble()]
#' @param .validate check valid column names
#'
#' @rdname tbl_genome
#'
#' @examples
#' genome <- tibble::tribble(
#' ~chrom, ~size,
#' 'chr1', 1e6,
#' 'chr2', 1e7
#' )
#'
#' is.tbl_genome(genome)
#' genome <- tbl_genome(genome)
#' is.tbl_genome(genome)
#'
#' @export
tbl_genome <- function(x, ..., .validate = TRUE) {
  # Coerce to a tibble, optionally validate the required chrom/size
  # columns, then tag the result with the "tbl_gnm" class (prepended
  # without duplicating classes already present).
  res <- tibble::as_tibble(x, ...)
  if (.validate) {
    res <- check_genome(res)
  }
  class(res) <- union("tbl_gnm", class(res))
  res
}
#' Coerce objects to tbl_genome.
#'
#' This is an S3 generic. valr includes methods to coerce tbl_df and data.frame
#' objects.
#'
#' @param x object to convert to tbl_genome.
#'
#' @return [tbl_genome()]
#'
#' @export
as.tbl_genome <- function(x) {
  # S3 generic: dispatch on the class of `x`; see methods below.
  UseMethod("as.tbl_genome")
}
#' @export
#' @rdname as.tbl_genome
as.tbl_genome.tbl_df <- function(x) {
  # Tibbles only need validation and the class tag.
  tbl_genome(x)
}
#' @export
#' @rdname as.tbl_genome
as.tbl_genome.data.frame <- function(x) {
  # Plain data frames are coerced by the tbl_genome() constructor.
  tbl_genome(x)
}
#' Construct a tbl_genome using tribble formatting.
#'
#' @return [tbl_genome()]
#'
#' @rdname tbl_genome
#'
#' @examples
#' trbl_genome(
#' ~chrom, ~size,
#' 'chr1', 1e6
#' )
#'
#' @export
trbl_genome <- function(...) {
  # tribble()-style rows -> tibble -> validated tbl_genome.
  tbl_genome(tibble::tribble(...))
}
#' Test if the object is a tbl_genome.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the [tbl_genome()] class.
#' @export
is.tbl_genome <- function(x) {
  # inherits() is the idiomatic class test; avoids scanning class(x)
  # manually and is also correct for S4 objects.
  inherits(x, "tbl_gnm")
}
# Validity checks ---------------------------------------------------
check_interval <- function(x) {
  # An interval table must carry chrom/start/end columns; check_names()
  # errors if any are absent. Returns `x` unchanged for chaining.
  check_names(x, c("chrom", "start", "end"))
  x
}
check_genome <- function(x) {
  # A genome table needs chrom/size columns ...
  check_names(x, c("chrom", "size"))
  # ... and each reference sequence may appear at most once.
  ref_names <- x[["chrom"]]
  repeated <- duplicated(ref_names)
  if (any(repeated)) {
    stop(sprintf(
      "duplicate chroms in genome: %s",
      paste0(ref_names[repeated], collapse = ", ")
    ))
  }
  x
}
check_names <- function(x, expected) {
  # Verify that every name in `expected` is present in names(x);
  # errors listing the absent names otherwise.
  # `absent` deliberately avoids shadowing base::missing(), which the
  # original local variable name `missing` masked within this scope.
  absent <- setdiff(expected, names(x))
  if (length(absent) > 0) {
    stop(sprintf(
      "expected %d required names, missing: %s",
      length(expected),
      paste0(absent, collapse = ", ")
    ), call. = FALSE)
  }
  # Return the input invisibly so validators can be chained; previously
  # the function returned NULL (callers ignored the result either way).
  invisible(x)
}
|
# Tutorial: "How to lump factor levels" (comments translated from Spanish)
pacman::p_load('tidyverse')
# NOTE: a factor stores a set of categories ("levels"). Readers can only
# keep track of a few categories at once, so lumping -- collapsing rare
# levels into one larger "Other" level -- keeps plots legible. Baseline:
# one bar per artist that reached the top 100 in 2000, which is unreadable:
billboard %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: the plot above is incomprehensible; fct_lump() to the rescue:
billboard %>%
mutate(artist = fct_lump(as_factor(artist), 10)) %>%
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: what does the 10 passed to fct_lump() mean? Not the levels kept,
# nor the levels lumped -- fct_lump() is opaque because it picks different
# methods depending on its arguments, so the tidyverse team no longer
# recommends it and added four explicit replacements in 2020, shown below:
# - fct_lump_min.
# - fct_lump_n.
# - fct_lump_prop.
# - fct_lump_lowfreq.
## fct_lump_min: lump levels occurring fewer than a minimum number of times ----
billboard %>%
mutate(artist = fct_lump_min(as_factor(artist), 3)) %>% # lump every artist with fewer than 3 top-100 entries in 2000; every remaining artist charted at least 3 times
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
## fct_lump_n: lump all but the n most (or least) frequent levels ----
billboard %>%
mutate(artist = fct_lump_n(artist, n = -5)) %>% # a negative n lumps the MOST frequent levels (the opposite of a positive n), e.g. the 5 artists with the most top-100 songs
filter(artist != "Other") %>%
ggplot(aes(y = artist)) +
geom_bar()
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5)) %>% # lump everything except the 5 most common levels
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: clearly more than five levels remain. Many artists tie at three
# songs, so we must decide how ties are broken; with no extra argument,
# fct_lump_n keeps every level tied with the last clearly-frequent one.
# The ties.method argument controls this; the default "min" is shown here:
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5, ties.method = 'min')) %>%
filter(artist != "Other") %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: the other options are "average", "first", "last", "random" and "max":
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5, ties.method = 'max')) %>% # "max" drops all levels that cannot be uniquely ranked
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5, ties.method = 'random')) %>% # "random" randomly selects which tied levels count as most frequent
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
| /tidyverse_booster/03_Improve creating and modifying columns/4to_tutorial.r | no_license | Leo4Luffy/Mi_practica | R | false | false | 4,390 | r | # Tutorial "Cómo agrupar factores"
# Tutorial: "How to lump factor levels" (comments translated from Spanish)
pacman::p_load('tidyverse')
# NOTE: a factor stores a set of categories ("levels"). Readers can only
# keep track of a few categories at once, so lumping -- collapsing rare
# levels into one larger "Other" level -- keeps plots legible. Baseline:
# one bar per artist that reached the top 100 in 2000, which is unreadable:
billboard %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: the plot above is incomprehensible; fct_lump() to the rescue:
billboard %>%
mutate(artist = fct_lump(as_factor(artist), 10)) %>%
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: what does the 10 passed to fct_lump() mean? Not the levels kept,
# nor the levels lumped -- fct_lump() is opaque because it picks different
# methods depending on its arguments, so the tidyverse team no longer
# recommends it and added four explicit replacements in 2020, shown below:
# - fct_lump_min.
# - fct_lump_n.
# - fct_lump_prop.
# - fct_lump_lowfreq.
## fct_lump_min: lump levels occurring fewer than a minimum number of times ----
billboard %>%
mutate(artist = fct_lump_min(as_factor(artist), 3)) %>% # lump every artist with fewer than 3 top-100 entries in 2000; every remaining artist charted at least 3 times
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
## fct_lump_n: lump all but the n most (or least) frequent levels ----
billboard %>%
mutate(artist = fct_lump_n(artist, n = -5)) %>% # a negative n lumps the MOST frequent levels (the opposite of a positive n), e.g. the 5 artists with the most top-100 songs
filter(artist != "Other") %>%
ggplot(aes(y = artist)) +
geom_bar()
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5)) %>% # lump everything except the 5 most common levels
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: clearly more than five levels remain. Many artists tie at three
# songs, so we must decide how ties are broken; with no extra argument,
# fct_lump_n keeps every level tied with the last clearly-frequent one.
# The ties.method argument controls this; the default "min" is shown here:
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5, ties.method = 'min')) %>%
filter(artist != "Other") %>%
ggplot(aes(y = artist)) +
geom_bar()
# NOTE: the other options are "average", "first", "last", "random" and "max":
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5, ties.method = 'max')) %>% # "max" drops all levels that cannot be uniquely ranked
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
billboard %>%
mutate(artist = fct_lump_n(artist, n = 5, ties.method = 'random')) %>% # "random" randomly selects which tied levels count as most frequent
filter(artist != 'Other') %>%
ggplot(aes(y = artist)) +
geom_bar()
|
## Plot 4 for the UCI "Individual household electric power consumption"
## data set: four base-graphics panels saved to plot_4.png.
## Read the data from the working directory. Measurement columns are read
## as text (no colClasses/na.strings given), hence the
## as.numeric(as.character(...)) conversions below.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
## Keep only the two days of interest (Date is still text at this point).
data2 <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
## Combine Date and Time into a single POSIXct timestamp in `Date`.
data2$Date <- as.Date(as.character(data2$Date), format = "%d/%m/%Y")
data2$Date <- with(data2, as.POSIXct(paste(Date, Time)))
## Coerce measurement columns to numeric.
data2$Global_active_power <- as.numeric(as.character(data2$Global_active_power))
data2$Global_reactive_power <- as.numeric(as.character(data2$Global_reactive_power))
data2$Voltage <- as.numeric(as.character(data2$Voltage))
data2$Global_intensity <- as.numeric(as.character(data2$Global_intensity))
data2$Sub_metering_1 <- as.numeric(as.character(data2$Sub_metering_1))
data2$Sub_metering_2 <- as.numeric(as.character(data2$Sub_metering_2))
## FIX: Sub_metering_3 was the only plotted series left unconverted,
## which breaks the points() call below whenever the raw column is
## non-numeric (it is plotted alongside the converted series).
data2$Sub_metering_3 <- as.numeric(as.character(data2$Sub_metering_3))
## Plot 4 - four base plots in one device.
png(filename = "plot_4.png", width = 480, height = 480, units = "px")
par(mfrow = c(1,4), mar = c(6, 6, 2, 1), oma = c(0, 0, 2, 0))
plot(data2$Global_active_power ~ data2$Date, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
plot(data2$Date, data2$Sub_metering_1, main = "", type = "l", ylab = "Energy sub metering", xlab = "")
points(data2$Date, data2$Sub_metering_2, col = "red", type = "l", lty = 3)
points(data2$Date, data2$Sub_metering_3, col = "blue", type = "l")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1))
plot(data2$Voltage ~ data2$Date, type = "l", ylab = "Voltage", xlab = "datetime")
plot(data2$Global_reactive_power ~ data2$Date, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
| /plot4.R | no_license | Kamran-Wali/ExData_Plotting1 | R | false | false | 1,810 | r | ## Read in the file into a data variable
## Plot 4 for the UCI "Individual household electric power consumption"
## data set: four base-graphics panels saved to plot_4.png.
## Read the data from the working directory. Measurement columns are read
## as text (no colClasses/na.strings given), hence the
## as.numeric(as.character(...)) conversions below.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
## Keep only the two days of interest (Date is still text at this point).
data2 <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
## Combine Date and Time into a single POSIXct timestamp in `Date`.
data2$Date <- as.Date(as.character(data2$Date), format = "%d/%m/%Y")
data2$Date <- with(data2, as.POSIXct(paste(Date, Time)))
## Coerce measurement columns to numeric.
data2$Global_active_power <- as.numeric(as.character(data2$Global_active_power))
data2$Global_reactive_power <- as.numeric(as.character(data2$Global_reactive_power))
data2$Voltage <- as.numeric(as.character(data2$Voltage))
data2$Global_intensity <- as.numeric(as.character(data2$Global_intensity))
data2$Sub_metering_1 <- as.numeric(as.character(data2$Sub_metering_1))
data2$Sub_metering_2 <- as.numeric(as.character(data2$Sub_metering_2))
## FIX: Sub_metering_3 was the only plotted series left unconverted,
## which breaks the points() call below whenever the raw column is
## non-numeric (it is plotted alongside the converted series).
data2$Sub_metering_3 <- as.numeric(as.character(data2$Sub_metering_3))
## Plot 4 - four base plots in one device.
png(filename = "plot_4.png", width = 480, height = 480, units = "px")
par(mfrow = c(1,4), mar = c(6, 6, 2, 1), oma = c(0, 0, 2, 0))
plot(data2$Global_active_power ~ data2$Date, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
plot(data2$Date, data2$Sub_metering_1, main = "", type = "l", ylab = "Energy sub metering", xlab = "")
points(data2$Date, data2$Sub_metering_2, col = "red", type = "l", lty = 3)
points(data2$Date, data2$Sub_metering_3, col = "blue", type = "l")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1))
plot(data2$Voltage ~ data2$Date, type = "l", ylab = "Voltage", xlab = "datetime")
plot(data2$Global_reactive_power ~ data2$Date, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
# Plot 4: read only rows for 2007-02-01 and 2007-02-02 via an SQL filter
# so the full file never has to be loaded into R.
library(sqldf)
epc <- "household_power_consumption.txt"
epcdata <- read.csv.sql(epc,
sql = "select * from file where Date = '1/2/2007' or Date = '2/2/2007' ", header=TRUE, sep=";", na.strings="?")
closeAllConnections()  # read.csv.sql leaves its file/db connection open
# Build a single POSIXct timestamp from the separate Date and Time columns.
epcdata$DateTime <- paste(epcdata$Date,epcdata$Time)
epcdata$DateTime <- as.POSIXct(epcdata$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Plot4: 2x2 panel written to plot4.png (dev.off() follows this block).
png(filename="plot4.png")
par(mfrow=c(2,2))
with(epcdata,{
plot(epcdata$DateTime,epcdata$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(epcdata$DateTime,epcdata$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(epcdata$DateTime,epcdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(epcdata$DateTime,epcdata$Sub_metering_2, type="l", col ="red")
lines(epcdata$DateTime,epcdata$Sub_metering_3, type="l", col ="blue")
# FIX: the legend marks line series, so use lty = 1 rather than
# pch = "____" (pch takes a single character; "____" was silently
# truncated to "_" and drew underscore markers instead of line samples).
legend( "topright", lty = 1, col = c( "black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(epcdata$DateTime,epcdata$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
})
dev.off() | /plot4.R | no_license | dkspeaks/ExData_Plotting1 | R | false | false | 1,195 | r | # Extract only the rows matching the 2 dates - 2007-02-01 and 2007-02-02.
# Plot 4: read only rows for 2007-02-01 and 2007-02-02 via an SQL filter
# so the full file never has to be loaded into R.
library(sqldf)
epc <- "household_power_consumption.txt"
epcdata <- read.csv.sql(epc,
sql = "select * from file where Date = '1/2/2007' or Date = '2/2/2007' ", header=TRUE, sep=";", na.strings="?")
closeAllConnections()  # read.csv.sql leaves its file/db connection open
# Build a single POSIXct timestamp from the separate Date and Time columns.
epcdata$DateTime <- paste(epcdata$Date,epcdata$Time)
epcdata$DateTime <- as.POSIXct(epcdata$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Plot4: 2x2 panel written to plot4.png (dev.off() follows this block).
png(filename="plot4.png")
par(mfrow=c(2,2))
with(epcdata,{
plot(epcdata$DateTime,epcdata$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(epcdata$DateTime,epcdata$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(epcdata$DateTime,epcdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(epcdata$DateTime,epcdata$Sub_metering_2, type="l", col ="red")
lines(epcdata$DateTime,epcdata$Sub_metering_3, type="l", col ="blue")
# FIX: the legend marks line series, so use lty = 1 rather than
# pch = "____" (pch takes a single character; "____" was silently
# truncated to "_" and drew underscore markers instead of line samples).
legend( "topright", lty = 1, col = c( "black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(epcdata$DateTime,epcdata$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
})
dev.off() |
library(pacman);p_load('rvest','hellno',stringr,data.table, XML)
getAbstact=function(URL,checkSave=FALSE)
{
  # Scrape title, authors, IDs and abstract text nodes from a PubMed
  # article page. Returns a character vector of matched node texts,
  # NULL when the page has none, or NA when the page could not be
  # fetched/parsed.
  print('getAbstact')
  p_load('rvest')
  input=URL; print(URL);flush.console()
  html_nodess=tryCatch({ read_html(input) }, error = function(...) return(NA))
  # BUG FIX: the old guard was `if (!exists('html_nodess'))`, which can
  # never fire -- the tryCatch above always binds the name (to NA on
  # failure). Test the value itself instead. ##ERRECHECK
  if( !inherits(html_nodess, "xml_document") )return(NA)
  rval <- html_nodess %>%
    html_nodes( ".rprtid , .auths , dd , .abstr p , #maincontent h1") %>%
    html_text()
  #output of object
  if(length(rval) == 0) return (NULL)
  if(checkSave){
    # BUG FIX: defineST was referenced but its computation had been
    # commented out, so checkSave=TRUE raised "object 'defineST' not
    # found". Recompute it (the numeric PubMed ID in the URL) here.
    defineST <- str_extract(URL, pattern='[0-9]+')
    save('rval',file= paste0('../tmpfiles3/', defineST ,'.Rdata'))
  }
  return(rval)
}
getLink=function(SearchTerm, n=10)
{
  # Query the NCBI eutils esearch endpoint for PubMed IDs matching
  # SearchTerm and return up to n article URLs.
  print('getLink')
  # BUG FIX: the query string used to be split across two source lines,
  # embedding a literal newline plus indentation inside the URL; build
  # it as one uninterrupted string instead.
  url2= paste0('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term="',
               SearchTerm, '"&retmax=', n, '&tool=AbstraTastic')
  # BUG FIX: read_xml() fetches URLs directly; the old url() connection
  # was never closed (its close() call was commented out) and leaked.
  xml_doc=read_xml(url2)
  xml_data <- xmlParse(xml_doc)
  test=xmlToList(xml_data)
  PUBMEDID=(test["IdList" ])
  print(paste0('the length is:',length(unlist(PUBMEDID))))
  st=paste0( 'https://www.ncbi.nlm.nih.gov/pubmed/?term=' ,unlist(PUBMEDID))
  return(st)
}
formatAbstract=function(x)
{
  # Turn the raw node-text vector from getAbstact() into a named list
  # with Title, Abstract and PMID2 fields.
  # NOTE(review): heuristic parsing throughout -- positions are inferred
  # from text length and order, not from HTML structure.
  x<-unlist(x) #no list
  x<-x[which(!grepl('Indexed for MEDLINE',x))] #no filler ("Indexed for MEDLINE" node)
  # Legacy: locate the first element containing "PMID:". min() over an
  # empty which() yields Inf (with a warning) when no PMID is present.
  z<-(min(which(sapply(x, function(b)grepl('PMID:',b))) )) #legacy
  if(z<2)z<-2 #legacy
  Nchars=unlist(lapply(x, nchar))
  # The longest text node is assumed to be the abstract. NB: `index=`
  # relies on partial matching of sort()'s `index.return` argument.
  abst_loc<-sort(values<-(Nchars ),index=TRUE, decreasing=TRUE)$ix[1] #biggest text = abstr
  reviter =NA
  # Gather the abstract plus any immediately-preceding long (>100 char)
  # nodes by walking backwards over consecutive indices.
  # NOTE(review): when two or fewer long nodes exist, qq stays NA and
  # Abstract becomes "NA" -- presumably unintended; confirm.
  qq=reviter[1];reviter <-(rev(which(Nchars >100)))
  if(length(reviter) >2)
  { qq = abst_loc
    for( i in 1:(length(reviter)-1) )
    {#print(i)
      if ( reviter[i] - reviter[i+1] == 1) qq<- c(qq,reviter[i+1])
    }
  }
  # First node is assumed to be the title, last node the PMID line.
  ret<-list( Title = x[1],
             Abstract= toString(x[qq]) ,
             PMID2 = x[length(x)])
  return(ret) }
# calling functions: search -> fetch pages -> format abstracts
# NOTE(review): `retabstr` below is not defined anywhere in this file,
# so the dfr line fails at runtime; it likely predates a rename (rval
# is already formatted by formatAbstract above). Confirm intent.
rval<-'DKD' %>% (function(x,n=3)(getLink(x,n))) %>% lapply(. ,getAbstact) %>% lapply(. ,formatAbstract)
dfr=do.call(rbind,lapply( rval,retabstr))
| /scripts/newSEARCHfun.R | no_license | SkanderMulder/Abstractastic | R | false | false | 2,323 | r | library(pacman);p_load('rvest','hellno',stringr,data.table, XML)
getAbstact=function(URL,checkSave=FALSE)
{
  # Scrape title, authors, IDs and abstract text nodes from a PubMed
  # article page. Returns a character vector of matched node texts,
  # NULL when the page has none, or NA when the page could not be
  # fetched/parsed.
  print('getAbstact')
  p_load('rvest')
  input=URL; print(URL);flush.console()
  html_nodess=tryCatch({ read_html(input) }, error = function(...) return(NA))
  # BUG FIX: the old guard was `if (!exists('html_nodess'))`, which can
  # never fire -- the tryCatch above always binds the name (to NA on
  # failure). Test the value itself instead. ##ERRECHECK
  if( !inherits(html_nodess, "xml_document") )return(NA)
  rval <- html_nodess %>%
    html_nodes( ".rprtid , .auths , dd , .abstr p , #maincontent h1") %>%
    html_text()
  #output of object
  if(length(rval) == 0) return (NULL)
  if(checkSave){
    # BUG FIX: defineST was referenced but its computation had been
    # commented out, so checkSave=TRUE raised "object 'defineST' not
    # found". Recompute it (the numeric PubMed ID in the URL) here.
    defineST <- str_extract(URL, pattern='[0-9]+')
    save('rval',file= paste0('../tmpfiles3/', defineST ,'.Rdata'))
  }
  return(rval)
}
getLink=function(SearchTerm, n=10)
{
  # Query the NCBI eutils esearch endpoint for PubMed IDs matching
  # SearchTerm and return up to n article URLs.
  print('getLink')
  # BUG FIX: the query string used to be split across two source lines,
  # embedding a literal newline plus indentation inside the URL; build
  # it as one uninterrupted string instead.
  url2= paste0('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term="',
               SearchTerm, '"&retmax=', n, '&tool=AbstraTastic')
  # BUG FIX: read_xml() fetches URLs directly; the old url() connection
  # was never closed (its close() call was commented out) and leaked.
  xml_doc=read_xml(url2)
  xml_data <- xmlParse(xml_doc)
  test=xmlToList(xml_data)
  PUBMEDID=(test["IdList" ])
  print(paste0('the length is:',length(unlist(PUBMEDID))))
  st=paste0( 'https://www.ncbi.nlm.nih.gov/pubmed/?term=' ,unlist(PUBMEDID))
  return(st)
}
formatAbstract=function(x)
{
  # Turn the raw node-text vector from getAbstact() into a named list
  # with Title, Abstract and PMID2 fields.
  # NOTE(review): heuristic parsing throughout -- positions are inferred
  # from text length and order, not from HTML structure.
  x<-unlist(x) #no list
  x<-x[which(!grepl('Indexed for MEDLINE',x))] #no filler ("Indexed for MEDLINE" node)
  # Legacy: locate the first element containing "PMID:". min() over an
  # empty which() yields Inf (with a warning) when no PMID is present.
  z<-(min(which(sapply(x, function(b)grepl('PMID:',b))) )) #legacy
  if(z<2)z<-2 #legacy
  Nchars=unlist(lapply(x, nchar))
  # The longest text node is assumed to be the abstract. NB: `index=`
  # relies on partial matching of sort()'s `index.return` argument.
  abst_loc<-sort(values<-(Nchars ),index=TRUE, decreasing=TRUE)$ix[1] #biggest text = abstr
  reviter =NA
  # Gather the abstract plus any immediately-preceding long (>100 char)
  # nodes by walking backwards over consecutive indices.
  # NOTE(review): when two or fewer long nodes exist, qq stays NA and
  # Abstract becomes "NA" -- presumably unintended; confirm.
  qq=reviter[1];reviter <-(rev(which(Nchars >100)))
  if(length(reviter) >2)
  { qq = abst_loc
    for( i in 1:(length(reviter)-1) )
    {#print(i)
      if ( reviter[i] - reviter[i+1] == 1) qq<- c(qq,reviter[i+1])
    }
  }
  # First node is assumed to be the title, last node the PMID line.
  ret<-list( Title = x[1],
             Abstract= toString(x[qq]) ,
             PMID2 = x[length(x)])
  return(ret) }
# calling functions: search 'DKD', fetch the first 3 matching article pages
# and parse each into Title/Abstract/PMID fields
rval<-'DKD' %>% (function(x,n=3)(getLink(x,n))) %>% lapply(. ,getAbstact) %>% lapply(. ,formatAbstract)
# NOTE(review): retabstr is not defined anywhere in this file -- presumably a
# row formatter available in the calling environment; confirm it exists,
# otherwise this line fails with "could not find function 'retabstr'".
dfr=do.call(rbind,lapply( rval,retabstr))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.ctsemFit.R
\name{plot.ctsemFit}
\alias{plot.ctsemFit}
\title{Plotting function for object class ctsemFit}
\usage{
\method{plot}{ctsemFit}(x, resolution = 50, wait = TRUE,
max.time = "auto", mean = TRUE, withinVariance = TRUE, AR = TRUE,
CR = TRUE, standardiseCR = FALSE, randomImpulse = FALSE,
experimentalImpulse = FALSE, xlab = "Time", meansylim = "auto",
ARylim = "auto", CRylim = "auto", ylab = "Value", ...)
}
\arguments{
\item{x}{ctsemFit object as generated by \code{\link{ctFit}}.}
\item{resolution}{Numeric. Plot points between each unit of time. Default is 50.}
\item{wait}{If true, user is prompted to continue before plotting next graph. If false, graphs are plotted one after another without waiting.}
\item{max.time}{Time scale on which to plot parameters. If auto, parameters are plotted for full range of observed variables.}
\item{mean}{if TRUE, plot of means from 0 to max.time included in output.}
\item{withinVariance}{if TRUE, plot within subject variance / covariance.}
\item{AR}{if TRUE, plot of autoregressive values from 0 to max.time included in output.}
\item{CR}{if TRUE, plot of cross regressive values from 0 to max.time included in output.}
\item{standardiseCR}{if TRUE , cross regression values are standardised based on estimated within subject variance.}
\item{randomImpulse}{if TRUE (default is FALSE), plots expected change in processes given a random fluctuation of +1 for each process --
plot is then a mixture of DIFFUSION and DRIFT characteristics.}
\item{experimentalImpulse}{if TRUE (default is FALSE), plots expected change in processes given an exogenous input of +1 for each process --
alternate characterisation of autoregressive and cross regressive plots.}
\item{xlab}{X axis label.}
\item{meansylim}{Vector of min and max limits for mean trajectory plot. 'auto' calculates automatically.}
\item{ARylim}{Vector of min and max limits for autoregression plot. 'auto' is c(0,1), and expands if necessary.}
\item{CRylim}{Vector of min and max limits for cross regression plot. 'auto' is c(-1,1), and expands if necessary.}
\item{ylab}{Y axis label.}
\item{...}{Other options passed to \code{plot()}.}
}
\value{
Nothing. Side-effect: plots graphs.
}
\description{
Outputs mean trajectories, autoregression, and crossregression plots.
For more customization possibilities, see \code{\link{ctPlot}}.
}
\examples{
## Examples set to 'donttest' because they take longer than 5s.
### example from Driver, Oud, Voelkle (2015),
### simulated happiness and leisure time with unobserved heterogeneity.
\donttest{
data(ctExample1)
traitmodel <- ctModel(n.manifest=2, n.latent=2, Tpoints=6, LAMBDA=diag(2),
manifestNames=c('LeisureTime', 'Happiness'),
latentNames=c('LeisureTime', 'Happiness'), TRAITVAR="auto")
traitfit <- ctFit(dat=ctExample1, ctmodelobj=traitmodel)
plot(traitfit, wait=FALSE)
}
}
| /man/plot.ctsemFit.Rd | no_license | davan690/ctsem | R | false | true | 2,984 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.ctsemFit.R
\name{plot.ctsemFit}
\alias{plot.ctsemFit}
\title{Plotting function for object class ctsemFit}
\usage{
\method{plot}{ctsemFit}(x, resolution = 50, wait = TRUE,
max.time = "auto", mean = TRUE, withinVariance = TRUE, AR = TRUE,
CR = TRUE, standardiseCR = FALSE, randomImpulse = FALSE,
experimentalImpulse = FALSE, xlab = "Time", meansylim = "auto",
ARylim = "auto", CRylim = "auto", ylab = "Value", ...)
}
\arguments{
\item{x}{ctsemFit object as generated by \code{\link{ctFit}}.}
\item{resolution}{Numeric. Plot points between each unit of time. Default is 50.}
\item{wait}{If true, user is prompted to continue before plotting next graph. If false, graphs are plotted one after another without waiting.}
\item{max.time}{Time scale on which to plot parameters. If auto, parameters are plotted for full range of observed variables.}
\item{mean}{if TRUE, plot of means from 0 to max.time included in output.}
\item{withinVariance}{if TRUE, plot within subject variance / covariance.}
\item{AR}{if TRUE, plot of autoregressive values from 0 to max.time included in output.}
\item{CR}{if TRUE, plot of cross regressive values from 0 to max.time included in output.}
\item{standardiseCR}{if TRUE , cross regression values are standardised based on estimated within subject variance.}
\item{randomImpulse}{if TRUE (default is FALSE), plots expected change in processes given a random fluctuation of +1 for each process --
plot is then a mixture of DIFFUSION and DRIFT characteristics.}
\item{experimentalImpulse}{if TRUE (default is FALSE), plots expected change in processes given an exogenous input of +1 for each process --
alternate characterisation of autoregressive and cross regressive plots.}
\item{xlab}{X axis label.}
\item{meansylim}{Vector of min and max limits for mean trajectory plot. 'auto' calculates automatically.}
\item{ARylim}{Vector of min and max limits for autoregression plot. 'auto' is c(0,1), and expands if necessary.}
\item{CRylim}{Vector of min and max limits for cross regression plot. 'auto' is c(-1,1), and expands if necessary.}
\item{ylab}{Y axis label.}
\item{...}{Other options passed to \code{plot()}.}
}
\value{
Nothing. Side-effect: plots graphs.
}
\description{
Outputs mean trajectories, autoregression, and crossregression plots.
For more customization possibilities, see \code{\link{ctPlot}}.
}
\examples{
## Examples set to 'donttest' because they take longer than 5s.
### example from Driver, Oud, Voelkle (2015),
### simulated happiness and leisure time with unobserved heterogeneity.
\donttest{
data(ctExample1)
traitmodel <- ctModel(n.manifest=2, n.latent=2, Tpoints=6, LAMBDA=diag(2),
manifestNames=c('LeisureTime', 'Happiness'),
latentNames=c('LeisureTime', 'Happiness'), TRAITVAR="auto")
traitfit <- ctFit(dat=ctExample1, ctmodelobj=traitmodel)
plot(traitfit, wait=FALSE)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/in.da.R
\name{in.da}
\alias{in.da}
\alias{rv.da}
\title{Optimization functions for Dimensional Anchors in Radviz}
\usage{
in.da(springs, similarity)
rv.da(springs, similarity)
}
\arguments{
\item{springs}{A matrix of 2D dimensional anchor coordinates, as returned by \code{\link{make.S}}}
\item{similarity}{A similarity matrix measuring the correlation between Dimensional Anchors}
}
\value{
A measure of the efficiency of the Radviz projection of the similarity matrix
onto a set of springs
}
\description{
Visual efficiency of Radviz plots depends heavily on the correct arrangement of Dimensional Anchors.
These functions implement the optimization strategies described in
\href{http://link.springer.com/chapter/10.1007/978-3-642-13672-6_13}{Di Caro et al 2012}
}
\details{
Following the recommendation of Di Caro et al., we used a cosine function to calculate
the similarity between Dimensional Anchors (see \code{\link{cosine}} for details).
The in.da function implements the independent similarity measure,
where the value increases as the Radviz projection improves.
The rv.da function implements the radviz-dependent similarity measure,
where the value decreases as the Radviz projection improves.
}
\examples{
data(iris)
das <- c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width')
S <- make.S(das)
scaled <- apply(iris[,das],2,do.L)
sim.mat <- cosine(scaled)
in.da(S,sim.mat) # increases with better projections
rv.da(S,sim.mat) # decreases with better projections
}
\author{
Yann Abraham
}
| /man/in.da.Rd | no_license | ivan-marroquin/Radviz | R | false | true | 1,653 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/in.da.R
\name{in.da}
\alias{in.da}
\alias{rv.da}
\title{Optimization functions for Dimensional Anchors in Radviz}
\usage{
in.da(springs, similarity)
rv.da(springs, similarity)
}
\arguments{
\item{springs}{A matrix of 2D dimensional anchor coordinates, as returned by \code{\link{make.S}}}
\item{similarity}{A similarity matrix measuring the correlation between Dimensional Anchors}
}
\value{
A measure of the efficiency of the Radviz projection of the similarity matrix
onto a set of springs
}
\description{
Visual efficiency of Radviz plots depends heavily on the correct arrangement of Dimensional Anchors.
These functions implement the optimization strategies described in
\href{http://link.springer.com/chapter/10.1007/978-3-642-13672-6_13}{Di Caro et al 2012}
}
\details{
Following the recommendation of Di Caro et al., we used a cosine function to calculate
the similarity between Dimensional Anchors (see \code{\link{cosine}} for details).
The in.da function implements the independent similarity measure,
where the value increases as the Radviz projection improves.
The rv.da function implements the radviz-dependent similarity measure,
where the value decreases as the Radviz projection improves.
}
\examples{
data(iris)
das <- c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width')
S <- make.S(das)
scaled <- apply(iris[,das],2,do.L)
sim.mat <- cosine(scaled)
in.da(S,sim.mat) # increases with better projections
rv.da(S,sim.mat) # decreases with better projections
}
\author{
Yann Abraham
}
|
\name{pairwise}
\alias{pairwise}
\title{ Pair Indices }
\description{
A utility function to determine indices for pairwise comparisons.
}
\usage{
pairwise(N)
}
\arguments{
\item{N}{ a single numeric value representing the total number of
things to undergo pairwise comparison. }
}
\value{
Returns a two column numeric matrix giving the indices for all
pairs.
}
\references{
Grant, B.J. et al. (2006) \emph{Bioinformatics} \bold{22}, 2695--2696.
}
\author{ Barry Grant }
\seealso{ \code{\link{seqidentity}} }
\examples{
pairwise(3)
pairwise(20)
}
\keyword{ utilities }
| /ver_devel/bio3d/man/pairwise.Rd | no_license | Grantlab/bio3d | R | false | false | 586 | rd | \name{pairwise}
\alias{pairwise}
\title{ Pair Indices }
\description{
A utility function to determine indices for pairwise comparisons.
}
\usage{
pairwise(N)
}
\arguments{
\item{N}{ a single numeric value representing the total number of
things to undergo pairwise comparison. }
}
\value{
Returns a two column numeric matrix giving the indices for all
pairs.
}
\references{
Grant, B.J. et al. (2006) \emph{Bioinformatics} \bold{22}, 2695--2696.
}
\author{ Barry Grant }
\seealso{ \code{\link{seqidentity}} }
\examples{
pairwise(3)
pairwise(20)
}
\keyword{ utilities }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rejSamp.R
\name{rejSamp}
\alias{rejSamp}
\title{Generate random numbers by rejection sampling}
\usage{
rejSamp(f, n = 1, min = 0, max = 1, g = NULL, rg = NULL,
g.factor = 1)
}
\arguments{
\item{f}{[\code{function}]\cr
Function to be interpreted as probability density function.}
\item{n}{[\code{integer(1)}]\cr
Amount of random numbers to be generated. Default is 1.}
\item{min}{[\code{numeric(1)}]\cr}
\item{max}{[\code{numeric(1)}]\cr
Interval the generated random numbers are from. Default is (0, 1).}
\item{g}{[\code{function}]\cr
Probability density function of the instrumental distribution.}
\item{rg}{[\code{function}]\cr
Function that generates random numbers from the instrumental distribution.}
\item{g.factor}{[\code{numeric(1)}]\cr
Factor for \code{g}. Default is 1.}
}
\value{
\code{n} Random numbers from the function \code{f} interpreted as probability density function on the interval (\code{min}, \code{max}).
}
\description{
Generate random numbers by rejection sampling
}
\details{
If no instrumental distribution is specified, the uniform distribution is used.
The call \code{g}(x) should return the probability of the realisation x for the instrumental distribution \code{g}.
The choice of \code{g.factor} should guarantee \code{f}(x) < \code{g.factor} * \code{g}(x).
}
\examples{
test <- function(x) x^2
rejSamp(f = test, n = 10, min = -1, max = 1)
}
| /man/rejSamp.Rd | no_license | jakob-r/rejSamp-1 | R | false | true | 1,461 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rejSamp.R
\name{rejSamp}
\alias{rejSamp}
\title{Generate random numbers by rejection sampling}
\usage{
rejSamp(f, n = 1, min = 0, max = 1, g = NULL, rg = NULL,
g.factor = 1)
}
\arguments{
\item{f}{[\code{function}]\cr
Function to be interpreted as probability density function.}
\item{n}{[\code{integer(1)}]\cr
Amount of random numbers to be generated. Default is 1.}
\item{min}{[\code{numeric(1)}]\cr}
\item{max}{[\code{numeric(1)}]\cr
Interval the generated random numbers are from. Default is (0, 1).}
\item{g}{[\code{function}]\cr
Probability density function of the instrumental distribution.}
\item{rg}{[\code{function}]\cr
Function that generates random numbers from the instrumental distribution.}
\item{g.factor}{[\code{numeric(1)}]\cr
Factor for \code{g}. Default is 1.}
}
\value{
\code{n} Random numbers from the function \code{f} interpreted as probability density function on the interval (\code{min}, \code{max}).
}
\description{
Generate random numbers by rejection sampling
}
\details{
If no instrumental distribution is specified, the uniform distribution is used.
The call \code{g}(x) should return the probability of the realisation x for the instrumental distribution \code{g}.
The choice of \code{g.factor} should guarantee \code{f}(x) < \code{g.factor} * \code{g}(x).
}
\examples{
test <- function(x) x^2
rejSamp(f = test, n = 10, min = -1, max = 1)
}
|
# Agave Platform Science API
#
# Power your digital lab and reduce the time from theory to discovery using the Agave Science-as-a-Service API Platform. Agave provides hosted services that allow researchers to manage data, conduct experiments, and publish and share results from anywhere at any time.
#
# Agave Platform version: 2.2.14
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' FileRenameAction Class
#'
#' Request for a file/folder to be renamed on the target system. Metadata will be preserved after rename. Rename operations are only applied relative to the file/folder given in the URL. To rename a file/folder to a different path, see the FileMoveAction.
#'
#' @field action
#' @field path Name of new directory or target file or folder.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
FileRenameAction <- R6::R6Class(
  'FileRenameAction',
  public = list(
    # action: FileManagementActionType R6 object describing the operation
    # path:   name of the new directory or target file or folder
    `action` = NULL,
    `path` = NULL,
    initialize = function(`action`, `path`){
      if (!missing(`action`)) {
        stopifnot(R6::is.R6(`action`))
        self$`action` <- `action`
      }
      if (!missing(`path`)) {
        stopifnot(is.character(`path`), length(`path`) == 1)
        self$`path` <- `path`
      }
    },
    asJSON = function() {
      self$toJSON()
    },
    # Serialise to a named list suitable for jsonlite::toJSON().
    toJSON = function() {
      FileRenameActionObject <- list()
      if (!is.null(self$`action`)) {
        FileRenameActionObject[['action']] <- self$`action`$toJSON()
      }
      else {
        FileRenameActionObject[['action']] <- NULL
      }
      if (!is.null(self$`path`)) {
        FileRenameActionObject[['path']] <- self$`path`
      }
      else {
        FileRenameActionObject[['path']] <- NULL
      }
      FileRenameActionObject
    },
    # Populate this object from a parsed JSON list (or a JSON string).
    fromJSON = function(FileRenameActionObject) {
      if (is.character(FileRenameActionObject)) {
        # FIX: was jsonlite::fromJSON(FileRenameActionJson), which referenced
        # an undefined variable, so string input always failed
        FileRenameActionObject <- jsonlite::fromJSON(FileRenameActionObject)
      }
      if ("result" %in% names(FileRenameActionObject)) {
        FileRenameActionObject <- FileRenameActionObject$result
      }
      if (!is.null(FileRenameActionObject$`action`)) {
        actionObject <- FileManagementActionType$new()
        actionObject$fromJSON(jsonlite::toJSON(FileRenameActionObject$action, auto_unbox = TRUE))
        self$`action` <- actionObject
      }
      if (!is.null(FileRenameActionObject$`path`)) {
        self$`path` <- FileRenameActionObject$`path`
      }
    },
    # Hand-rolled JSON string representation.
    toJSONString = function() {
      sprintf(
        '{
           "action": %s,
           "path": %s
        }',
        self$`action`$toJSON(),
        # FIX: ifelse(..., paste0(c('"', path, '"'))) returned only the first
        # element ('"') because ifelse truncates to the length of the test and
        # paste0 was not collapsing; build the quoted string explicitly
        if (is.null(self$`path`)) "null" else paste0('"', self$`path`, '"')
      )
    },
    fromJSONString = function(FileRenameActionJson) {
      FileRenameActionObject <- jsonlite::fromJSON(FileRenameActionJson)
      # FIX: `self::fromJSON` is not valid R6 method access; use `$`
      self$fromJSON(FileRenameActionObject)
    }
  )
)
| /R/FileRenameAction.r | permissive | agaveplatform/r-sdk | R | false | false | 2,889 | r | # Agave Platform Science API
#
# Power your digital lab and reduce the time from theory to discovery using the Agave Science-as-a-Service API Platform. Agave provides hosted services that allow researchers to manage data, conduct experiments, and publish and share results from anywhere at any time.
#
# Agave Platform version: 2.2.14
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' FileRenameAction Class
#'
#' Request for a file/folder to be renamed on the target system. Metadata will be preserved after rename. Rename operations are only applied relative to the file/folder given in the URL. To rename a file/folder to a different path, see the FileMoveAction.
#'
#' @field action
#' @field path Name of new directory or target file or folder.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
FileRenameAction <- R6::R6Class(
  'FileRenameAction',
  public = list(
    # action: FileManagementActionType R6 object describing the operation
    # path:   name of the new directory or target file or folder
    `action` = NULL,
    `path` = NULL,
    initialize = function(`action`, `path`){
      if (!missing(`action`)) {
        stopifnot(R6::is.R6(`action`))
        self$`action` <- `action`
      }
      if (!missing(`path`)) {
        stopifnot(is.character(`path`), length(`path`) == 1)
        self$`path` <- `path`
      }
    },
    asJSON = function() {
      self$toJSON()
    },
    # Serialise to a named list suitable for jsonlite::toJSON().
    toJSON = function() {
      FileRenameActionObject <- list()
      if (!is.null(self$`action`)) {
        FileRenameActionObject[['action']] <- self$`action`$toJSON()
      }
      else {
        FileRenameActionObject[['action']] <- NULL
      }
      if (!is.null(self$`path`)) {
        FileRenameActionObject[['path']] <- self$`path`
      }
      else {
        FileRenameActionObject[['path']] <- NULL
      }
      FileRenameActionObject
    },
    # Populate this object from a parsed JSON list (or a JSON string).
    fromJSON = function(FileRenameActionObject) {
      if (is.character(FileRenameActionObject)) {
        # FIX: was jsonlite::fromJSON(FileRenameActionJson), which referenced
        # an undefined variable, so string input always failed
        FileRenameActionObject <- jsonlite::fromJSON(FileRenameActionObject)
      }
      if ("result" %in% names(FileRenameActionObject)) {
        FileRenameActionObject <- FileRenameActionObject$result
      }
      if (!is.null(FileRenameActionObject$`action`)) {
        actionObject <- FileManagementActionType$new()
        actionObject$fromJSON(jsonlite::toJSON(FileRenameActionObject$action, auto_unbox = TRUE))
        self$`action` <- actionObject
      }
      if (!is.null(FileRenameActionObject$`path`)) {
        self$`path` <- FileRenameActionObject$`path`
      }
    },
    # Hand-rolled JSON string representation.
    toJSONString = function() {
      sprintf(
        '{
           "action": %s,
           "path": %s
        }',
        self$`action`$toJSON(),
        # FIX: ifelse(..., paste0(c('"', path, '"'))) returned only the first
        # element ('"') because ifelse truncates to the length of the test and
        # paste0 was not collapsing; build the quoted string explicitly
        if (is.null(self$`path`)) "null" else paste0('"', self$`path`, '"')
      )
    },
    fromJSONString = function(FileRenameActionJson) {
      FileRenameActionObject <- jsonlite::fromJSON(FileRenameActionJson)
      # FIX: `self::fromJSON` is not valid R6 method access; use `$`
      self$fromJSON(FileRenameActionObject)
    }
  )
)
|
##########################
##########################
##emission calculations
##########################
##########################
#kr
#description
##########################
#functions to calculate emissions
#includes
##########################
#calcEm
#calcEMHoribaPitot
#
#to do
##########################
#comments
##########################
#
##########################
##########################
##calcEm
##########################
##########################
#kr 23/01/2012 v 0.0.6
#what it does
##########################
#calculates Emissions
#most urgent
######################################
#confirm inputs to calcEmHoribaPitot
##what exhaust parameters are needed?
##what delay offset is being applied to conc.co2
##round up/round down?
#
#urgent
##########################
#need to tidy the this.call
#handling give current
#not parent call!!!
##########################
#need to tidy calcEmHoribaPitot
#error catchers to add
#tempGet to be tidied
#fix to be tidied
#
#to do
##########################
#make test more robust?
#comments
##########################
#
################################
################################
##calcEm
################################
################################
# Calculate emissions for one analyte from a concentration time-series.
# Validates that the supplied element is a concentration (conc.<analyte>),
# infers the analyte from the element name when not given, then dispatches
# to calc.method (default calcEm_HoribaPitot) to do the calculation.
#
# conc        concentration pems.element, conventionally named conc.<analyte>
# calc.method function implementing the emission calculation
# analyte     analyte name; taken from the conc element name if NULL
# data        optional pems object / data source searched for inputs
# force       if TRUE, skip the conc.<analyte> naming consistency checks
# this.call   call to report in output; captured automatically if NULL
calcEm <- function(conc = NULL,
                   calc.method = calcEm_HoribaPitot, analyte = NULL,
                   ..., data = NULL, fun.name = "calcEm", force = FALSE,
                   this.call = NULL){
  #setup
  dots <- quos(...)   # captured so they can be spliced into calc.method below
  if(is.null(this.call))
    this.call <- match.call()
  settings <- calcChecks(fun.name, ..., data = data)
  #get what there is
  conc <- getPEMSElement(!!enquo(conc), data, ref.name="conc")
  temp <- attr(conc, "name")
  # unless forced, insist the element is named like a concentration
  if(!force){
    if(length(grep("^conc.", temp))<1)
      checkIfMissing(if.missing = settings$if.missing,
                     reply = paste("'", temp, "' should be concentration, \n\tdenoted conc.[analyte]", sep=""),
                     suggest = "select suitable input or force if sure ?calcEm", if.warning = NULL,
                     fun.name = fun.name)
  }
  temp <- gsub("^conc.", "", temp)
  if(is.null(analyte))
    analyte <- temp
  # unless forced, the element name must agree with the declared analyte
  if(!force){
    if(temp != analyte)
      checkIfMissing(if.missing = settings$if.missing,
                     reply = "Input type does not match assigned 'analyte'",
                     suggest = "select suitable input or force if sure ?calcEm", if.warning = NULL,
                     fun.name = fun.name)
  }
  #this is based on current calcVSP approach
  #but need to send it data and analyte...
  if(is.function(calc.method)){
    #strip output because calcVSP packing this...
    # (output handling is applied once below via pemsOutput)
    if("output" %in% names(dots))
      dots[[which(names(dots)=="output")]]<-NULL
    em <- eval_tidy(quo(calc.method(conc=conc, data=data, fun.name=fun.name,
                                    analyte=analyte, this.call=this.call, !!!dots)))
    return(pemsOutput(em, output = settings$output, data = data,
                      fun.name = fun.name, this.call = this.call))
  }
  # calc.method was not callable: report and return NULL
  checkIfMissing(if.missing = settings$if.missing,
                 reply = "could not run calc.method!",
                 suggest = "check ?calcEm if reason unclear", if.warning = "returning NULL",
                 fun.name = fun.name)
  return(NULL)
}
##################################
##################################
##calcEm_HoribaPitot
##################################
##################################
#various error catchers needed
# Horiba analyser / Pitot-tube emission calculation.
# Converts an exhaust concentration series (vol%) and exhaust flow rate
# (L/min) into a mass emission rate em.<analyte> in g/s, using the molar
# volume 22.415 L/mol with a 273.15/293.15 temperature correction.
#
# conc     concentration pems.element (supplied by calcEm)
# time     local time element (currently unused; kept for interface stability)
# exflow   exhaust flow rate element (converted to L/min)
# extemp, express  exhaust temperature / pressure elements (currently unused)
# analyte  analyte name, e.g. "co2"; keys the mm.<analyte> and
#          delay.<analyte> lookups
# delay    analyser response delay in seconds; the conc series is advanced
#          by floor(delay) samples when delay >= 1
# mm       molar mass of the analyte (g/mol); for "hc" it is built from
#          mm.h, mm.c, alpha.exhaust.hc and thc.c6 when not supplied
calcEm_HoribaPitot <- function(conc = NULL, time = local.time,
                               exflow = exh.flow.rate, extemp = exh.temp,
                               express = exh.press, analyte = NULL, delay = NULL, mm = NULL,
                               ..., force = FALSE, data = NULL, fun.name = "calcEm_HoribaPitot",
                               this.call = NULL){
  # FIX: the default was `force = force`, a self-referential promise that
  # errors if ever evaluated; FALSE matches calcEm's default.
  #setup
  settings <- calcChecks(fun.name, ..., data = data)
  #get all but conc - handled by calcEM...
  time <- getPEMSElement(!!enquo(time), data, if.missing="return")
  exflow <- getPEMSElement(!!enquo(exflow), data, if.missing="return")
  extemp <- getPEMSElement(!!enquo(extemp), data, if.missing="return")
  express <- getPEMSElement(!!enquo(express), data, if.missing="return")
  # if(!force & !is.null(time)){
  #check that time is equidistant
  # }
  # lookup helper: resolve id from ..., then pems constants, then data
  # columns, then the supplied default list (e.g. ref.chem)
  tempGet <- function(..., id=NULL, data=NULL, default=NULL){
    extra.args <- list(...)
    if(id %in% names(extra.args)) return(extra.args[[id]])
    if(!is.null(data)){
      if(isPEMS(data))
        if(id %in% names(getPEMSConstants(data)))
          return(getPEMSConstants(data)[[id]])
      if(id %in% names(data)) return(data[[id]])
    }
    if(!is.null(default)){
      if(id %in% names(default)) return(default[[id]])
    }
    return(NULL)
  }
  ##########################################
  #need to rethink mm.hc options
  #maybe extend this to calcVSP etc?
  ##########################################
  if(is.null(mm))
    mm <- tempGet(..., id=paste("mm.", analyte[1], sep=""), data=data, default=ref.chem)
  # hydrocarbons: molar mass built as (mm.h * alpha + mm.c), scaled by thc.c6
  if(is.null(mm) & analyte[1]=="hc"){
    mm.h <- tempGet(..., id="mm.h", data=data, default=ref.chem)
    alpha.exhaust.hc <- tempGet(..., id="alpha.exhaust.hc", data=data, default=ref.chem)
    mm.c <- tempGet(..., id="mm.c", data=data, default=ref.chem)
    temp <- mm.h * alpha.exhaust.hc + mm.c
    thc.c6 <- tempGet(..., id="thc.c6", data=data, default=ref.chem)
    mm <- temp * thc.c6
    #################################
    #error catchers for missing cases
    #################################
  }
  if(is.null(delay))
    delay <- tempGet(..., id=paste("delay.", analyte[1], sep=""), data=data)
  ###################################
  #to do
  ###################################
  #units of all inputs
  # FIX: argument was misspelt `unit.convesions`, so user-supplied unit
  # conversion tables were silently ignored by convertUnits
  conc <- convertUnits(conc, to = "vol%",
                       if.missing = settings$if.missing,
                       unit.conversions = settings$unit.conversions)
  exflow <- convertUnits(exflow, to = "L/min",
                         if.missing = settings$if.missing,
                         unit.conversions = settings$unit.conversions)
  # advance the concentration trace by the analyser delay, padding with NA
  # FIX: guard against a NULL delay (lookup may fail), which previously
  # made `if(delay[1]>=1)` error with a zero-length condition
  if(!is.null(delay) && delay[1]>=1){
    conc <- c(conc[(floor(delay[1])+1): length(conc)], rep(NA, floor(delay[1])))
  }
  # g/s = vol% * (g/mol) * (L/min) * min/s * %->fraction * mol/L * T corr
  em <- conc * mm * exflow * (1/60) * (1/100) * (1/22.415) * (273.15/293.15)
  em <- pems.element(em, name=paste("em.", analyte[1], sep=""), units="g/s")
  pemsOutput(em, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
| /R/calcEm.R | no_license | cran/pems.utils | R | false | false | 7,161 | r |
##########################
##########################
##emission calculations
##########################
##########################
#kr
#description
##########################
#functions to calculate emissions
#includes
##########################
#calcEm
#calcEMHoribaPitot
#
#to do
##########################
#comments
##########################
#
##########################
##########################
##calcEm
##########################
##########################
#kr 23/01/2012 v 0.0.6
#what it does
##########################
#calculates Emissions
#most urgent
######################################
#confirm inputs to calcEmHoribaPitot
##what exhaust parameters are needed?
##what delay offset is being applied to conc.co2
##round up/round down?
#
#urgent
##########################
#need to tidy the this.call
#handling give current
#not parent call!!!
##########################
#need to tidy calcEmHoribaPitot
#error catchers to add
#tempGet to be tidied
#fix to be tidied
#
#to do
##########################
#make test more robust?
#comments
##########################
#
################################
################################
##calcEm
################################
################################
# Calculate emissions for one analyte from a concentration time-series.
# Validates that the supplied element is a concentration (conc.<analyte>),
# infers the analyte from the element name when not given, then dispatches
# to calc.method (default calcEm_HoribaPitot) to do the calculation.
#
# conc        concentration pems.element, conventionally named conc.<analyte>
# calc.method function implementing the emission calculation
# analyte     analyte name; taken from the conc element name if NULL
# data        optional pems object / data source searched for inputs
# force       if TRUE, skip the conc.<analyte> naming consistency checks
# this.call   call to report in output; captured automatically if NULL
calcEm <- function(conc = NULL,
                   calc.method = calcEm_HoribaPitot, analyte = NULL,
                   ..., data = NULL, fun.name = "calcEm", force = FALSE,
                   this.call = NULL){
  #setup
  dots <- quos(...)   # captured so they can be spliced into calc.method below
  if(is.null(this.call))
    this.call <- match.call()
  settings <- calcChecks(fun.name, ..., data = data)
  #get what there is
  conc <- getPEMSElement(!!enquo(conc), data, ref.name="conc")
  temp <- attr(conc, "name")
  # unless forced, insist the element is named like a concentration
  if(!force){
    if(length(grep("^conc.", temp))<1)
      checkIfMissing(if.missing = settings$if.missing,
                     reply = paste("'", temp, "' should be concentration, \n\tdenoted conc.[analyte]", sep=""),
                     suggest = "select suitable input or force if sure ?calcEm", if.warning = NULL,
                     fun.name = fun.name)
  }
  temp <- gsub("^conc.", "", temp)
  if(is.null(analyte))
    analyte <- temp
  # unless forced, the element name must agree with the declared analyte
  if(!force){
    if(temp != analyte)
      checkIfMissing(if.missing = settings$if.missing,
                     reply = "Input type does not match assigned 'analyte'",
                     suggest = "select suitable input or force if sure ?calcEm", if.warning = NULL,
                     fun.name = fun.name)
  }
  #this is based on current calcVSP approach
  #but need to send it data and analyte...
  if(is.function(calc.method)){
    #strip output because calcVSP packing this...
    # (output handling is applied once below via pemsOutput)
    if("output" %in% names(dots))
      dots[[which(names(dots)=="output")]]<-NULL
    em <- eval_tidy(quo(calc.method(conc=conc, data=data, fun.name=fun.name,
                                    analyte=analyte, this.call=this.call, !!!dots)))
    return(pemsOutput(em, output = settings$output, data = data,
                      fun.name = fun.name, this.call = this.call))
  }
  # calc.method was not callable: report and return NULL
  checkIfMissing(if.missing = settings$if.missing,
                 reply = "could not run calc.method!",
                 suggest = "check ?calcEm if reason unclear", if.warning = "returning NULL",
                 fun.name = fun.name)
  return(NULL)
}
##################################
##################################
##calcEm_HoribaPitot
##################################
##################################
#various error catchers needed
# Horiba analyser / Pitot-tube emission calculation.
# Converts an exhaust concentration series (vol%) and exhaust flow rate
# (L/min) into a mass emission rate em.<analyte> in g/s, using the molar
# volume 22.415 L/mol with a 273.15/293.15 temperature correction.
#
# conc     concentration pems.element (supplied by calcEm)
# time     local time element (currently unused; kept for interface stability)
# exflow   exhaust flow rate element (converted to L/min)
# extemp, express  exhaust temperature / pressure elements (currently unused)
# analyte  analyte name, e.g. "co2"; keys the mm.<analyte> and
#          delay.<analyte> lookups
# delay    analyser response delay in seconds; the conc series is advanced
#          by floor(delay) samples when delay >= 1
# mm       molar mass of the analyte (g/mol); for "hc" it is built from
#          mm.h, mm.c, alpha.exhaust.hc and thc.c6 when not supplied
calcEm_HoribaPitot <- function(conc = NULL, time = local.time,
                               exflow = exh.flow.rate, extemp = exh.temp,
                               express = exh.press, analyte = NULL, delay = NULL, mm = NULL,
                               ..., force = FALSE, data = NULL, fun.name = "calcEm_HoribaPitot",
                               this.call = NULL){
  # FIX: the default was `force = force`, a self-referential promise that
  # errors if ever evaluated; FALSE matches calcEm's default.
  #setup
  settings <- calcChecks(fun.name, ..., data = data)
  #get all but conc - handled by calcEM...
  time <- getPEMSElement(!!enquo(time), data, if.missing="return")
  exflow <- getPEMSElement(!!enquo(exflow), data, if.missing="return")
  extemp <- getPEMSElement(!!enquo(extemp), data, if.missing="return")
  express <- getPEMSElement(!!enquo(express), data, if.missing="return")
  # if(!force & !is.null(time)){
  #check that time is equidistant
  # }
  # lookup helper: resolve id from ..., then pems constants, then data
  # columns, then the supplied default list (e.g. ref.chem)
  tempGet <- function(..., id=NULL, data=NULL, default=NULL){
    extra.args <- list(...)
    if(id %in% names(extra.args)) return(extra.args[[id]])
    if(!is.null(data)){
      if(isPEMS(data))
        if(id %in% names(getPEMSConstants(data)))
          return(getPEMSConstants(data)[[id]])
      if(id %in% names(data)) return(data[[id]])
    }
    if(!is.null(default)){
      if(id %in% names(default)) return(default[[id]])
    }
    return(NULL)
  }
  ##########################################
  #need to rethink mm.hc options
  #maybe extend this to calcVSP etc?
  ##########################################
  if(is.null(mm))
    mm <- tempGet(..., id=paste("mm.", analyte[1], sep=""), data=data, default=ref.chem)
  # hydrocarbons: molar mass built as (mm.h * alpha + mm.c), scaled by thc.c6
  if(is.null(mm) & analyte[1]=="hc"){
    mm.h <- tempGet(..., id="mm.h", data=data, default=ref.chem)
    alpha.exhaust.hc <- tempGet(..., id="alpha.exhaust.hc", data=data, default=ref.chem)
    mm.c <- tempGet(..., id="mm.c", data=data, default=ref.chem)
    temp <- mm.h * alpha.exhaust.hc + mm.c
    thc.c6 <- tempGet(..., id="thc.c6", data=data, default=ref.chem)
    mm <- temp * thc.c6
    #################################
    #error catchers for missing cases
    #################################
  }
  if(is.null(delay))
    delay <- tempGet(..., id=paste("delay.", analyte[1], sep=""), data=data)
  ###################################
  #to do
  ###################################
  #units of all inputs
  # FIX: argument was misspelt `unit.convesions`, so user-supplied unit
  # conversion tables were silently ignored by convertUnits
  conc <- convertUnits(conc, to = "vol%",
                       if.missing = settings$if.missing,
                       unit.conversions = settings$unit.conversions)
  exflow <- convertUnits(exflow, to = "L/min",
                         if.missing = settings$if.missing,
                         unit.conversions = settings$unit.conversions)
  # advance the concentration trace by the analyser delay, padding with NA
  # FIX: guard against a NULL delay (lookup may fail), which previously
  # made `if(delay[1]>=1)` error with a zero-length condition
  if(!is.null(delay) && delay[1]>=1){
    conc <- c(conc[(floor(delay[1])+1): length(conc)], rep(NA, floor(delay[1])))
  }
  # g/s = vol% * (g/mol) * (L/min) * min/s * %->fraction * mol/L * T corr
  em <- conc * mm * exflow * (1/60) * (1/100) * (1/22.415) * (273.15/293.15)
  em <- pems.element(em, name=paste("em.", analyte[1], sep=""), units="g/s")
  pemsOutput(em, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
|
# --------------------------------------------------------------------------- #
#                                                                             #
#   Adds a layer in which traffic links carrying two-way traffic appear twice #
#                                                                             #
#   New variable med_metrering indicates                                      #
#                                                                             #
#     1 = link direction WITH the road metering (referencing) direction       #
#     0 = link direction AGAINST the metering direction                       #
#                                                                             #
#   Layer: 'trafikklenker_rettet_metrering'                                   #
#                                                                             #
# --------------------------------------------------------------------------- #
library(tidyverse)
library(sf)
library(tmap)
# read the traffic links -------------------------------------------------------
path_gpkg <- '/nr_prep_links/trafikklenker_undir_maincomp.gpkg'
trafikklenker <- st_read(path_gpkg, layer = 'edges_main')
# START_NODE_OID --> END_NODE_OID follows the metering direction at extraction date
# the DIRECTION variable gives the traffic direction relative to the metering direction
# 1 = WITH    - one-way link, traffic runs WITH the metering direction
# 2 = AGAINST - one-way link, traffic runs AGAINST the metering direction
# 3 = BOTH    - link carries two-way traffic
trafikklenker %>%
  st_set_geometry(NULL) %>%
  count(DIRECTION)
# DIRECTION n
# 1 1 828
# 2 2 142
# 3 3 53041
# add metering-direction dummy -------------------------------------------------
trafikklenker <- mutate(trafikklenker, med_metrering = 1)
# duplicate links carrying two-way traffic -------------------------------------
trafikklenker_enveis <- filter(trafikklenker, DIRECTION < 3)
trafikklenker_toveis <- filter(trafikklenker, DIRECTION == 3)
trafikklenker_toveis_mot <- trafikklenker_toveis # make a copy
identical(trafikklenker_toveis_mot, trafikklenker_toveis)
# TRUE
# reverse the direction
trafikklenker_toveis_mot <- trafikklenker_toveis_mot %>%
  mutate(
    # swap start and end node
    START_NODE_OID = trafikklenker_toveis$END_NODE_OID,
    END_NODE_OID = trafikklenker_toveis$START_NODE_OID,
    # mark that the link direction is against the metering direction (dummy = 0)
    med_metrering = 0,
    # reverse the road reference
    ROADREF_START = trafikklenker_toveis$ROADREF_END,
    ROADREF_END = trafikklenker_toveis$ROADREF_START,
    # swap from and to
    from = trafikklenker_toveis$to,
    to = trafikklenker_toveis$from
  )
identical(trafikklenker_toveis_mot, trafikklenker_toveis)
# FALSE
trafikklenker <- bind_rows(trafikklenker_enveis, trafikklenker_toveis, trafikklenker_toveis_mot)
rm(trafikklenker_enveis, trafikklenker_toveis, trafikklenker_toveis_mot)
table(trafikklenker$med_metrering)
# sort by ID
trafikklenker <- trafikklenker %>%
  arrange(ID)
# now the links with two-way traffic appear twice
trafikklenker %>%
  filter(DIRECTION == 3) %>%
  head() %>%
  View()
# save to the dataset ----------------------------------------------------------
st_write(trafikklenker, dsn = path_gpkg, layer = 'trafikklenker_rettet_metrering')
| /nr_prep_links/toi/3_rettede_lenker_metrering.R | no_license | snohan/trafikkdata | R | false | false | 3,383 | r | # --------------------------------------------------------------------------- #
#                                                                             #
# Adds a layer in which traffic links carrying two-way traffic are stored     #
# twice, once per direction of travel.                                        #
#                                                                             #
# The new variable med_metrering ("with chainage") indicates                  #
#                                                                             #
# 1 = link direction WITH the chainage (road metering) direction              #
# 0 = link direction AGAINST the chainage (road metering) direction           #
#                                                                             #
# Output layer: 'trafikklenker_rettet_metrering'                              #
#                                                                             #
# --------------------------------------------------------------------------- #
library(tidyverse)
library(sf)
library(tmap)
# Read the traffic links ------------------------------------------------------
path_gpkg <- '/nr_prep_links/trafikklenker_undir_maincomp.gpkg'
trafikklenker <- st_read(path_gpkg, layer = 'edges_main')
# START_NODE_OID --> END_NODE_OID follows the chainage direction at the date
# of data extraction. The variable DIRECTION gives the traffic direction
# relative to the chainage direction:
# 1 = WITH    - one-way link, traffic runs WITH the chainage direction
# 2 = AGAINST - one-way link, traffic runs AGAINST the chainage direction
# 3 = BOTH    - the link carries two-way traffic
# Tabulate DIRECTION (drop geometry first so count() works on plain data).
trafikklenker %>%
st_set_geometry(NULL) %>%
count(DIRECTION)
# Observed counts when this script was written:
# DIRECTION n
# 1 1 828
# 2 2 142
# 3 3 53041
# Add the chainage-direction dummy; every row starts out as "with" (= 1) ------
trafikklenker <- mutate(trafikklenker, med_metrering = 1)
# Duplicate the links carrying two-way traffic --------------------------------
trafikklenker_enveis <- filter(trafikklenker, DIRECTION < 3)
trafikklenker_toveis <- filter(trafikklenker, DIRECTION == 3)
trafikklenker_toveis_mot <- trafikklenker_toveis # make a copy
identical(trafikklenker_toveis_mot, trafikklenker_toveis)
# TRUE
# Reverse the direction of the copy. Note: the right-hand sides read from the
# unmodified trafikklenker_toveis, so the swaps cannot clobber each other.
trafikklenker_toveis_mot <- trafikklenker_toveis_mot %>%
mutate(
# swap start and end node
START_NODE_OID = trafikklenker_toveis$END_NODE_OID,
END_NODE_OID = trafikklenker_toveis$START_NODE_OID,
# mark that the link direction is against the chainage direction (dummy = 0)
med_metrering = 0,
# swap the road reference
ROADREF_START = trafikklenker_toveis$ROADREF_END,
ROADREF_END = trafikklenker_toveis$ROADREF_START,
# swap from and to
from = trafikklenker_toveis$to,
to = trafikklenker_toveis$from
)
identical(trafikklenker_toveis_mot, trafikklenker_toveis)
# FALSE
trafikklenker <- bind_rows(trafikklenker_enveis, trafikklenker_toveis, trafikklenker_toveis_mot)
rm(trafikklenker_enveis, trafikklenker_toveis, trafikklenker_toveis_mot)
table(trafikklenker$med_metrering)
# Sort by link ID
trafikklenker <- trafikklenker %>%
arrange(ID)
# The two-way links now appear twice (once per med_metrering value)
trafikklenker %>%
filter(DIRECTION == 3) %>%
head() %>%
View()
# Save as a new layer in the same GeoPackage ----------------------------------
st_write(trafikklenker, dsn = path_gpkg, layer = 'trafikklenker_rettet_metrering')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{UNSDGGoals}
\alias{UNSDGGoals}
\title{UNSDGGoals}
\format{
A data frame with 17 rows and 6 variables:
\describe{
\item{\code{code}}{double. Numeric code of the SDG goal, e.g. 1}
\item{\code{name}}{character. Name of the SDG goal}
\item{\code{description}}{logical. Longer description of the goal (all values missing in the source codelist)}
\item{\code{category}}{logical. Category of the goal (all values missing in the source codelist)}
\item{\code{url}}{logical. URL for the goal (all values missing in the source codelist)}
\item{\code{status}}{character. Status of the codelist entry, e.g. "active" or "withdrawn"}
}
}
\source{
\url{https://iatistandard.org/en/iati-standard/203/codelists/}
}
\usage{
UNSDGGoals
}
\description{
A value from the top-level list of UN sustainable development
goals (SDGs) (e.g. ‘1’)
External URL: https://sustainabledevelopment.un.org/?menu=1300
}
\examples{
{
head(UNSDGGoals,10)
}
}
\keyword{datasets}
| /man/UNSDGGoals.Rd | permissive | unhcr-americas/IatiTidy | R | false | true | 854 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{UNSDGGoals}
\alias{UNSDGGoals}
\title{UNSDGGoals}
\format{
A data frame with 17 rows and 6 variables:
\describe{
\item{\code{code}}{double. Numeric code of the SDG goal, e.g. 1}
\item{\code{name}}{character. Name of the SDG goal}
\item{\code{description}}{logical. Longer description of the goal (all values missing in the source codelist)}
\item{\code{category}}{logical. Category of the goal (all values missing in the source codelist)}
\item{\code{url}}{logical. URL for the goal (all values missing in the source codelist)}
\item{\code{status}}{character. Status of the codelist entry, e.g. "active" or "withdrawn"}
}
}
\source{
\url{https://iatistandard.org/en/iati-standard/203/codelists/}
}
\usage{
UNSDGGoals
}
\description{
A value from the top-level list of UN sustainable development
goals (SDGs) (e.g. ‘1’)
External URL: https://sustainabledevelopment.un.org/?menu=1300
}
\examples{
{
head(UNSDGGoals,10)
}
}
\keyword{datasets}
|
# plot4.R: draw a 2x2 panel of household power consumption plots and save
# the result as plot4.png. load_data.R is expected to create `hpc_data`
# with a `datetime` column plus the measurement columns used below.
if (!exists("hpc_data")) {
  source('load_data.R')
}
png("plot4.png", width = 480, height = 480)
# mfcol fills column-wise: panels land at (1) top-left, (2) bottom-left,
# (3) top-right, (4) bottom-right.
par(mfcol = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(hpc_data, {
  # Panel 1: global active power over time.
  plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
  # Panel 2: the three sub-metering series overlaid. The original code
  # re-wrapped each call in with(hpc_data, ...); that was redundant inside
  # this enclosing with() block, so the columns are referenced directly.
  plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(datetime, Sub_metering_2, type = "l", col = "red")
  lines(datetime, Sub_metering_3, type = "l", col = "blue")
  legend("topright", xjust = 1, lty = 1, y.intersp = 0.8, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"))
  # Panel 3: voltage; Panel 4: global reactive power.
  plot(datetime, Voltage, type = "l")
  plot(datetime, Global_reactive_power, type = "l")
})
dev.off()
| /plot4.R | no_license | jmterrettaz/ExData_Plotting1 | R | false | false | 812 | r |
# plot4.R: draw a 2x2 panel of household power consumption plots and save
# the result as plot4.png. load_data.R is expected to create `hpc_data`
# with a `datetime` column plus the measurement columns used below.
if (!exists("hpc_data")) {
  source('load_data.R')
}
png("plot4.png", width = 480, height = 480)
# mfcol fills column-wise: panels land at (1) top-left, (2) bottom-left,
# (3) top-right, (4) bottom-right.
par(mfcol = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(hpc_data, {
  # Panel 1: global active power over time.
  plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
  # Panel 2: the three sub-metering series overlaid. The original code
  # re-wrapped each call in with(hpc_data, ...); that was redundant inside
  # this enclosing with() block, so the columns are referenced directly.
  plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(datetime, Sub_metering_2, type = "l", col = "red")
  lines(datetime, Sub_metering_3, type = "l", col = "blue")
  legend("topright", xjust = 1, lty = 1, y.intersp = 0.8, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"))
  # Panel 3: voltage; Panel 4: global reactive power.
  plot(datetime, Voltage, type = "l")
  plot(datetime, Global_reactive_power, type = "l")
})
dev.off()
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS OpsWorks
#'
#' @description
#' Welcome to the *AWS OpsWorks Stacks API Reference*. This guide provides
#' descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions
#' and data types, including common parameters and error codes.
#'
#' AWS OpsWorks Stacks is an application management service that provides
#' an integrated experience for overseeing the complete application
#' lifecycle. For information about this product, go to the [AWS
#' OpsWorks](https://aws.amazon.com/opsworks/) details page.
#'
#' **SDKs and CLI**
#'
#' The most common way to use the AWS OpsWorks Stacks API is by using the
#' AWS Command Line Interface (CLI) or by using one of the AWS SDKs to
#' implement applications in your preferred language. For more information,
#' see:
#'
#' - [AWS
#' CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html)
#'
#' - [AWS SDK for
#' Java](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html)
#'
#' - [AWS SDK for
#' .NET](https://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html)
#'
#' - [AWS SDK for PHP
#' 2](https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.OpsWorks.OpsWorksClient.html)
#'
#' - [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/v2/api/)
#'
#' - [AWS SDK for
#' Node.js](https://docs.aws.amazon.com/sdk-for-javascript/index.html)
#'
#' - [AWS SDK for
#' Python(Boto)](http://docs.pythonboto.org/en/latest/ref/opsworks.html)
#'
#' **Endpoints**
#'
#' AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You
#' must connect to one of the following endpoints. Stacks can only be
#' accessed or managed within the endpoint in which they are created.
#'
#' - opsworks.us-east-1.amazonaws.com
#'
#' - opsworks.us-east-2.amazonaws.com
#'
#' - opsworks.us-west-1.amazonaws.com
#'
#' - opsworks.us-west-2.amazonaws.com
#'
#' - opsworks.ca-central-1.amazonaws.com (API only; not available in the
#' AWS console)
#'
#' - opsworks.eu-west-1.amazonaws.com
#'
#' - opsworks.eu-west-2.amazonaws.com
#'
#' - opsworks.eu-west-3.amazonaws.com
#'
#' - opsworks.eu-central-1.amazonaws.com
#'
#' - opsworks.ap-northeast-1.amazonaws.com
#'
#' - opsworks.ap-northeast-2.amazonaws.com
#'
#' - opsworks.ap-south-1.amazonaws.com
#'
#' - opsworks.ap-southeast-1.amazonaws.com
#'
#' - opsworks.ap-southeast-2.amazonaws.com
#'
#' - opsworks.sa-east-1.amazonaws.com
#'
#' **Chef Versions**
#'
#' When you call [`create_stack`][opsworks_create_stack],
#' [`clone_stack`][opsworks_clone_stack], or
#' [`update_stack`][opsworks_update_stack] we recommend you use the
#' `ConfigurationManager` parameter to specify the Chef version. The
#' recommended and default value for Linux stacks is currently 12. Windows
#' stacks use Chef 12.2. For more information, see [Chef
#' Versions](https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html).
#'
#' You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We
#' recommend migrating your existing Linux stacks to Chef 12 as soon as
#' possible.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- opsworks(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- opsworks()
#' svc$assign_instance(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=opsworks_assign_instance]{assign_instance} \tab Assign a registered instance to a layer\cr
#' \link[=opsworks_assign_volume]{assign_volume} \tab Assigns one of the stack's registered Amazon EBS volumes to a specified instance\cr
#' \link[=opsworks_associate_elastic_ip]{associate_elastic_ip} \tab Associates one of the stack's registered Elastic IP addresses with a specified instance\cr
#' \link[=opsworks_attach_elastic_load_balancer]{attach_elastic_load_balancer} \tab Attaches an Elastic Load Balancing load balancer to a specified layer\cr
#' \link[=opsworks_clone_stack]{clone_stack} \tab Creates a clone of a specified stack\cr
#' \link[=opsworks_create_app]{create_app} \tab Creates an app for a specified stack\cr
#' \link[=opsworks_create_deployment]{create_deployment} \tab Runs deployment or stack commands\cr
#' \link[=opsworks_create_instance]{create_instance} \tab Creates an instance in a specified stack\cr
#' \link[=opsworks_create_layer]{create_layer} \tab Creates a layer\cr
#' \link[=opsworks_create_stack]{create_stack} \tab Creates a new stack\cr
#' \link[=opsworks_create_user_profile]{create_user_profile} \tab Creates a new user profile\cr
#' \link[=opsworks_delete_app]{delete_app} \tab Deletes a specified app\cr
#' \link[=opsworks_delete_instance]{delete_instance} \tab Deletes a specified instance, which terminates the associated Amazon EC2 instance\cr
#' \link[=opsworks_delete_layer]{delete_layer} \tab Deletes a specified layer\cr
#' \link[=opsworks_delete_stack]{delete_stack} \tab Deletes a specified stack\cr
#' \link[=opsworks_delete_user_profile]{delete_user_profile} \tab Deletes a user profile\cr
#' \link[=opsworks_deregister_ecs_cluster]{deregister_ecs_cluster} \tab Deregisters a specified Amazon ECS cluster from a stack\cr
#' \link[=opsworks_deregister_elastic_ip]{deregister_elastic_ip} \tab Deregisters a specified Elastic IP address\cr
#' \link[=opsworks_deregister_instance]{deregister_instance} \tab Deregister a registered Amazon EC2 or on-premises instance\cr
#' \link[=opsworks_deregister_rds_db_instance]{deregister_rds_db_instance} \tab Deregisters an Amazon RDS instance\cr
#' \link[=opsworks_deregister_volume]{deregister_volume} \tab Deregisters an Amazon EBS volume\cr
#' \link[=opsworks_describe_agent_versions]{describe_agent_versions} \tab Describes the available AWS OpsWorks Stacks agent versions\cr
#' \link[=opsworks_describe_apps]{describe_apps} \tab Requests a description of a specified set of apps\cr
#' \link[=opsworks_describe_commands]{describe_commands} \tab Describes the results of specified commands\cr
#' \link[=opsworks_describe_deployments]{describe_deployments} \tab Requests a description of a specified set of deployments\cr
#' \link[=opsworks_describe_ecs_clusters]{describe_ecs_clusters} \tab Describes Amazon ECS clusters that are registered with a stack\cr
#' \link[=opsworks_describe_elastic_ips]{describe_elastic_ips} \tab Describes Elastic IP addresses\cr
#' \link[=opsworks_describe_elastic_load_balancers]{describe_elastic_load_balancers} \tab Describes a stack's Elastic Load Balancing instances\cr
#' \link[=opsworks_describe_instances]{describe_instances} \tab Requests a description of a set of instances\cr
#' \link[=opsworks_describe_layers]{describe_layers} \tab Requests a description of one or more layers in a specified stack\cr
#' \link[=opsworks_describe_load_based_auto_scaling]{describe_load_based_auto_scaling} \tab Describes load-based auto scaling configurations for specified layers\cr
#' \link[=opsworks_describe_my_user_profile]{describe_my_user_profile} \tab Describes a user's SSH information\cr
#' \link[=opsworks_describe_operating_systems]{describe_operating_systems} \tab Describes the operating systems that are supported by AWS OpsWorks Stacks\cr
#' \link[=opsworks_describe_permissions]{describe_permissions} \tab Describes the permissions for a specified stack\cr
#' \link[=opsworks_describe_raid_arrays]{describe_raid_arrays} \tab Describe an instance's RAID arrays\cr
#' \link[=opsworks_describe_rds_db_instances]{describe_rds_db_instances} \tab Describes Amazon RDS instances\cr
#' \link[=opsworks_describe_service_errors]{describe_service_errors} \tab Describes AWS OpsWorks Stacks service errors\cr
#' \link[=opsworks_describe_stack_provisioning_parameters]{describe_stack_provisioning_parameters} \tab Requests a description of a stack's provisioning parameters\cr
#' \link[=opsworks_describe_stacks]{describe_stacks} \tab Requests a description of one or more stacks\cr
#' \link[=opsworks_describe_stack_summary]{describe_stack_summary} \tab Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online\cr
#' \link[=opsworks_describe_time_based_auto_scaling]{describe_time_based_auto_scaling} \tab Describes time-based auto scaling configurations for specified instances\cr
#' \link[=opsworks_describe_user_profiles]{describe_user_profiles} \tab Describe specified users\cr
#' \link[=opsworks_describe_volumes]{describe_volumes} \tab Describes an instance's Amazon EBS volumes\cr
#' \link[=opsworks_detach_elastic_load_balancer]{detach_elastic_load_balancer} \tab Detaches a specified Elastic Load Balancing instance from its layer\cr
#' \link[=opsworks_disassociate_elastic_ip]{disassociate_elastic_ip} \tab Disassociates an Elastic IP address from its instance\cr
#' \link[=opsworks_get_hostname_suggestion]{get_hostname_suggestion} \tab Gets a generated host name for the specified layer, based on the current host name theme\cr
#' \link[=opsworks_grant_access]{grant_access} \tab This action can be used only with Windows stacks\cr
#' \link[=opsworks_list_tags]{list_tags} \tab Returns a list of tags that are applied to the specified stack or layer\cr
#' \link[=opsworks_reboot_instance]{reboot_instance} \tab Reboots a specified instance\cr
#' \link[=opsworks_register_ecs_cluster]{register_ecs_cluster} \tab Registers a specified Amazon ECS cluster with a stack\cr
#' \link[=opsworks_register_elastic_ip]{register_elastic_ip} \tab Registers an Elastic IP address with a specified stack\cr
#' \link[=opsworks_register_instance]{register_instance} \tab Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack\cr
#' \link[=opsworks_register_rds_db_instance]{register_rds_db_instance} \tab Registers an Amazon RDS instance with a stack\cr
#' \link[=opsworks_register_volume]{register_volume} \tab Registers an Amazon EBS volume with a specified stack\cr
#' \link[=opsworks_set_load_based_auto_scaling]{set_load_based_auto_scaling} \tab Specify the load-based auto scaling configuration for a specified layer\cr
#' \link[=opsworks_set_permission]{set_permission} \tab Specifies a user's permissions\cr
#' \link[=opsworks_set_time_based_auto_scaling]{set_time_based_auto_scaling} \tab Specify the time-based auto scaling configuration for a specified instance\cr
#' \link[=opsworks_start_instance]{start_instance} \tab Starts a specified instance\cr
#' \link[=opsworks_start_stack]{start_stack} \tab Starts a stack's instances\cr
#' \link[=opsworks_stop_instance]{stop_instance} \tab Stops a specified instance\cr
#' \link[=opsworks_stop_stack]{stop_stack} \tab Stops a specified stack\cr
#' \link[=opsworks_tag_resource]{tag_resource} \tab Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks\cr
#' \link[=opsworks_unassign_instance]{unassign_instance} \tab Unassigns a registered instance from all layers that are using the instance\cr
#' \link[=opsworks_unassign_volume]{unassign_volume} \tab Unassigns an assigned Amazon EBS volume\cr
#' \link[=opsworks_untag_resource]{untag_resource} \tab Removes tags from a specified stack or layer\cr
#' \link[=opsworks_update_app]{update_app} \tab Updates a specified app\cr
#' \link[=opsworks_update_elastic_ip]{update_elastic_ip} \tab Updates a registered Elastic IP address's name\cr
#' \link[=opsworks_update_instance]{update_instance} \tab Updates a specified instance\cr
#' \link[=opsworks_update_layer]{update_layer} \tab Updates a specified layer\cr
#' \link[=opsworks_update_my_user_profile]{update_my_user_profile} \tab Updates a user's SSH public key\cr
#' \link[=opsworks_update_rds_db_instance]{update_rds_db_instance} \tab Updates an Amazon RDS instance\cr
#' \link[=opsworks_update_stack]{update_stack} \tab Updates a specified stack\cr
#' \link[=opsworks_update_user_profile]{update_user_profile} \tab Updates a specified user profile\cr
#' \link[=opsworks_update_volume]{update_volume} \tab Updates an Amazon EBS volume's name or mount point
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname opsworks
#' @export
opsworks <- function(config = list()) {
  # Overlay the caller-supplied configuration on the generated operation set
  # and return the resulting client object.
  set_config(.opsworks$operations, config)
}
# Private API objects: metadata, handlers, interfaces, etc.
# (generated; holds everything the public opsworks() constructor needs)
.opsworks <- list()
.opsworks$operations <- list()
# Static service metadata: per-partition endpoint templates (standard AWS,
# China "cn-*", and the isolated "us-iso-*"/"us-isob-*" partitions), the API
# version, and the JSON-RPC target prefix used when building requests.
.opsworks$metadata <- list(
service_name = "opsworks",
endpoints = list("*" = list(endpoint = "opsworks.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "opsworks.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "opsworks.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "opsworks.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "OpsWorks",
api_version = "2013-02-18",
signing_name = "opsworks",
json_version = "1.1",
target_prefix = "OpsWorks_20130218"
)
# Build the low-level service object: JSON-RPC protocol handlers with
# version 4 ("v4") request signing, bound to the metadata above.
.opsworks$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.opsworks$metadata, handlers, config)
}
| /paws/R/opsworks_service.R | permissive | williazo/paws | R | false | false | 13,759 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS OpsWorks
#'
#' @description
#' Welcome to the *AWS OpsWorks Stacks API Reference*. This guide provides
#' descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions
#' and data types, including common parameters and error codes.
#'
#' AWS OpsWorks Stacks is an application management service that provides
#' an integrated experience for overseeing the complete application
#' lifecycle. For information about this product, go to the [AWS
#' OpsWorks](https://aws.amazon.com/opsworks/) details page.
#'
#' **SDKs and CLI**
#'
#' The most common way to use the AWS OpsWorks Stacks API is by using the
#' AWS Command Line Interface (CLI) or by using one of the AWS SDKs to
#' implement applications in your preferred language. For more information,
#' see:
#'
#' - [AWS
#' CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html)
#'
#' - [AWS SDK for
#' Java](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html)
#'
#' - [AWS SDK for
#' .NET](https://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html)
#'
#' - [AWS SDK for PHP
#' 2](https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.OpsWorks.OpsWorksClient.html)
#'
#' - [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/v2/api/)
#'
#' - [AWS SDK for
#' Node.js](https://docs.aws.amazon.com/sdk-for-javascript/index.html)
#'
#' - [AWS SDK for
#' Python(Boto)](http://docs.pythonboto.org/en/latest/ref/opsworks.html)
#'
#' **Endpoints**
#'
#' AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You
#' must connect to one of the following endpoints. Stacks can only be
#' accessed or managed within the endpoint in which they are created.
#'
#' - opsworks.us-east-1.amazonaws.com
#'
#' - opsworks.us-east-2.amazonaws.com
#'
#' - opsworks.us-west-1.amazonaws.com
#'
#' - opsworks.us-west-2.amazonaws.com
#'
#' - opsworks.ca-central-1.amazonaws.com (API only; not available in the
#' AWS console)
#'
#' - opsworks.eu-west-1.amazonaws.com
#'
#' - opsworks.eu-west-2.amazonaws.com
#'
#' - opsworks.eu-west-3.amazonaws.com
#'
#' - opsworks.eu-central-1.amazonaws.com
#'
#' - opsworks.ap-northeast-1.amazonaws.com
#'
#' - opsworks.ap-northeast-2.amazonaws.com
#'
#' - opsworks.ap-south-1.amazonaws.com
#'
#' - opsworks.ap-southeast-1.amazonaws.com
#'
#' - opsworks.ap-southeast-2.amazonaws.com
#'
#' - opsworks.sa-east-1.amazonaws.com
#'
#' **Chef Versions**
#'
#' When you call [`create_stack`][opsworks_create_stack],
#' [`clone_stack`][opsworks_clone_stack], or
#' [`update_stack`][opsworks_update_stack] we recommend you use the
#' `ConfigurationManager` parameter to specify the Chef version. The
#' recommended and default value for Linux stacks is currently 12. Windows
#' stacks use Chef 12.2. For more information, see [Chef
#' Versions](https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html).
#'
#' You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We
#' recommend migrating your existing Linux stacks to Chef 12 as soon as
#' possible.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- opsworks(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- opsworks()
#' svc$assign_instance(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=opsworks_assign_instance]{assign_instance} \tab Assign a registered instance to a layer\cr
#' \link[=opsworks_assign_volume]{assign_volume} \tab Assigns one of the stack's registered Amazon EBS volumes to a specified instance\cr
#' \link[=opsworks_associate_elastic_ip]{associate_elastic_ip} \tab Associates one of the stack's registered Elastic IP addresses with a specified instance\cr
#' \link[=opsworks_attach_elastic_load_balancer]{attach_elastic_load_balancer} \tab Attaches an Elastic Load Balancing load balancer to a specified layer\cr
#' \link[=opsworks_clone_stack]{clone_stack} \tab Creates a clone of a specified stack\cr
#' \link[=opsworks_create_app]{create_app} \tab Creates an app for a specified stack\cr
#' \link[=opsworks_create_deployment]{create_deployment} \tab Runs deployment or stack commands\cr
#' \link[=opsworks_create_instance]{create_instance} \tab Creates an instance in a specified stack\cr
#' \link[=opsworks_create_layer]{create_layer} \tab Creates a layer\cr
#' \link[=opsworks_create_stack]{create_stack} \tab Creates a new stack\cr
#' \link[=opsworks_create_user_profile]{create_user_profile} \tab Creates a new user profile\cr
#' \link[=opsworks_delete_app]{delete_app} \tab Deletes a specified app\cr
#' \link[=opsworks_delete_instance]{delete_instance} \tab Deletes a specified instance, which terminates the associated Amazon EC2 instance\cr
#' \link[=opsworks_delete_layer]{delete_layer} \tab Deletes a specified layer\cr
#' \link[=opsworks_delete_stack]{delete_stack} \tab Deletes a specified stack\cr
#' \link[=opsworks_delete_user_profile]{delete_user_profile} \tab Deletes a user profile\cr
#' \link[=opsworks_deregister_ecs_cluster]{deregister_ecs_cluster} \tab Deregisters a specified Amazon ECS cluster from a stack\cr
#' \link[=opsworks_deregister_elastic_ip]{deregister_elastic_ip} \tab Deregisters a specified Elastic IP address\cr
#' \link[=opsworks_deregister_instance]{deregister_instance} \tab Deregister a registered Amazon EC2 or on-premises instance\cr
#' \link[=opsworks_deregister_rds_db_instance]{deregister_rds_db_instance} \tab Deregisters an Amazon RDS instance\cr
#' \link[=opsworks_deregister_volume]{deregister_volume} \tab Deregisters an Amazon EBS volume\cr
#' \link[=opsworks_describe_agent_versions]{describe_agent_versions} \tab Describes the available AWS OpsWorks Stacks agent versions\cr
#' \link[=opsworks_describe_apps]{describe_apps} \tab Requests a description of a specified set of apps\cr
#' \link[=opsworks_describe_commands]{describe_commands} \tab Describes the results of specified commands\cr
#' \link[=opsworks_describe_deployments]{describe_deployments} \tab Requests a description of a specified set of deployments\cr
#' \link[=opsworks_describe_ecs_clusters]{describe_ecs_clusters} \tab Describes Amazon ECS clusters that are registered with a stack\cr
#' \link[=opsworks_describe_elastic_ips]{describe_elastic_ips} \tab Describes Elastic IP addresses\cr
#' \link[=opsworks_describe_elastic_load_balancers]{describe_elastic_load_balancers} \tab Describes a stack's Elastic Load Balancing instances\cr
#' \link[=opsworks_describe_instances]{describe_instances} \tab Requests a description of a set of instances\cr
#' \link[=opsworks_describe_layers]{describe_layers} \tab Requests a description of one or more layers in a specified stack\cr
#' \link[=opsworks_describe_load_based_auto_scaling]{describe_load_based_auto_scaling} \tab Describes load-based auto scaling configurations for specified layers\cr
#' \link[=opsworks_describe_my_user_profile]{describe_my_user_profile} \tab Describes a user's SSH information\cr
#' \link[=opsworks_describe_operating_systems]{describe_operating_systems} \tab Describes the operating systems that are supported by AWS OpsWorks Stacks\cr
#' \link[=opsworks_describe_permissions]{describe_permissions} \tab Describes the permissions for a specified stack\cr
#' \link[=opsworks_describe_raid_arrays]{describe_raid_arrays} \tab Describe an instance's RAID arrays\cr
#' \link[=opsworks_describe_rds_db_instances]{describe_rds_db_instances} \tab Describes Amazon RDS instances\cr
#' \link[=opsworks_describe_service_errors]{describe_service_errors} \tab Describes AWS OpsWorks Stacks service errors\cr
#' \link[=opsworks_describe_stack_provisioning_parameters]{describe_stack_provisioning_parameters} \tab Requests a description of a stack's provisioning parameters\cr
#' \link[=opsworks_describe_stacks]{describe_stacks} \tab Requests a description of one or more stacks\cr
#' \link[=opsworks_describe_stack_summary]{describe_stack_summary} \tab Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online\cr
#' \link[=opsworks_describe_time_based_auto_scaling]{describe_time_based_auto_scaling} \tab Describes time-based auto scaling configurations for specified instances\cr
#' \link[=opsworks_describe_user_profiles]{describe_user_profiles} \tab Describe specified users\cr
#' \link[=opsworks_describe_volumes]{describe_volumes} \tab Describes an instance's Amazon EBS volumes\cr
#' \link[=opsworks_detach_elastic_load_balancer]{detach_elastic_load_balancer} \tab Detaches a specified Elastic Load Balancing instance from its layer\cr
#' \link[=opsworks_disassociate_elastic_ip]{disassociate_elastic_ip} \tab Disassociates an Elastic IP address from its instance\cr
#' \link[=opsworks_get_hostname_suggestion]{get_hostname_suggestion} \tab Gets a generated host name for the specified layer, based on the current host name theme\cr
#' \link[=opsworks_grant_access]{grant_access} \tab This action can be used only with Windows stacks\cr
#' \link[=opsworks_list_tags]{list_tags} \tab Returns a list of tags that are applied to the specified stack or layer\cr
#' \link[=opsworks_reboot_instance]{reboot_instance} \tab Reboots a specified instance\cr
#' \link[=opsworks_register_ecs_cluster]{register_ecs_cluster} \tab Registers a specified Amazon ECS cluster with a stack\cr
#' \link[=opsworks_register_elastic_ip]{register_elastic_ip} \tab Registers an Elastic IP address with a specified stack\cr
#' \link[=opsworks_register_instance]{register_instance} \tab Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack\cr
#' \link[=opsworks_register_rds_db_instance]{register_rds_db_instance} \tab Registers an Amazon RDS instance with a stack\cr
#' \link[=opsworks_register_volume]{register_volume} \tab Registers an Amazon EBS volume with a specified stack\cr
#' \link[=opsworks_set_load_based_auto_scaling]{set_load_based_auto_scaling} \tab Specify the load-based auto scaling configuration for a specified layer\cr
#' \link[=opsworks_set_permission]{set_permission} \tab Specifies a user's permissions\cr
#' \link[=opsworks_set_time_based_auto_scaling]{set_time_based_auto_scaling} \tab Specify the time-based auto scaling configuration for a specified instance\cr
#' \link[=opsworks_start_instance]{start_instance} \tab Starts a specified instance\cr
#' \link[=opsworks_start_stack]{start_stack} \tab Starts a stack's instances\cr
#' \link[=opsworks_stop_instance]{stop_instance} \tab Stops a specified instance\cr
#' \link[=opsworks_stop_stack]{stop_stack} \tab Stops a specified stack\cr
#' \link[=opsworks_tag_resource]{tag_resource} \tab Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks\cr
#' \link[=opsworks_unassign_instance]{unassign_instance} \tab Unassigns a registered instance from all layers that are using the instance\cr
#' \link[=opsworks_unassign_volume]{unassign_volume} \tab Unassigns an assigned Amazon EBS volume\cr
#' \link[=opsworks_untag_resource]{untag_resource} \tab Removes tags from a specified stack or layer\cr
#' \link[=opsworks_update_app]{update_app} \tab Updates a specified app\cr
#' \link[=opsworks_update_elastic_ip]{update_elastic_ip} \tab Updates a registered Elastic IP address's name\cr
#' \link[=opsworks_update_instance]{update_instance} \tab Updates a specified instance\cr
#' \link[=opsworks_update_layer]{update_layer} \tab Updates a specified layer\cr
#' \link[=opsworks_update_my_user_profile]{update_my_user_profile} \tab Updates a user's SSH public key\cr
#' \link[=opsworks_update_rds_db_instance]{update_rds_db_instance} \tab Updates an Amazon RDS instance\cr
#' \link[=opsworks_update_stack]{update_stack} \tab Updates a specified stack\cr
#' \link[=opsworks_update_user_profile]{update_user_profile} \tab Updates a specified user profile\cr
#' \link[=opsworks_update_volume]{update_volume} \tab Updates an Amazon EBS volume's name or mount point
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname opsworks
#' @export
opsworks <- function(config = list()) {
  # Build a client for the OpsWorks service: start from the generated
  # operation table and layer the caller-supplied configuration on top.
  client <- set_config(.opsworks$operations, config)
  client
}
# Private API objects: metadata, handlers, interfaces, etc.
# .opsworks bundles everything the generated client needs: the operation
# table (populated elsewhere), service metadata used for endpoint
# resolution and request signing, and a service-object constructor.
.opsworks <- list()
.opsworks$operations <- list()
.opsworks$metadata <- list(
service_name = "opsworks",
# Region-pattern -> endpoint template map; covers the commercial ("*"),
# China ("cn-*"), and US isolated C2S/SC2S ("us-iso-*"/"us-isob-*") partitions.
endpoints = list("*" = list(endpoint = "opsworks.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "opsworks.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "opsworks.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "opsworks.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "OpsWorks",
api_version = "2013-02-18",
signing_name = "opsworks",
json_version = "1.1",
target_prefix = "OpsWorks_20130218"
)
# Construct the low-level service object: JSON-RPC 1.1 protocol with
# AWS Signature Version 4 request signing.
.opsworks$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.opsworks$metadata, handlers, config)
}
|
# SET DIRECTORY TO SOURCE FILE LOCATION: Session> Set Working Directory> To Source File Location
########################
### Read in raw Data ###
########################
library(tidyverse)
# Interim data set produced by an upstream step; the path is relative to
# this script's location.
covid <- read.csv("../../data/interim/full_data.csv")
#####################
### Data Cleaning ###
#####################
# Reformat dates from excel format
# These policy-date columns arrive as Excel serial day numbers; per the
# loop below, a value of 0 encodes "policy never enacted".
date_cols <- c("Closed.other.non.essential.businesses", "Began.to.reopen.businesses.statewide", "Stay.at.home..shelter.in.place" , "End.stay.at.home.shelter.in.place" , "Mandate.face.mask.use.by.all.individuals.in.public.spaces", "Mandate.face.mask.use.by.employees.in.public.facing.businesses", "State.ended.statewide.mask.use.by.individuals.in.public.spaces" )
# Convert missing date values to NA
for (c in date_cols) {
covid[[c]] <- ifelse(covid[[c]] == 0, NA, covid[[c]])
}
# "1899-12-30" is the Excel-for-Windows date epoch, mapping the serial
# numbers onto calendar dates.
for (c in date_cols) {
covid[[c]] <- as.Date(covid[[c]], origin = "1899-12-30")
}
# Convert State of Emergency from "2020-02-30" to Date format (for some reason I couldn't just add it to `date_cols`)
covid$`State.of.emergency`<- as.Date(covid$`State.of.emergency`)
############################################################
### Build indicators for state-level policy date columns ###
############################################################
# Each "No*" indicator is 1 when the corresponding policy date is missing
# (i.e. the policy was never enacted); SIP/EndedSIP flag presence instead.
covid$NoStateEmergency <- ifelse(is.na(covid$`State.of.emergency`), 1, 0)
covid$NoCloseBusi <- ifelse(is.na(covid$`Closed.other.non.essential.businesses`), 1, 0)
covid$NoReopenBusi <- ifelse(is.na(covid$`Began.to.reopen.businesses.statewide`), 1, 0)
covid$SIP <- ifelse(is.na(covid$`Stay.at.home..shelter.in.place`), 0, 1)
covid$NoENDSIP <- ifelse(is.na(covid$`End.stay.at.home.shelter.in.place`), 1, 0)
covid$EndedSIP <- ifelse(is.na(covid$`End.stay.at.home.shelter.in.place`), 0, 1)
covid$NoFaceMask <- ifelse(is.na(covid$`Mandate.face.mask.use.by.all.individuals.in.public.spaces`), 1, 0)
covid$NoFaceMaskEmploy <- ifelse(is.na(covid$`Mandate.face.mask.use.by.employees.in.public.facing.businesses`), 1, 0)
covid$NoEndFaceMask <- ifelse(is.na(covid$`State.ended.statewide.mask.use.by.individuals.in.public.spaces`), 1, 0)
##########################
### Transform Features ###
##########################
# Differences between Date columns yield `difftime` values (days elapsed).
# state emergency should be reasonable marker of when COVID began entering state
# SIP - StateEmergency (how long before people had to shelter in place)
covid$SIP_StateEmergency <- covid$`Stay.at.home..shelter.in.place` - covid$`State.of.emergency`
# CloseNonEssential - StateEmergency (how long were non-essential businesses open for )
covid$CloseNonEssential_StateEmergency <- covid$`Closed.other.non.essential.businesses` - covid$`State.of.emergency`
# BeganReopenBusiness - StateEmergency (how long before business reopened)
covid$ReopenBusi_StateEmergency <- covid$`Began.to.reopen.businesses.statewide` - covid$`State.of.emergency`
# MandateFaceMask - StateEmergency (how long before masks were mandatory)
covid$FaceMask_StateEmergency <- covid$`Mandate.face.mask.use.by.all.individuals.in.public.spaces` - covid$`State.of.emergency`
# MandateFaceMaskEmployee - StateEmergency (How long before employees in public businesses needed to wear mask)
covid$FaceMaskEmploy_StateEmergency <- covid$`Mandate.face.mask.use.by.employees.in.public.facing.businesses` - covid$`State.of.emergency`
# BeganReopenBusuness - CloseBusiness (how long businesses were closed for)
covid$ReopenBusi_CloseBusi <- covid$`Began.to.reopen.businesses.statewide` - covid$`Closed.other.non.essential.businesses`
# ENDSIP - SIP (how long did people shelter in place)
covid$ENDSIP_SIP <- covid$`End.stay.at.home.shelter.in.place` - covid$`Stay.at.home..shelter.in.place`
# CurrentDate - EndSIP (for states who ended SIP how long has it been)
# NOTE(review): `origin` is ignored when as.Date() receives a character
# string, so it is a no-op here -- confirm "2020-10-26" is the intended
# snapshot date.
covid$Curr_ENDSIP <- as.Date("2020-10-26", origin = "1899-12-30") - covid$`End.stay.at.home.shelter.in.place`
# CurrentDate - StateEndMaskReq (very few points prob not useful)
covid$Curr_ENDMask <- as.Date("2020-10-26", origin = "1899-12-30") - covid$`State.ended.statewide.mask.use.by.individuals.in.public.spaces`
# Some necessary transformations for EDA, needed for plot_usmap() function
covid <- covid %>% rename(state = State)
covid <- mutate(covid, NoFaceMask_factor = factor(NoFaceMask))
covid <- mutate(covid, NoFaceMaskEmploy_factor = factor(NoFaceMaskEmploy))
covid <- mutate(covid, SIP_factor = factor(SIP))
# Need log case rates for lm
# NOTE(review): log() of zero cases yields -Inf -- verify the modeling step
# tolerates this or filter such rows beforehand.
covid <- mutate(covid, log_case_rates = log(Cases.in.Last.7.Days))
##########################################
###Save as R object in data/processed ###
##########################################
saveRDS(covid, file = "../../data/processed/main_state_data.RDS")
| /covid-stats-proj/src/data/feature_engineering.R | no_license | jearcher/Data-Science | R | false | false | 4,659 | r | # SET DIRECTORY TO SOURCE FILE LOCATION: Session> Set Working Directory> To Source File Location
########################
### Read in raw Data ###
########################
library(tidyverse)
# Interim data set produced by an upstream step; the path is relative to
# this script's location.
covid <- read.csv("../../data/interim/full_data.csv")
#####################
### Data Cleaning ###
#####################
# Reformat dates from excel format
# These policy-date columns arrive as Excel serial day numbers; per the
# loop below, a value of 0 encodes "policy never enacted".
date_cols <- c("Closed.other.non.essential.businesses", "Began.to.reopen.businesses.statewide", "Stay.at.home..shelter.in.place" , "End.stay.at.home.shelter.in.place" , "Mandate.face.mask.use.by.all.individuals.in.public.spaces", "Mandate.face.mask.use.by.employees.in.public.facing.businesses", "State.ended.statewide.mask.use.by.individuals.in.public.spaces" )
# Convert missing date values to NA
for (c in date_cols) {
covid[[c]] <- ifelse(covid[[c]] == 0, NA, covid[[c]])
}
# "1899-12-30" is the Excel-for-Windows date epoch, mapping the serial
# numbers onto calendar dates.
for (c in date_cols) {
covid[[c]] <- as.Date(covid[[c]], origin = "1899-12-30")
}
# Convert State of Emergency from "2020-02-30" to Date format (for some reason I couldn't just add it to `date_cols`)
covid$`State.of.emergency`<- as.Date(covid$`State.of.emergency`)
############################################################
### Build indicators for state-level policy date columns ###
############################################################
# Each "No*" indicator is 1 when the corresponding policy date is missing
# (i.e. the policy was never enacted); SIP/EndedSIP flag presence instead.
covid$NoStateEmergency <- ifelse(is.na(covid$`State.of.emergency`), 1, 0)
covid$NoCloseBusi <- ifelse(is.na(covid$`Closed.other.non.essential.businesses`), 1, 0)
covid$NoReopenBusi <- ifelse(is.na(covid$`Began.to.reopen.businesses.statewide`), 1, 0)
covid$SIP <- ifelse(is.na(covid$`Stay.at.home..shelter.in.place`), 0, 1)
covid$NoENDSIP <- ifelse(is.na(covid$`End.stay.at.home.shelter.in.place`), 1, 0)
covid$EndedSIP <- ifelse(is.na(covid$`End.stay.at.home.shelter.in.place`), 0, 1)
covid$NoFaceMask <- ifelse(is.na(covid$`Mandate.face.mask.use.by.all.individuals.in.public.spaces`), 1, 0)
covid$NoFaceMaskEmploy <- ifelse(is.na(covid$`Mandate.face.mask.use.by.employees.in.public.facing.businesses`), 1, 0)
covid$NoEndFaceMask <- ifelse(is.na(covid$`State.ended.statewide.mask.use.by.individuals.in.public.spaces`), 1, 0)
##########################
### Transform Features ###
##########################
# Differences between Date columns yield `difftime` values (days elapsed).
# state emergency should be reasonable marker of when COVID began entering state
# SIP - StateEmergency (how long before people had to shelter in place)
covid$SIP_StateEmergency <- covid$`Stay.at.home..shelter.in.place` - covid$`State.of.emergency`
# CloseNonEssential - StateEmergency (how long were non-essential businesses open for )
covid$CloseNonEssential_StateEmergency <- covid$`Closed.other.non.essential.businesses` - covid$`State.of.emergency`
# BeganReopenBusiness - StateEmergency (how long before business reopened)
covid$ReopenBusi_StateEmergency <- covid$`Began.to.reopen.businesses.statewide` - covid$`State.of.emergency`
# MandateFaceMask - StateEmergency (how long before masks were mandatory)
covid$FaceMask_StateEmergency <- covid$`Mandate.face.mask.use.by.all.individuals.in.public.spaces` - covid$`State.of.emergency`
# MandateFaceMaskEmployee - StateEmergency (How long before employees in public businesses needed to wear mask)
covid$FaceMaskEmploy_StateEmergency <- covid$`Mandate.face.mask.use.by.employees.in.public.facing.businesses` - covid$`State.of.emergency`
# BeganReopenBusuness - CloseBusiness (how long businesses were closed for)
covid$ReopenBusi_CloseBusi <- covid$`Began.to.reopen.businesses.statewide` - covid$`Closed.other.non.essential.businesses`
# ENDSIP - SIP (how long did people shelter in place)
covid$ENDSIP_SIP <- covid$`End.stay.at.home.shelter.in.place` - covid$`Stay.at.home..shelter.in.place`
# CurrentDate - EndSIP (for states who ended SIP how long has it been)
# NOTE(review): `origin` is ignored when as.Date() receives a character
# string, so it is a no-op here -- confirm "2020-10-26" is the intended
# snapshot date.
covid$Curr_ENDSIP <- as.Date("2020-10-26", origin = "1899-12-30") - covid$`End.stay.at.home.shelter.in.place`
# CurrentDate - StateEndMaskReq (very few points prob not useful)
covid$Curr_ENDMask <- as.Date("2020-10-26", origin = "1899-12-30") - covid$`State.ended.statewide.mask.use.by.individuals.in.public.spaces`
# Some necessary transformations for EDA, needed for plot_usmap() function
covid <- covid %>% rename(state = State)
covid <- mutate(covid, NoFaceMask_factor = factor(NoFaceMask))
covid <- mutate(covid, NoFaceMaskEmploy_factor = factor(NoFaceMaskEmploy))
covid <- mutate(covid, SIP_factor = factor(SIP))
# Need log case rates for lm
# NOTE(review): log() of zero cases yields -Inf -- verify the modeling step
# tolerates this or filter such rows beforehand.
covid <- mutate(covid, log_case_rates = log(Cases.in.Last.7.Days))
##########################################
###Save as R object in data/processed ###
##########################################
saveRDS(covid, file = "../../data/processed/main_state_data.RDS")
|
#### Figure 7 ADVI has a bad fit in Horseshoe logistic
# Fit a regularized-horseshoe logistic regression to the leukemia data with
# both NUTS (reference posterior) and ADVI, then compare the two fits.
library(rstan)
options(mc.cores = parallel::detectCores())
# Load predictors `x` and binary response `y`
datafile <- 'leukemia.RData'
load(datafile, verbose = TRUE)
x <- scale(x)
d <- NCOL(x)  # number of predictors
n <- NROW(x)  # number of observations
# compile the model
stanmodel <- stan_model('glm_bernoulli_rhs.stan')
# Prior hyperparameters for the regularized horseshoe
scale_icept <- 10
slab_scale <- 5
slab_df <- 4
# data and prior
tau0 <- 1/(d-1) * 2/sqrt(n) # should be a reasonable scale for tau (see Piironen&Vehtari 2017, EJS paper)
scale_global <- tau0
data <- list(n = n, d = d, x = x, y = as.vector(y), scale_icept = 10,
             scale_global = tau0, slab_scale = 5, slab_df = 4)
# NUTS solution (increase the number of iterations, here only 100 iterations to make this script run relatively fast)
fit_nuts <- sampling(stanmodel, data = data, iter = 3000,
                     control = list(adapt_delta = 0.9))
# save(fit_nuts, file="stan_fit.RData")
# save(fit_advi, file="vi_fit.RData")
# load("vi_fit.RData")
# load("stan_fit.RData")
# ADVI
fit_advi <- vb(stanmodel, data = data, iter = 1e6, output_samples = 1e4,
               tol_rel_obj = 0.001, eta = 0.1)
# example of how to make predictions (here on training data)
e_vi <- extract(fit_advi)
# BUG FIX: the original used `e$beta`/`e$beta0`, but `e` (the NUTS draws) is
# not extracted until later in the script, so this errored (or silently used
# a stale `e` from the workspace). The ADVI draws are what is intended here.
f <- e_vi$beta %*% t(x) + as.vector(e_vi$beta0)
mu <- colMeans(binomial()$linkinv(f))
plot(mu, y)
# investigate the posterior of the coefficients of the most relevant variables
# these plots should reveal the multimodality
e <- extract(fit_nuts)
ind <- order(abs(colMeans(e$beta)), decreasing = T)[1:5] # indices of the coefficient with largest absolute mean
qplot(e$beta[,ind[1]], e$beta[,ind[2]])
qplot(e$beta[,ind[1]])
qplot(e$beta[,ind[2]])
# result from ADVI should probably be somewhat different as it is likely to catch only one mode
e_vi <- extract(fit_advi)
qplot(e_vi$beta[,ind[1]], e_vi$beta[,ind[2]])
qplot(e_vi$beta[,ind[1]])
qplot(e_vi$beta[,ind[2]])
# Log joint density of each ADVI draw under the target posterior:
# Bernoulli log-likelihood + priors (normal intercept, inverse-gamma caux,
# half-Cauchy tau/lambda, standard-normal z) + the log-Jacobian terms
# (log tau, log lambda, log caux) for the log-transformed scale parameters.
density_target=c()
library(arm)
library(invgamma)
S=length(e_vi$beta0)
for( i in 1:S)
density_target[i]=sum( log(invlogit( e_vi$f[i,]))* y +log(1- invlogit( e_vi$f[i,]))*(1-y) )+dnorm(e_vi$beta0[i], 0, scale_icept, log=T)+ dinvgamma(e_vi$caux[i], 0.5*slab_df, 0.5*slab_df, log=T)+ dcauchy(e_vi$tau[i] ,0,scale_global,log=T)+ sum(dcauchy(e_vi$lambda[i,] ,0,1, log=T))+ sum(dnorm(e_vi$z[i,], 0,1, log=T))+log(e_vi$tau[i])+sum(log(e_vi$lambda[i,]))+log(e_vi$caux[i])
# Map the draws to the unconstrained space in which ADVI's mean-field
# Gaussian approximation lives, then record its per-coordinate moments.
trans_parameter=cbind(e_vi$beta0, e_vi$z, log(e_vi$tau),log(e_vi$lambda),log(e_vi$caux) )
vi_parameter_mean=apply(trans_parameter, 2, mean)
vi_parameter_sd=apply(trans_parameter, 2, sd)
# Log-density of one unconstrained draw `vec` under the mean-field Gaussian
# fitted to the transformed-parameter draws (reads the globals
# vi_parameter_mean / vi_parameter_sd computed above).
one_data_normal_likelihood <- function(vec) {
  log_terms <- dnorm(vec, mean = vi_parameter_mean, sd = vi_parameter_sd, log = T)
  sum(log_terms)
}
# Log importance ratios (target log-density minus approximation log-density)
# for Pareto-smoothed importance sampling diagnostics of the VI fit.
lp_vi= apply(trans_parameter, 1, one_data_normal_likelihood)
ip_ratio=density_target-lp_vi
library(loo)
joint_diagnoistics=psislw(lw=ip_ratio[complete.cases(ip_ratio)])
joint_diagnoistics$pareto_k
# Line colors (black/blue/red) plus matching translucent fill colors.
cols <- c(1 ,"blue","red")
cols2 <- sapply(cols, function(i) {
c2r <- col2rgb(i) / 255
c2r <- rgb(c2r[1], c2r[2], c2r[3], alpha=0.15)
})
# Three-panel figure: NUTS vs ADVI marginal densities for beta[ind[1]],
# log lambda[ind[1]], and log tau.
pdf("horseshoe.pdf",width=4,height=4/3)
par(mfrow=c(1,3),oma=c(1.6 ,1.5,0,0.5 ), pty='m',mar=c(0.5,0.4,0.5,0.3) ,mgp=c(1.5,0.25,0), lwd=0.5,tck=-0.01, cex.axis=0.6, cex.lab=0.8, cex.main=0.9,xpd=F)
# Panel 1: posterior density of the top coefficient (blue = NUTS, red = VI).
plot(0,xlim=c(-3,15),ylim=c(0,0.6) ,type='n',axes=F,xlab="",ylab=" ",yaxs='i' )
xx=density( e$beta[,ind[1]])
lines(xx, col=cols[2] ,lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[2], border=NA)
xx=density(e_vi$beta[,ind[1]],adjust=200)
lines(xx, col=cols[3],lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[3], border=NA,xpd=T)
axis(1, padj=-1, at=c(0,5,10,15),lwd=0.5)
mtext(1, text=expression(beta[1834]),line=1,cex=0.7)
axis(2, las=2, at=c(0,0.3,0.6),label=c("0",".3",".6"),lwd=0.5)
box(bty='l',lwd=0.5)
# NOTE(review): "desnity" below is a typo in the rendered axis label --
# fix deliberately (it changes figure output).
mtext(2, line=1 ,text =" posterior desnity",cex=0.7)
text(2,.4 , labels ="VI",cex=0.85,col=2)
text(10,.1 , labels ="NUTS",cex=0.85,col=4)
# Panel 2: posterior density of log lambda for the same coefficient.
plot(0,xlim=c(-6,18),ylim=c(0,0.3) ,type='n',axes=F,xlab="",ylab=" ",yaxs='i' )
xx=density( log(e$lambda[, ind[1]]))
lines(xx, col=cols[2] ,lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[2], border=NA)
xx=density(log(e_vi$lambda[, ind[1]]),adjust=2)
lines(xx, col=cols[3],lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[3], border=NA,xpd=T)
axis(1, padj=-1, at=c(-5,5,15),lwd=0.5)
mtext(1, text=expression(log~lambda[1834]),line=1,cex=0.7)
axis(2, las=2, at=c(0,0.15,0.3),label=c("0",".15",".3"), lwd=0.5)
box(bty='l',lwd=0.5)
# Panel 3: posterior density of the global shrinkage parameter log tau.
plot(0,xlim=c(-13,-5),ylim=c(0,1) ,type='n',axes=F,xlab="",ylab=" ",yaxs='i' )
xx=density( log(e$tau),adj=1.8)
lines(xx, col=cols[2] ,lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[2], border=NA,xpd=T)
xx=density(log(e_vi$tau),adjust=2)
lines(xx, col=cols[3],lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[3], border=NA)
axis(1, padj=-1, at=c(-11,-8,-5),lwd=0.5)
mtext(1, text=expression(log~tau),line=1,cex=0.7)
axis(2, las=2, at=c(0,0.5,1),label=c("0",".5","1"),lwd=0.5)
box(bty='l',lwd=0.5)
dev.off()
# Per-coefficient shrinkage factors 1/(1 + n*sigma^-2*tau^2*lambda^2) for
# every posterior draw (NUTS and VI).
# NOTE(review): the effective number of nonzero coefficients is usually
# sum(1 - kappa); here the sum of kappa itself is used -- confirm which
# convention the downstream figure labels assume.
sigma_t=mean(y)*(1-mean(y))
eff_k_stan=matrix(NA,dim(e$lambda)[1],d)
eff_k_vi=matrix(NA,dim(e_vi$lambda)[1],d)
for(i in 1:d){
eff_k_stan[,i]=1/(1+n*sigma_t^(-2)*e$tau^2*e$lambda[,i]^2)
eff_k_vi[,i]=1/(1+n*sigma_t^(-2)*e_vi$tau^2*e_vi$lambda[,i]^2)
}
length( apply(eff_k_stan, 1,sum))
length( apply(eff_k_vi, 1,sum))
m_eff_stan= apply(eff_k_stan, 1,sum)
# NOTE(review): sample(1:8000, 2400) hard-codes 8000 VI draws, but vb()
# above requested output_samples = 1e4 -- verify the intended draw count.
m_eff_vi= apply(eff_k_vi, 1,sum)[sample(1:8000,2400)]
# Two-panel histogram figure of m_eff under the VI and NUTS posteriors.
pdf("linear_reg_cgr_m_eff.pdf",height=1.4, width=3)
par(mfrow=c(1,2),oma=c(1,1,1,0), pty='m',mar=c(0.5,0.4,0.5,0) ,mgp=c(1.5,0.25,0), lwd=0.5,tck=-0.01, cex.axis=0.5, cex.lab=0.9, cex.main=0.9)
hist(m_eff_vi, prob=T,breaks = seq(d,4000,-60) ,xlim=c(6000,7140),ylim=c(0,0.008),axes=F,xlab="", ylab="",main = "")
lines(x=rep(mean(m_eff_vi),2),y=c(-1,0.005), col=2)
text(6700,0.005,labels = "posterior\n mean\n = 6988",cex=0.7,col=2)
axis(1, padj=-1,lwd=0.5, at=c(6000,6500,7000))
axis(2, lwd=0.5, at=c(0,0.002,0.004),las=2)
mtext(1, text=expression(m[eff]), line = 0.6, cex=0.7)
mtext(3, text="in VI posteriors ", line = -0.3, cex=0.7)
hist(m_eff_stan, prob=T,breaks = seq(d,4000,-60) ,xlim=c(6000,7140),ylim=c(0,0.008),axes=F,xlab="", ylab="",main = "")
lines(x=rep(mean(m_eff_stan),2),y=c(-1,0.005), col=2)
text(6700,0.005,labels = "posterior\n mean\n = 6940",cex=0.7,col=2)
axis(1, padj=-1,lwd=0.5, at=c(6000,6500,7000))
mtext(1, text=expression(m[eff]), line = 0.6, cex=0.7)
mtext(3, text=" in NUTS posteriors ", line = -0.3, cex=0.7)
mtext(3, text=" effective number of parameters ", line = 0, cex=0.7,outer=T)
dev.off()
| /WIP/Evaluating Variational Inference/Figure_7_Horseshoe.R | permissive | junpenglao/Planet_Sakaar_Data_Science | R | false | false | 6,889 | r | #### Figure 7 ADVI has a bad fit in Horseshoe logistic
# Figure 7: ADVI has a bad fit in the horseshoe logistic regression.
# Fit a regularized-horseshoe logistic regression to the leukemia data with
# both NUTS (reference posterior) and ADVI, then compare the two fits.
library(rstan)
options(mc.cores = parallel::detectCores())
# Load predictors `x` and binary response `y`
datafile <- 'leukemia.RData'
load(datafile, verbose = TRUE)
x <- scale(x)
d <- NCOL(x)  # number of predictors
n <- NROW(x)  # number of observations
# compile the model
stanmodel <- stan_model('glm_bernoulli_rhs.stan')
# Prior hyperparameters for the regularized horseshoe
scale_icept <- 10
slab_scale <- 5
slab_df <- 4
# data and prior
tau0 <- 1/(d-1) * 2/sqrt(n) # should be a reasonable scale for tau (see Piironen&Vehtari 2017, EJS paper)
scale_global <- tau0
data <- list(n = n, d = d, x = x, y = as.vector(y), scale_icept = 10,
             scale_global = tau0, slab_scale = 5, slab_df = 4)
# NUTS solution (increase the number of iterations, here only 100 iterations to make this script run relatively fast)
fit_nuts <- sampling(stanmodel, data = data, iter = 3000,
                     control = list(adapt_delta = 0.9))
# save(fit_nuts, file="stan_fit.RData")
# save(fit_advi, file="vi_fit.RData")
# load("vi_fit.RData")
# load("stan_fit.RData")
# ADVI
fit_advi <- vb(stanmodel, data = data, iter = 1e6, output_samples = 1e4,
               tol_rel_obj = 0.001, eta = 0.1)
# example of how to make predictions (here on training data)
e_vi <- extract(fit_advi)
# BUG FIX: the original used `e$beta`/`e$beta0`, but `e` (the NUTS draws) is
# not extracted until later in the script, so this errored (or silently used
# a stale `e` from the workspace). The ADVI draws are what is intended here.
f <- e_vi$beta %*% t(x) + as.vector(e_vi$beta0)
mu <- colMeans(binomial()$linkinv(f))
plot(mu, y)
# investigate the posterior of the coefficients of the most relevant variables
# these plots should reveal the multimodality
e <- extract(fit_nuts)
ind <- order(abs(colMeans(e$beta)), decreasing = T)[1:5] # indices of the coefficient with largest absolute mean
qplot(e$beta[,ind[1]], e$beta[,ind[2]])
qplot(e$beta[,ind[1]])
qplot(e$beta[,ind[2]])
# result from ADVI should probably be somewhat different as it is likely to catch only one mode
e_vi <- extract(fit_advi)
qplot(e_vi$beta[,ind[1]], e_vi$beta[,ind[2]])
qplot(e_vi$beta[,ind[1]])
qplot(e_vi$beta[,ind[2]])
# Log joint density of each ADVI draw under the target posterior:
# Bernoulli log-likelihood + priors (normal intercept, inverse-gamma caux,
# half-Cauchy tau/lambda, standard-normal z) + the log-Jacobian terms
# (log tau, log lambda, log caux) for the log-transformed scale parameters.
density_target=c()
library(arm)
library(invgamma)
S=length(e_vi$beta0)
for( i in 1:S)
density_target[i]=sum( log(invlogit( e_vi$f[i,]))* y +log(1- invlogit( e_vi$f[i,]))*(1-y) )+dnorm(e_vi$beta0[i], 0, scale_icept, log=T)+ dinvgamma(e_vi$caux[i], 0.5*slab_df, 0.5*slab_df, log=T)+ dcauchy(e_vi$tau[i] ,0,scale_global,log=T)+ sum(dcauchy(e_vi$lambda[i,] ,0,1, log=T))+ sum(dnorm(e_vi$z[i,], 0,1, log=T))+log(e_vi$tau[i])+sum(log(e_vi$lambda[i,]))+log(e_vi$caux[i])
# Map the draws to the unconstrained space in which ADVI's mean-field
# Gaussian approximation lives, then record its per-coordinate moments.
trans_parameter=cbind(e_vi$beta0, e_vi$z, log(e_vi$tau),log(e_vi$lambda),log(e_vi$caux) )
vi_parameter_mean=apply(trans_parameter, 2, mean)
vi_parameter_sd=apply(trans_parameter, 2, sd)
# Log-density of one unconstrained draw `vec` under the mean-field Gaussian
# fitted to the transformed-parameter draws (reads the globals
# vi_parameter_mean / vi_parameter_sd computed above).
one_data_normal_likelihood <- function(vec) {
  log_terms <- dnorm(vec, mean = vi_parameter_mean, sd = vi_parameter_sd, log = T)
  sum(log_terms)
}
# Log importance ratios (target log-density minus approximation log-density)
# for Pareto-smoothed importance sampling diagnostics of the VI fit.
lp_vi= apply(trans_parameter, 1, one_data_normal_likelihood)
ip_ratio=density_target-lp_vi
library(loo)
joint_diagnoistics=psislw(lw=ip_ratio[complete.cases(ip_ratio)])
joint_diagnoistics$pareto_k
# Line colors (black/blue/red) plus matching translucent fill colors.
cols <- c(1 ,"blue","red")
cols2 <- sapply(cols, function(i) {
c2r <- col2rgb(i) / 255
c2r <- rgb(c2r[1], c2r[2], c2r[3], alpha=0.15)
})
# Three-panel figure: NUTS vs ADVI marginal densities for beta[ind[1]],
# log lambda[ind[1]], and log tau.
pdf("horseshoe.pdf",width=4,height=4/3)
par(mfrow=c(1,3),oma=c(1.6 ,1.5,0,0.5 ), pty='m',mar=c(0.5,0.4,0.5,0.3) ,mgp=c(1.5,0.25,0), lwd=0.5,tck=-0.01, cex.axis=0.6, cex.lab=0.8, cex.main=0.9,xpd=F)
# Panel 1: posterior density of the top coefficient (blue = NUTS, red = VI).
plot(0,xlim=c(-3,15),ylim=c(0,0.6) ,type='n',axes=F,xlab="",ylab=" ",yaxs='i' )
xx=density( e$beta[,ind[1]])
lines(xx, col=cols[2] ,lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[2], border=NA)
xx=density(e_vi$beta[,ind[1]],adjust=200)
lines(xx, col=cols[3],lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[3], border=NA,xpd=T)
axis(1, padj=-1, at=c(0,5,10,15),lwd=0.5)
mtext(1, text=expression(beta[1834]),line=1,cex=0.7)
axis(2, las=2, at=c(0,0.3,0.6),label=c("0",".3",".6"),lwd=0.5)
box(bty='l',lwd=0.5)
# NOTE(review): "desnity" below is a typo in the rendered axis label --
# fix deliberately (it changes figure output).
mtext(2, line=1 ,text =" posterior desnity",cex=0.7)
text(2,.4 , labels ="VI",cex=0.85,col=2)
text(10,.1 , labels ="NUTS",cex=0.85,col=4)
# Panel 2: posterior density of log lambda for the same coefficient.
plot(0,xlim=c(-6,18),ylim=c(0,0.3) ,type='n',axes=F,xlab="",ylab=" ",yaxs='i' )
xx=density( log(e$lambda[, ind[1]]))
lines(xx, col=cols[2] ,lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[2], border=NA)
xx=density(log(e_vi$lambda[, ind[1]]),adjust=2)
lines(xx, col=cols[3],lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[3], border=NA,xpd=T)
axis(1, padj=-1, at=c(-5,5,15),lwd=0.5)
mtext(1, text=expression(log~lambda[1834]),line=1,cex=0.7)
axis(2, las=2, at=c(0,0.15,0.3),label=c("0",".15",".3"), lwd=0.5)
box(bty='l',lwd=0.5)
# Panel 3: posterior density of the global shrinkage parameter log tau.
plot(0,xlim=c(-13,-5),ylim=c(0,1) ,type='n',axes=F,xlab="",ylab=" ",yaxs='i' )
xx=density( log(e$tau),adj=1.8)
lines(xx, col=cols[2] ,lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[2], border=NA,xpd=T)
xx=density(log(e_vi$tau),adjust=2)
lines(xx, col=cols[3],lwd=0.5)
x_trans= xx$x
y_trans= xx$y
polygon(c(x_trans ,rev(x_trans) ), c(y_trans ,rep(0,length(x_trans))) , col=cols2[3], border=NA)
axis(1, padj=-1, at=c(-11,-8,-5),lwd=0.5)
mtext(1, text=expression(log~tau),line=1,cex=0.7)
axis(2, las=2, at=c(0,0.5,1),label=c("0",".5","1"),lwd=0.5)
box(bty='l',lwd=0.5)
dev.off()
# Per-coefficient shrinkage factors 1/(1 + n*sigma^-2*tau^2*lambda^2) for
# every posterior draw (NUTS and VI).
# NOTE(review): the effective number of nonzero coefficients is usually
# sum(1 - kappa); here the sum of kappa itself is used -- confirm which
# convention the downstream figure labels assume.
sigma_t=mean(y)*(1-mean(y))
eff_k_stan=matrix(NA,dim(e$lambda)[1],d)
eff_k_vi=matrix(NA,dim(e_vi$lambda)[1],d)
for(i in 1:d){
eff_k_stan[,i]=1/(1+n*sigma_t^(-2)*e$tau^2*e$lambda[,i]^2)
eff_k_vi[,i]=1/(1+n*sigma_t^(-2)*e_vi$tau^2*e_vi$lambda[,i]^2)
}
length( apply(eff_k_stan, 1,sum))
length( apply(eff_k_vi, 1,sum))
m_eff_stan= apply(eff_k_stan, 1,sum)
# NOTE(review): sample(1:8000, 2400) hard-codes 8000 VI draws, but vb()
# above requested output_samples = 1e4 -- verify the intended draw count.
m_eff_vi= apply(eff_k_vi, 1,sum)[sample(1:8000,2400)]
# Two-panel histogram figure of m_eff under the VI and NUTS posteriors.
pdf("linear_reg_cgr_m_eff.pdf",height=1.4, width=3)
par(mfrow=c(1,2),oma=c(1,1,1,0), pty='m',mar=c(0.5,0.4,0.5,0) ,mgp=c(1.5,0.25,0), lwd=0.5,tck=-0.01, cex.axis=0.5, cex.lab=0.9, cex.main=0.9)
hist(m_eff_vi, prob=T,breaks = seq(d,4000,-60) ,xlim=c(6000,7140),ylim=c(0,0.008),axes=F,xlab="", ylab="",main = "")
lines(x=rep(mean(m_eff_vi),2),y=c(-1,0.005), col=2)
text(6700,0.005,labels = "posterior\n mean\n = 6988",cex=0.7,col=2)
axis(1, padj=-1,lwd=0.5, at=c(6000,6500,7000))
axis(2, lwd=0.5, at=c(0,0.002,0.004),las=2)
mtext(1, text=expression(m[eff]), line = 0.6, cex=0.7)
mtext(3, text="in VI posteriors ", line = -0.3, cex=0.7)
hist(m_eff_stan, prob=T,breaks = seq(d,4000,-60) ,xlim=c(6000,7140),ylim=c(0,0.008),axes=F,xlab="", ylab="",main = "")
lines(x=rep(mean(m_eff_stan),2),y=c(-1,0.005), col=2)
text(6700,0.005,labels = "posterior\n mean\n = 6940",cex=0.7,col=2)
axis(1, padj=-1,lwd=0.5, at=c(6000,6500,7000))
mtext(1, text=expression(m[eff]), line = 0.6, cex=0.7)
mtext(3, text=" in NUTS posteriors ", line = -0.3, cex=0.7)
mtext(3, text=" effective number of parameters ", line = 0, cex=0.7,outer=T)
dev.off()
|
# Generated fuzz/valgrind regression fixture: feed a 1x3 matrix of
# extreme (denormal-magnitude) doubles and a 1x1 zero matrix to the
# internal multivariance:::match_rows routine via do.call.
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.81571422920468e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613126275-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 226 | r | testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.81571422920468e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' @title multimodality_score
#' @description Calculate multimodality score based on bootstrapped potential analysis
#' @param x A vector, or data matrix (variables x samples)
#' @param detection.threshold Mode detection threshold
#' @param bw.adjust Bandwidth adjustment
#' @param bs.iterations Bootstrap iterations
#' @param detection.limit minimum accepted density for a maximum; as a multiple of kernel height
#' @param verbose Verbose
#' @return A list with following elements:
#' \itemize{
#' \item{score}{Fraction of bootstrap samples where multiple modes are observed}
#' \item{modes}{The most frequently observed number of modes in bootstrap sampling results}
#' \item{results}{Full results of potential_analysis_bootstrap for each row of the input matrix.}
#' }
#' @details This function repeats potential analysis (Livina et al. 2010) multiple
#' times with bootstrap sampling for each row of the input data
#' (as in Lahti et al. 2014) and returns the specified results.
#' @export
#' @import earlywarnings
#' @author Leo Lahti \email{leo.lahti@@iki.fi}
#' @examples
#' data(peerj32)
#' multimodality_score(t(peerj32$microbes[, c("Akkermansia", "Dialister")]))
#' @references
#' Livina et al. (2010). Potential analysis
#' reveals changing number of climate states during the last 60
#' kyr. \emph{Climate of the Past}, 6, 77-82.
#' Lahti et al. (2014). Tipping elements of the human intestinal
#' ecosystem. \emph{Nature Communications} 5:4344.
#' @keywords utilities
multimodality_score <- function (x, detection.threshold = 1, bw.adjust = 1, bs.iterations = 100, detection.limit = 1, verbose = TRUE) {
  if (is.vector(x)) {
    # Add small noise to enable robust density estimation
    # (identical values may cause failure)
    x <- x + rnorm(length(x), sd = sd(x)/100)
    m <- potential_analysis_bootstrap(x,
        detection.threshold = detection.threshold,
        bw.adjust = bw.adjust,
        bs.iterations = bs.iterations,
        detection.limit = detection.limit)
    # score = fraction of bootstrap samples showing more than one mode
    ret <- list(score = 1 - m$unimodality.support, modes = m$modes, results = m)
    return(ret)
  } else {
    # Matrix input: run the univariate analysis independently on each row
    potential.results <- list()
    nmodes <- c()
    if (is.null(rownames(x))) {
      rownames(x) <- as.character(seq_len(nrow(x)))
    }
    for (tax in rownames(x)) {
      if (verbose) { message(tax) }
      m <- multimodality_score(as.numeric(x[tax, ]), detection.threshold,
          bw.adjust, bs.iterations, detection.limit, verbose)
      nmodes[[tax]] <- m$modes
      potential.results[[tax]] <- m
    }
    # BUG FIX: each element of potential.results is a multimodality_score()
    # return value, which exposes $score -- the previous code read the
    # non-existent $unimodality.support field and produced empty values.
    multimodality.score <- sapply(potential.results, function (x) { x$score })
    ret <- list(score = multimodality.score, modes = nmodes, results = potential.results)
  }
  ret
}
| /R/multimodality_score.R | no_license | himanshu-fff/microbiome | R | false | false | 2,784 | r | #' @title multimodality_score
#' @title multimodality_score
#' @description Calculate multimodality score based on bootstrapped potential analysis
#' @param x A vector, or data matrix (variables x samples)
#' @param detection.threshold Mode detection threshold
#' @param bw.adjust Bandwidth adjustment
#' @param bs.iterations Bootstrap iterations
#' @param detection.limit minimum accepted density for a maximum; as a multiple of kernel height
#' @param verbose Verbose
#' @return A list with following elements:
#' \itemize{
#' \item{score}{Fraction of bootstrap samples where multiple modes are observed}
#' \item{modes}{The most frequently observed number of modes in bootstrap sampling results}
#' \item{results}{Full results of potential_analysis_bootstrap for each row of the input matrix.}
#' }
#' @details This function repeats potential analysis (Livina et al. 2010) multiple
#' times with bootstrap sampling for each row of the input data
#' (as in Lahti et al. 2014) and returns the specified results.
#' @export
#' @import earlywarnings
#' @author Leo Lahti \email{leo.lahti@@iki.fi}
#' @examples
#' data(peerj32)
#' multimodality_score(t(peerj32$microbes[, c("Akkermansia", "Dialister")]))
#' @references
#' Livina et al. (2010). Potential analysis
#' reveals changing number of climate states during the last 60
#' kyr. \emph{Climate of the Past}, 6, 77-82.
#' Lahti et al. (2014). Tipping elements of the human intestinal
#' ecosystem. \emph{Nature Communications} 5:4344.
#' @keywords utilities
multimodality_score <- function (x, detection.threshold = 1, bw.adjust = 1, bs.iterations = 100, detection.limit = 1, verbose = TRUE) {
  if (is.vector(x)) {
    # Add small noise to enable robust density estimation
    # (identical values may cause failure)
    x <- x + rnorm(length(x), sd = sd(x)/100)
    m <- potential_analysis_bootstrap(x,
        detection.threshold = detection.threshold,
        bw.adjust = bw.adjust,
        bs.iterations = bs.iterations,
        detection.limit = detection.limit)
    # score = fraction of bootstrap samples showing more than one mode
    ret <- list(score = 1 - m$unimodality.support, modes = m$modes, results = m)
    return(ret)
  } else {
    # Matrix input: run the univariate analysis independently on each row
    potential.results <- list()
    nmodes <- c()
    if (is.null(rownames(x))) {
      rownames(x) <- as.character(seq_len(nrow(x)))
    }
    for (tax in rownames(x)) {
      if (verbose) { message(tax) }
      m <- multimodality_score(as.numeric(x[tax, ]), detection.threshold,
          bw.adjust, bs.iterations, detection.limit, verbose)
      nmodes[[tax]] <- m$modes
      potential.results[[tax]] <- m
    }
    # BUG FIX: each element of potential.results is a multimodality_score()
    # return value, which exposes $score -- the previous code read the
    # non-existent $unimodality.support field and produced empty values.
    multimodality.score <- sapply(potential.results, function (x) { x$score })
    ret <- list(score = multimodality.score, modes = nmodes, results = potential.results)
  }
  ret
}
|
#' Get community membership by modularity optimization
#' @description Through the use of greedy optimization
#' of a modularity score, obtain the group membership
#' values for each of the nodes in the graph.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @return a data frame with group membership
#' assignments for each of the nodes.
#' @examples
#' # Create a random graph
#' graph <-
#' create_random_graph(
#' n = 10, m = 22,
#' set_seed = 23)
#'
#' # Get the group membership values for all
#' # nodes in the graph through the greedy
#' # optimization of modularity algorithm
#' get_cmty_fast_greedy(graph)
#' #> id f_g_group
#' #> 1 1 1
#' #> 2 2 2
#' #> 3 3 2
#' #> 4 4 1
#' #> 5 5 1
#' #> 6 6 1
#' #> 7 7 2
#' #> 8 8 1
#' #> 9 9 2
#' #> 10 10 1
#'
#' # Add the group membership values to the
#' # graph as a node attribute
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_cmty_fast_greedy(.))
#'
#' # Display the graph's node data frame
#' get_node_df(graph)
#' #> id type label value f_g_group
#' #> 1 1 <NA> 1 6.0 1
#' #> 2 2 <NA> 2 2.5 2
#' #> 3 3 <NA> 3 3.5 2
#' #> 4 4 <NA> 4 7.5 1
#' #> 5 5 <NA> 5 8.5 1
#' #> 6 6 <NA> 6 4.5 1
#' #> 7 7 <NA> 7 10.0 2
#' #> 8 8 <NA> 8 10.0 1
#' #> 9 9 <NA> 9 8.5 2
#' #> 10 10 <NA> 10 10.0 1
#' @importFrom igraph cluster_fast_greedy membership
#' @export get_cmty_fast_greedy
get_cmty_fast_greedy <- function(graph) {
  # Fail early if the supplied object is not a valid dgr_graph.
  if (graph_object_valid(graph) == FALSE) {
    stop("The graph object is not valid.")
  }

  # Greedy modularity optimization only applies to undirected graphs,
  # so coerce a directed graph to its undirected form first.
  graph <- set_graph_undirected(graph)

  # Work on the igraph representation of the graph.
  ig_graph <- to_igraph(graph)

  # Run the greedy optimization of modularity and extract the
  # membership vector once; its names are the node IDs.
  communities <- igraph::cluster_fast_greedy(ig_graph)
  membership_vec <- igraph::membership(communities)

  # One row per node: integer node ID and its community group.
  data.frame(
    id = as.integer(names(membership_vec)),
    f_g_group = as.vector(membership_vec),
    stringsAsFactors = FALSE)
}
| /R/get_cmty_fast_greedy.R | no_license | chengfeifan/DiagrammeR | R | false | false | 2,332 | r | #' Get community membership by modularity optimization
#' @description Through the use of greedy optimization
#' of a modularity score, obtain the group membership
#' values for each of the nodes in the graph.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @return a data frame with group membership
#' assignments for each of the nodes.
#' @examples
#' # Create a random graph
#' graph <-
#' create_random_graph(
#' n = 10, m = 22,
#' set_seed = 23)
#'
#' # Get the group membership values for all
#' # nodes in the graph through the greedy
#' # optimization of modularity algorithm
#' get_cmty_fast_greedy(graph)
#' #> id f_g_group
#' #> 1 1 1
#' #> 2 2 2
#' #> 3 3 2
#' #> 4 4 1
#' #> 5 5 1
#' #> 6 6 1
#' #> 7 7 2
#' #> 8 8 1
#' #> 9 9 2
#' #> 10 10 1
#'
#' # Add the group membership values to the
#' # graph as a node attribute
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_cmty_fast_greedy(.))
#'
#' # Display the graph's node data frame
#' get_node_df(graph)
#' #> id type label value f_g_group
#' #> 1 1 <NA> 1 6.0 1
#' #> 2 2 <NA> 2 2.5 2
#' #> 3 3 <NA> 3 3.5 2
#' #> 4 4 <NA> 4 7.5 1
#' #> 5 5 <NA> 5 8.5 1
#' #> 6 6 <NA> 6 4.5 1
#' #> 7 7 <NA> 7 10.0 2
#' #> 8 8 <NA> 8 10.0 1
#' #> 9 9 <NA> 9 8.5 2
#' #> 10 10 <NA> 10 10.0 1
#' @importFrom igraph cluster_fast_greedy membership
#' @export get_cmty_fast_greedy
# Compute fast-greedy community membership for each node of a DiagrammeR
# graph. Returns a data frame with one row per node:
#   id        - integer node ID
#   f_g_group - community group from greedy modularity optimization
get_cmty_fast_greedy <- function(graph) {
  # Fail early if the supplied object is not a valid dgr_graph
  if (graph_object_valid(graph) == FALSE) {
    stop("The graph object is not valid.")
  }
  # cluster_fast_greedy() only works on undirected graphs, so a
  # directed graph is first transformed to undirected
  graph <- set_graph_undirected(graph)
  # Convert the dgr_graph to an igraph object for the igraph algorithm
  ig_graph <- to_igraph(graph)
  # Get the community object using the
  # `cluster_fast_greedy()` function
  cmty_fast_greedy_obj <-
    igraph::cluster_fast_greedy(ig_graph)
  # Build the id/membership data frame; membership() is named by node ID
  data.frame(
    id = as.integer(names(igraph::membership(cmty_fast_greedy_obj))),
    f_g_group = as.vector(igraph::membership(cmty_fast_greedy_obj)),
    stringsAsFactors = FALSE)
}
|
# Auto-generated fuzz/valgrind regression input for epiphy:::costTotCPP:
# a 5x5 `cost` matrix and a 5x7 `flow` matrix populated with extreme
# double values (the exact bit patterns matter -- do not reformat).
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159493393162e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
# Invoke the internal C++ routine with the fuzzed argument list
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) | /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926619-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,101 | r | testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159493393162e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) |
\name{requireData}
\alias{requireData}
\title{
Attach packages as required and expose non-LazyData data sets as promises.
}
\description{
This function provides \code{LazyData} functionality for packages
which do not provide it. It acts as an enhanced substitute for the
base package's \code{require} function.
}
\usage{
requireData(package = stop("you must specify a package"),
lib.loc = NULL, quietly = TRUE, character.only = FALSE,
warn.conflicts = TRUE, reallyQuietly = TRUE, ...)
}
\arguments{
\item{package}{
The name of the package whose attachment to the search path is
required. May be a name or a literal character string.
}
\item{lib.loc}{
The path to the library holding the package. As for
\code{\link{require}}.
}
\item{quietly}{
Logical: should the standard loading message be suppressed?
Ignored if \code{reallyQuietly} is \code{TRUE}.
}
\item{character.only}{
Logical: should the \code{package} argument be treated as a
character string even if not literal?
}
\item{warn.conflicts}{
Should objects masked by the attachment of the package be flagged?
As for \code{\link{require}}.
Ignored if \code{reallyQuietly} is \code{TRUE}.
}
\item{reallyQuietly}{
Logical: should the \code{package} be loaded using
\code{suppressPackageStartupMessages}? If \code{TRUE}, the default,
this will make the loading
as quiet as possible, but will suppress potentially useful
messages, such as masking information.
}
\item{\dots}{
Additional arguments currently ignored.
}
}
\details{
The only function this package provides, \code{requireData}, is a
substitute for the base function \code{require}. If the package is
not already on the search path, it attaches it. In addition, if the
package a) has data sets and b) does NOT use the LazyData facility,
then an additional entry is made on the search path. This is an
unlocked environment initially populated by `promises'
(using \code{delayedAssign}) to load a copy of the data set into
memory if and when it is needed.
This is done recursively for all packages attached to the search path
via dependencies.
If the package appears on the search path as \code{package:<pkg>} at
position \code{p}, then any exposed data set objects appear at position
\code{p+1} as \code{datasets:<pkg>}. The package environment is
locked, but the datasets environment is not. If a data set object is
needed at any stage, it is brought silently into memory at position
\code{p+1} on the search path.
Any further call to \code{requireData(<pkg>)} will reinstate the
datasets as promises, thus potentially freeing memory.
The intended effect is to make data sets more conveniently available
to users, to make the use of the \code{data} function largely
unnecessary, and to avoid cluttering the global environment with
copies of passive data set objects.
}
\value{
\code{TRUE} if the package was successfully attached and \code{FALSE}
otherwise.
}
\references{
Null
}
\author{
Bill Venables
}
\seealso{
\code{\link{require}}, \code{\link[utils]{data}}
}
\examples{
\dontshow{
.Search <- function() {
srch <- search()
srch <- paste(format(srch),
ifelse(grepl("^datasets:", srch), "<<-- data here", ""))
noquote(as.matrix(srch))
}
}
requireData("mgcv") ## we assume has data sets but no LazyLoad
.Search() ## show augmented search path
## > ls("datasets:mgcv")
##[1] "columb" "columb.polys"
}
\keyword{ data }
| /man/requireData.Rd | no_license | cran/lazyData | R | false | false | 3,562 | rd | \name{requireData}
\alias{requireData}
\title{
Attach packages as required and expose non-LazyData data sets as promises.
}
\description{
This function provides \code{LazyData} functionality for packages
which do not provide it. It acts as an enhanced substitute for the
base packges \code{require} function.
}
\usage{
requireData(package = stop("you must specify a package"),
lib.loc = NULL, quietly = TRUE, character.only = FALSE,
warn.conflicts = TRUE, reallyQuietly = TRUE, ...)
}
\arguments{
\item{package}{
The name of the package whose attachment to the search path is
required. May me a name or a literal character string.
}
\item{lib.loc}{
The path to the library holding the package. As for
\code{\link{require}}.
}
\item{quietly}{
Logical: should the stanandard loading message be suppressed?
Ignored if \code{reallyQuiet} is \code{TRUE}.
}
\item{character.only}{
Logical: should the \code{package} argument be treated as a
character string even if not literal?
}
\item{warn.conflicts}{
Should objects masked by the attachment of the package be flagged?
As for \code{\link{require}}.
Ignored if \code{reallyQuiet} is \code{TRUE}.
}
\item{reallyQuietly}{
Logical: should the \code{package} be loaded using
\code{suppressPackageStartupMessages}? If \code{TRUE}, the default,
this will make the loading
as quietly as possible, but will suppress potentially useful
messages, such as masking information.
}
\item{\dots}{
Additional arguments currently ignored.
}
}
\details{
The only function this package provides, \code{requireData}, is a
substitute for the base function \code{require}. If the package is
not already on the search path, it attaches it. In addition, if the
package a) has data sets and b) does NOT use the LazyData facility,
then an additonal entry is made on the search path. This is an
unlocked environment initially populated by `promises'
(using \code{delayedAssign}) to load a copy of the data set into
memory if and when it is needed.
This is done recursively for all packages attached to the search path
via dependencies.
If the package appears on the search path as \code{package:<pkg>} at
positon \code{p}, then any exposed data set objects appear at postion
\code{p+1} as \code{datasets:<pkg>}. The package environment is
locked, but the datasets environment is not. If a data set object is
needed at any stage, it is brought silently into memory at position
\code{p+1} on the search path.
Any further call to \code{requireData(<pkg>)} will reinstate the
datasets as promises, thus potentially freeing memory.
The intended effect is to make data sets more conveniently available
to users, to make the use of the \code{data} function largely
unnecessary, and to avoid cluttering the global environment with
copies of passive data set objects.
}
\value{
\code{TRUE} if the package was successfully attached and \code{FALSE}
otherwise.
}
\references{
Null
}
\author{
Bill Venables
}
\seealso{
\code{\link{require}}, \code{\link[utils]{data}}
}
\examples{
\dontshow{
.Search <- function() {
srch <- search()
srch <- paste(format(srch),
ifelse(grepl("^datasets:", srch), "<<-- data here", ""))
noquote(as.matrix(srch))
}
}
requireData("mgcv") ## we assume has data sets but no LazyLoad
.Search() ## show augmented search path
## > ls("datasets:mgcv")
##[1] "columb" "columb.polys"
}
\keyword{ data }
|
# Example script extracted from the pafdR documentation for gen.rnd.vec().
library(pafdR)
### Name: gen.rnd.vec
### Title: Generates random vector for numerical answer (internal)
### Aliases: gen.rnd.vec
### ** Examples

library(pafdR)
my.sol <- 1 #assume solution to exercise is 1
# gen.rnd.vec() returns random multipliers; scaling by the assumed
# solution yields the candidate multiple-choice answer vector.
my.answers <- gen.rnd.vec()*my.sol
my.answers
| /data/genthat_extracted_code/pafdR/examples/gen.rnd.vec.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 262 | r | library(pafdR)
### Name: gen.rnd.vec
### Title: Generates random vector for numerical answer (internal)
### Aliases: gen.rnd.vec
### ** Examples
library(pafdR)
my.sol <- 1 #assume solution to exercise is 1
my.answers <- gen.rnd.vec()*my.sol
my.answers
|
# PRODUCTS ------------------------------------------------------------------------------------------------------------
#' Products available
#'
#' Returns information about the Uber products offered at a given location.
#'
#' @param latitude Latitude of location.
#' @param longitude Longitude of location.
#' @param product_id Unique identifier representing a specific product.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-products}.
#' @examples
#' \dontrun{
#' uber_products(latitude = -33.925278, longitude = 18.423889)
#' uber_products(product_id = "91901472-f30d-4614-8ba7-9fcc937cebf5")
#' }
#' @export
uber_products <- function(latitude = NA, longitude = NA, product_id = NA) {
  # The endpoint needs either a full coordinate pair or a product ID.
  # NOTE: no extra locals are introduced before parseParameters(environment())
  # so the captured frame holds exactly the function arguments.
  if (any(is.na(c(latitude, longitude))) && is.na(product_id)) {
    stop("Either both latitude and longitude or product_id must be specified.")
  }
  if (!is.na(product_id)) {
    # Look up a single product by its unique identifier.
    callAPI(paste("products", product_id, sep = "/"), 1)
  } else {
    # List all products available at the given coordinates.
    callAPI("products", 1, params = parseParameters(environment()))$products
  }
}
# ESTIMATES -----------------------------------------------------------------------------------------------------------
#' Price estimate
#'
#' Returns an estimated price range for each product offered at a given location
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param end_latitude Final latitude.
#' @param end_longitude Final longitude.
#' @param seat_count Number of passengers.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-estimates-price}
#' @examples
#' \dontrun{
#' uber_estimate_price(start_latitude = 37.761492, start_longitude = -122.423941,
#' end_latitude = 37.775393, end_longitude = -122.417546)
#' }
#' @export
uber_estimate_price <- function(start_latitude, start_longitude, end_latitude, end_longitude, seat_count = NULL) {
  # GET /v1/estimates/price with the trip endpoints; only the per-product
  # price table from the response is returned to the caller.
  callAPI("estimates/price", 1, method = "GET",
          params = parseParameters(environment()))$prices
}
#' Time estimate
#'
#' Returns ETAs for all products currently available at a given location. The ETA for each product is expressed in seconds.
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param product_id Unique identifier representing a specific product.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-estimates-time}
#' @examples
#' \dontrun{
#' uber_estimate_time(start_latitude = 37.761492, start_longitude = -122.423941)
#' }
#' @export
uber_estimate_time <- function(start_latitude, start_longitude, product_id = NULL) {
  # GET /v1/estimates/time at the pickup location; return just the
  # per-product ETA table (ETAs are expressed in seconds).
  callAPI("estimates/time", 1, method = "GET",
          params = parseParameters(environment()))$times
}
# HISTORY -------------------------------------------------------------------------------------------------------------
#' History
#'
#' Returns data about a user's activity on Uber.
#'
#' Requires an OAuth 2.0 token with the history or history_lite scope.
#'
#' @param limit Number of items to retrieve.
#' @param offset Offset the returned results.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v12-history}
#' @examples
#' \dontrun{
#' uber_history()
#' }
#' @import dplyr
#' @export
uber_history <- function(limit = 5, offset = 0) {
  # Guard clause: a non-positive limit is a caller error.
  if (limit <= 0) {
    stop("You must specify a positive value for limit.")
  }
  # GET /v1.2/history; parseParameters(environment()) turns limit/offset
  # into query parameters (called before any extra locals are bound).
  data <- callAPI("history", 1.2, method = "GET", params = parseParameters(environment()))

  # No trips on record: mirror the original contract and return NULL.
  if (length(data$history) == 0) {
    return(NULL)
  }

  # Epoch-second columns that must become POSIXct timestamps.
  times <- c("request_time", "start_time", "end_time")
  # Replace the nested start_city data frame with its flattened columns,
  # then convert the epoch columns. Uses current dplyr verbs instead of
  # the deprecated underscore variants (select_/mutate_) with string
  # expressions.
  dplyr::select(data$history, -"start_city") %>%
    cbind(data$history$start_city) %>%
    dplyr::mutate(dplyr::across(
      dplyr::all_of(times),
      ~ as.POSIXct(.x, origin = "1970-01-01")))
}
# ME ------------------------------------------------------------------------------------------------------------------
#' User information
#'
#' Returns information about the Uber user.
#'
#' Requires an OAuth 2.0 token with the profile scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-me}
#' @examples
#' \dontrun{
#' uber_me()
#' }
#' @export
uber_me <- function() {
  # Single GET to the /v1/me endpoint; callAPI() handles auth and parsing.
  callAPI("me", 1)
}
# REQUESTS ------------------------------------------------------------------------------------------------------------
#' Request a ride
#'
#' Request a ride on the behalf of the authenticated user.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param end_latitude Final latitude.
#' @param end_longitude Final longitude.
#' @param start_address Initial address.
#' @param end_address Final address.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests}
#' @examples
#' \dontrun{
#' uber_requests(start_address = "37 Beach Road, Mouille Point, Cape Town",
#' end_address = "100 St Georges Mall, Cape Town City Centre, Cape Town")
#' }
#' @export
uber_requests <- function(start_latitude = NULL, start_longitude = NULL, end_latitude = NULL, end_longitude = NULL, start_address = NULL, end_address = NULL) {
  # POST the pickup/dropoff details (coordinates or street addresses) to
  # /v1/requests. parseParameters(environment()) collects the arguments
  # into the request body -- presumably dropping NULLs; confirm against
  # its implementation.
  callAPI("requests", 1, method = "POST", params = parseParameters(environment()))
}
#' Request estimate
#'
#' Allows a ride to be estimated given the desired product, start, and end locations. If the end location is not provided, only the pickup ETA and details of surge pricing information are provided. If the pickup ETA is null, there are no cars available, but an estimate may still be given to the user.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param end_latitude Final latitude.
#' @param end_longitude Final longitude.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests-estimate}
#' @examples
#' \dontrun{
#' uber_requests_estimate(start_latitude = 37.761492, start_longitude = -122.423941,
#' end_latitude = 37.775393, end_longitude = -122.417546)
#' }
#' @export
uber_requests_estimate <- function(start_latitude = NULL, start_longitude = NULL, end_latitude = NULL, end_longitude = NULL) {
  # POST the trip coordinates to /v1/requests/estimate.
  response <- callAPI("requests/estimate", 1, method = "POST", params = parseParameters(environment()))
  #
  # Flatten the price component: bind the fare breakdown together with the
  # surge multiplier and currency code. Assumes these fields are present in
  # the response's price element -- TODO confirm against the endpoint docs.
  response$price <- with(response$price,
                         cbind(fare_breakdown, surge_multiplier, currency_code)
  )
  #
  response
}
#' Current request
#'
#' Retrieve details of the currently active request.
#'
#' Requires an OAuth 2.0 token with the all_trips or request scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests-current}
#' @examples
#' \dontrun{
#' uber_requests_current()
#' }
#' @export
uber_requests_current <- function() {
  # GET /v1/requests/current -- details of the active ride request, if any.
  callAPI("requests/current", 1)
}
#' Delete current request
#'
#' Delete the currently active request.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests-current-delete}
#' @examples
#' \dontrun{
#' uber_requests_current_delete()
#' }
#' @export
uber_requests_current_delete <- function() {
  # DELETE /v1/requests/current -- cancel the active ride request.
  callAPI("requests/current", 1, method = "DELETE")
}
# PLACES --------------------------------------------------------------------------------------------------------------
# It's possible that a home location will need to be set via the Uber application before this will work.
#' Get place address
#'
#' Retrieve home and work addresses from an Uber user's profile.
#'
#' Requires an OAuth 2.0 token with the places scope.
#'
#' @param place_id Either "home" or "work".
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-places-get}
#' @examples
#' \dontrun{
#' uber_places_get()
#' uber_places_get("home")
#' uber_places_get("work")
#' }
#' @export
uber_places_get <- function(place_id = c("home", "work")) {
  # match.arg() keeps the original default ("home", the first choice) but
  # additionally validates the input: an unknown place_id now raises a
  # clear error instead of being passed verbatim to the API.
  place_id <- match.arg(place_id)
  # GET /v1/places/{home|work}
  callAPI(paste("places", place_id, sep = "/"), 1, method = "GET", params = NULL)
}
#' Set place address
#'
#' Update home and work addresses for an Uber user's profile.
#'
#' Requires an OAuth 2.0 token with the places scope.
#'
#' @param place_id Either "home" or "work".
#' @param address Address to be assigned.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-places-put}
#' @examples
#' \dontrun{
#' uber_places_put("home", "115 St Andrews Dr, Durban North, 4051, South Africa")
#' }
#' @export
uber_places_put <- function(place_id = c("home", "work"), address) {
  # match.arg() keeps the original default ("home") while rejecting
  # anything other than "home"/"work" with a clear error, rather than
  # silently sending an arbitrary place_id to the API.
  place_id <- match.arg(place_id)
  # PUT /v1/places/{home|work} with the new address in the request body.
  callAPI(paste("places", place_id, sep = "/"), 1, method = "PUT", params = list(address = address))
}
# PAYMENT -------------------------------------------------------------------------------------------------------------
#' Payment methods
#'
#' Retrieve a list of the user's available payment methods.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-payment-methods}
#' @export
uber_payment_methods <- function() {
  # GET /v1/payment-methods -- the user's available payment methods.
  callAPI("payment-methods", 1)
}
| /R/api.R | no_license | arthurwuhoo/ubeR | R | false | false | 9,216 | r | # PRODUCTS ------------------------------------------------------------------------------------------------------------
#' Products available
#'
#' Returns information about the Uber products offered at a given location.
#'
#' @param latitude Latitude of location.
#' @param longitude Longitude of location.
#' @param product_id Unique identifier representing a specific product.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-products}.
#' @examples
#' \dontrun{
#' uber_products(latitude = -33.925278, longitude = 18.423889)
#' uber_products(product_id = "91901472-f30d-4614-8ba7-9fcc937cebf5")
#' }
#' @export
uber_products <- function(latitude = NA, longitude = NA, product_id = NA) {
if (any(is.na(c(latitude, longitude))) && is.na(product_id))
stop("Either both latitude and longitude or product_id must be specified.")
if (is.na(product_id)) {
rides = callAPI("products", 1, params = parseParameters(environment()))$products
} else {
rides = callAPI(paste("products", product_id, sep = "/"), 1)
}
rides
}
# ESTIMATES -----------------------------------------------------------------------------------------------------------
#' Price estimate
#'
#' Returns an estimated price range for each product offered at a given location
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param end_latitude Final latitude.
#' @param end_longitude Final longitude.
#' @param seat_count Number of passengers.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-estimates-price}
#' @examples
#' \dontrun{
#' uber_estimate_price(start_latitude = 37.761492, start_longitude = -122.423941,
#' end_latitude = 37.775393, end_longitude = -122.417546)
#' }
#' @export
uber_estimate_price <- function(start_latitude, start_longitude, end_latitude, end_longitude, seat_count = NULL) {
estimates = callAPI("estimates/price", 1, method = "GET", params = parseParameters(environment()))
#
estimates$prices
}
#' Time estimate
#'
#' Returns ETAs for all products currently available at a given location. The ETA for each product is expressed in seconds.
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param product_id Unique identifier representing a specific product.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-estimates-time}
#' @examples
#' \dontrun{
#' uber_estimate_time(start_latitude = 37.761492, start_longitude = -122.423941)
#' }
#' @export
uber_estimate_time <- function(start_latitude, start_longitude, product_id = NULL) {
estimates = callAPI("estimates/time", 1, method = "GET", params = parseParameters(environment()))
#
estimates$times
}
# HISTORY -------------------------------------------------------------------------------------------------------------
#' History
#'
#' Returns data about a user's activity on Uber.
#'
#' Requires an OAuth 2.0 token with the history or history_lite scope.
#'
#' @param limit Number of items to retrieve.
#' @param offset Offset the returned results.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v12-history}
#' @examples
#' \dontrun{
#' uber_history()
#' }
#' @import dplyr
#' @export
uber_history <- function(limit = 5, offset = 0) {
if(limit <= 0) {
stop("You must specify a positive value for limit.")
} else {
data <- callAPI("history", 1.2, method = "GET", params = parseParameters(environment()))
}
if (length(data$history) == 0){
history.df.final = NULL
} else {
times <- c("request_time", "start_time", "end_time")
history.df.final <- select_(data$history, .dots = c("-start_city")) %>%
cbind(data$history$start_city) %>%
mutate_(.dots = setNames(paste0('as.POSIXct(',times,', origin = "1970-01-01")'), times))
}
history.df.final
}
# ME ------------------------------------------------------------------------------------------------------------------
#' User information
#'
#' Returns information about the Uber user.
#'
#' Requires an OAuth 2.0 token with the profile scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-me}
#' @examples
#' \dontrun{
#' uber_me()
#' }
#' @export
uber_me <- function() {
callAPI("me", 1)
}
# REQUESTS ------------------------------------------------------------------------------------------------------------
#' Request a ride
#'
#' Request a ride on the behalf of the authenticated user.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param end_latitude Final latitude.
#' @param end_longitude Final longitude.
#' @param start_address Initial address.
#' @param end_address Final address.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests}
#' @examples
#' \dontrun{
#' uber_requests(start_address = "37 Beach Road, Mouille Point, Cape Town",
#' end_address = "100 St Georges Mall, Cape Town City Centre, Cape Town")
#' }
#' @export
uber_requests <- function(start_latitude = NULL, start_longitude = NULL, end_latitude = NULL, end_longitude = NULL, start_address = NULL, end_address = NULL) {
callAPI("requests", 1, method = "POST", params = parseParameters(environment()))
}
#' Request estimate
#'
#' Allows a ride to be estimated given the desired product, start, and end locations. If the end location is not provided, only the pickup ETA and details of surge pricing information are provided. If the pickup ETA is null, there are no cars available, but an estimate may still be given to the user.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @param start_latitude Initial latitude.
#' @param start_longitude Initial longitude.
#' @param end_latitude Final latitude.
#' @param end_longitude Final longitude.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests-estimate}
#' @examples
#' \dontrun{
#' uber_requests_estimate(start_latitude = 37.761492, start_longitude = -122.423941,
#' end_latitude = 37.775393, end_longitude = -122.417546)
#' }
#' @export
uber_requests_estimate <- function(start_latitude = NULL, start_longitude = NULL, end_latitude = NULL, end_longitude = NULL) {
response <- callAPI("requests/estimate", 1, method = "POST", params = parseParameters(environment()))
#
response$price <- with(response$price,
cbind(fare_breakdown, surge_multiplier, currency_code)
)
#
response
}
#' Current request
#'
#' Retrieve details of the currently active request.
#'
#' Requires an OAuth 2.0 token with the all_trips or request scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests-current}
#' @examples
#' \dontrun{
#' uber_requests_current()
#' }
#' @export
uber_requests_current <- function() {
callAPI("requests/current", 1)
}
#' Delete current request
#'
#' Delete the currently active request.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-requests-current-delete}
#' @examples
#' \dontrun{
#' uber_requests_current_delete()
#' }
#' @export
uber_requests_current_delete <- function() {
callAPI("requests/current", 1, method = "DELETE")
}
# PLACES --------------------------------------------------------------------------------------------------------------
# It's possible that a home location will need to be set via the Uber application before this will work.
#' Get place address
#'
#' Retrieve home and work addresses from an Uber user's profile.
#'
#' Requires an OAuth 2.0 token with the places scope.
#'
#' @param place_id Either "home" or "work".
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-places-get}
#' @examples
#' \dontrun{
#' uber_places_get()
#' uber_places_get("home")
#' uber_places_get("work")
#' }
#' @export
uber_places_get <- function(place_id = c("home", "work")) {
place_id = place_id[1]
callAPI(paste("places", place_id, sep = "/"), 1, method = "GET", params = NULL)
}
#' Set place address
#'
#' Update home and work addresses for an Uber user's profile.
#'
#' Requires an OAuth 2.0 token with the places scope.
#'
#' @param place_id Either "home" or "work".
#' @param address Address to be assigned.
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-places-put}
#' @examples
#' \dontrun{
#' uber_places_put("home", "115 St Andrews Dr, Durban North, 4051, South Africa")
#' }
#' @export
uber_places_put <- function(place_id = c("home", "work"), address) {
place_id = place_id[1]
callAPI(paste("places", place_id, sep = "/"), 1, method = "PUT", params = list(address = address))
}
# PAYMENT -------------------------------------------------------------------------------------------------------------
#' Payment methods
#'
#' Retrieve a list of the user's available payment methods.
#'
#' Requires an OAuth 2.0 token with the request scope.
#'
#' @references
#' \url{https://developer.uber.com/docs/rides/api/v1-payment-methods}
#' @export
uber_payment_methods <- function() {
callAPI("payment-methods", 1)
}
|
# Interactively pick five disease-count CSV files (each read with a header;
# presumably columns month.year and Count -- confirm with the data source)
# and draw their monthly series as line charts in a 2x3 panel layout.
cholera <- read.csv(file.choose(), header = TRUE)
blood_vidal <- read.csv(file.choose(), header = TRUE)
malaria <- read.csv(file.choose(), header = TRUE)
dengue <- read.csv(file.choose(), header = TRUE)
swine_flu <- read.csv(file.choose(), header = TRUE)

# Arrange the five plots on one device (sixth panel stays empty).
par(mfrow = c(2, 3))
# type = "l" spelled out explicitly (the original passed "l" positionally).
plot(cholera$month.year, cholera$Count, type = "l",
     xlab = "month", ylab = "count", main = "Cholera")
plot(blood_vidal$month.year, blood_vidal$Count, type = "l",
     xlab = "month", ylab = "count", main = "BloodVidal")
plot(malaria$month.year, malaria$Count, type = "l",
     xlab = "month", ylab = "count", main = "Malaria")
plot(dengue$month.year, dengue$Count, type = "l",
     xlab = "month", ylab = "count", main = "Dengue")
plot(swine_flu$month.year, swine_flu$Count, type = "l",
     xlab = "month", ylab = "count", main = "Swine Flu")
| /line plot.R | no_license | RAGAVARTHINI/R-final-year-project | R | false | false | 707 | r | cholera<-read.csv(file.choose(),header = TRUE)
blood_vidal<-read.csv(file.choose(),header = TRUE)
malaria<-read.csv(file.choose(),header = TRUE)
dengue<-read.csv(file.choose(),header = TRUE)
swine_flu<-read.csv(file.choose(),header = TRUE)
par(mfrow=c(2,3))
plot(cholera$month.year,cholera$Count,xlab="month",ylab="count",main="Cholera","l")
plot(blood_vidal$month.year,blood_vidal$Count,xlab="month",ylab="count",main="BloodVidal","l")
plot(malaria$month.year,malaria$Count,xlab="month",ylab="count",main="Malaria","l")
plot(dengue$month.year,dengue$Count,xlab="month",ylab="count",main="Dengue","l")
plot(swine_flu$month.year,swine_flu$Count,xlab="month",ylab="count",main="Swine Flu","l")
|
# Plot 2: Global Active Power (kilowatts) over 2007-02-01/02 as a line
# chart, written to plot2.png.

# Download and unzip the UCI household power data set on first run.
# Bug fix: the original tested for a zip filename that the tempfile()
# download never creates, so the data was re-downloaded on every run;
# test for the extracted .txt instead.
if (!file.exists("household_power_consumption.txt")) {
  temp <- tempfile()
  download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
  unzip(temp)
  unlink(temp)
}

# Read data. na.strings spelled out in full (the original relied on
# partial matching of `na =`). All columns come in as character.
power <- read.table(file = "household_power_consumption.txt", header = TRUE,
                    colClasses = "character", sep = ";", na.strings = "?")

# Parse dates and keep only the two target days.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
df <- power[(power$Date == "2007-02-01") | (power$Date == "2007-02-02"), ]

# Build a POSIXct timestamp for the x-axis. Bug fix: the original passed a
# stray unnamed "%d/%m/%Y %H:%M:%S" argument to transform(), which created
# a junk constant column.
df$timestamp <- as.POSIXct(paste(df$Date, df$Time))
# Convert the measurement column from character to numeric for plotting.
df$Global_active_power <- as.numeric(df$Global_active_power)

# Plot 2
png(filename = "plot2.png", width = 480, height = 480)
plot(df$timestamp, df$Global_active_power, type = "l",
     xlab = " ", ylab = "Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | aorillio/ExData_Plotting1 | R | false | false | 831 | r | # Load the data set
if(!file.exists("exdata-data-household_power_consumption.zip")) {
temp <- tempfile()
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
file <- unzip(temp)
unlink(temp)
}
# Read data
power <- read.table(file = "household_power_consumption.txt", header = TRUE, colClasses = "character", sep = ";", na = "?")
#Adjust Date & Filter for timepoints
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
df <- power[(power$Date == "2007-02-01") | (power$Date == "2007-02-02"),]
df <- transform(df, timestamp = as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
#Plot 2
png(filename = "plot2.png", width = 480, height = 480)
plot(df$timestamp, df$Global_active_power, type = "l", xlab = " ", ylab = "Global Active Power (kilowatts)")
dev.off()
|
# Cricket match-outcome classification.
# Trains an SVM on one of three innings segments (overs 1-6, 7-15, 16-20)
# to predict whether the side batting first wins, and reports accuracy,
# a confusion matrix and the Jaccard index on a held-out test split.
library(party)
library(ggplot2)
library(randomForest)
library(e1071)
library(caret)
setwd("C:/Users/siddartha/Desktop/sid/data_analatics/Cricket-Insights/scripts/R scripts")
segment_one <- read.csv("./../../prediction data generated/first_six_overs.csv")
segment_two <- read.csv("./../../prediction data generated/seven_fifteen_overs.csv")
segment_three <- read.csv("./../../prediction data generated/sixteen_twenty_overs.csv")
# Derive two binary targets per match:
#   win_bat_first - 1 when team1 (batting first) won the match
#   win_toss_1    - 1 when team1 won the toss
for(i in 1:nrow(segment_three)){
  if(as.character(segment_three$team1[i]) == as.character(segment_three$win[i])){
    segment_three$win_bat_first[i] = 1
  }else{
    segment_three$win_bat_first[i] = 0
  }
  if(as.character(segment_three$team1[i]) == as.character(segment_three$toss_win[i])){
    segment_three$win_toss_1[i] = 1
  }else{
    segment_three$win_toss_1[i] = 0
  }
}
segment_three$win_bat_first <- as.factor(segment_three$win_bat_first)
segment_three$win_toss_1 <- as.factor(segment_three$win_toss_1)
#mtcars$am <- as.factor(mtcars$am)
#segment_one <- segment_one[sample(nrow(segment_one)),]
#segment_two <- segment_two[sample(nrow(segment_two)),]
# Seeds / accuracies recorded from earlier experiments:
#100 106/156 a = 0.83 r = 8
#85 100/156 a = .57
#svm --> 382462346 107/156 a = 0.82 r = 9 ration075--> 71.538% acc , ratio70 ---> 71.333% acc ctree --> 382462312
# Fixed seed so the shuffle (and hence the train/test split) is reproducible.
set.seed(382462312)
segment_three <- segment_three[sample(nrow(segment_three)),]
#Split data into train and test data
train_data_ratio = 0.70
train_segment_one = segment_one[1:floor(train_data_ratio*nrow(segment_one)),]
test_segment_one = segment_one[(floor(train_data_ratio*nrow(segment_one))+1):nrow(segment_one),]
train_segment_two = segment_two[1:floor(train_data_ratio*nrow(segment_two)),]
test_segment_two = segment_two[(floor(train_data_ratio*nrow(segment_two))+1):nrow(segment_two),]
train_segment_three = segment_three[1:floor(train_data_ratio*nrow(segment_three)),]
test_segment_three = segment_three[(floor(train_data_ratio*nrow(segment_three))+1):nrow(segment_three),]
#Prediction
# selectBit chooses which innings segment to model (1, 2 or 3).
selectBit = 3
if(selectBit == 1){
  train_data = train_segment_one
  test_data = test_segment_one
}else if(selectBit == 2){
  train_data = train_segment_two
  test_data = test_segment_two
}else if(selectBit == 3){
  train_data = train_segment_three
  test_data = test_segment_three
}
# a/b weight the run features vs the wicket features after standardisation.
# NOTE(review): `a` is reused later to hold confusion-matrix frequencies.
a = 0.65
b = 1 - a
r = 9
train_data$MR <- a*c(scale(train_data$MR))
train_data$OR <- a*c(scale(train_data$OR))
train_data$MW <- b*c(scale(train_data$MW))
train_data$OW <- b*c(scale(train_data$OW))
train_data$MRN <- a*c(scale(train_data$MRN))
train_data$ORN <- a*c(scale(train_data$ORN))
#win_bat_first
# Alternative models tried previously (random forest / conditional tree):
#output.tree <- randomForest(win ~ team1 + team2 + (MW/MR) + (OW/OR) + (MRN - ORN) , data = train_data)
#output.tree <- ctree(win_bat_first ~ team1 + team2 + win_toss_1 + (MW/MR) + (OW/OR) + (MRN - ORN) , data = train_data)
#output.tree <- randomForest(win_bat_first ~ team1 + team2 + win_toss_1 + OW + MW + OR + ORN + MRN + MR , data = train_data)
#output.tree <- ctree(win_bat_first ~ team1 + team2 + win_toss_1 + OW + MW + OR + ORN + MRN + MR , data = train_data)
#plot(output.tree)
output.tree <- svm(win_bat_first ~ team1 + team2 + win_toss_1 + OW + MW + OR + ORN + MRN + MR , data = train_data)
# Rebuild the test frame with freshly scaled feature columns.
# NOTE(review): the test columns are scaled twice (in data.frame() and
# again below), unlike the training data -- confirm this is intended.
test_data = data.frame(file = test_data$file, venue = test_data$venue, win_toss_1 = test_data$win_toss_1,team1 = test_data$team1, team2 = test_data$team2, MR = c(scale(test_data$MR)), MRN = c(scale(test_data$MRN)), MW = c(scale(test_data$MW)), OR = c(scale(test_data$OR)), ORN = c(scale(test_data$ORN)), OW = c(scale(test_data$OW)) ,win_bat_first = test_data$win_bat_first)
test_data$MR <- a*c(scale(test_data$MR))
test_data$OR <- a*c(scale(test_data$OR))
test_data$MW <- b*c(scale(test_data$MW))
test_data$OW <- b*c(scale(test_data$OW))
test_data$MRN <- a*c(scale(test_data$MRN))
test_data$ORN <- a*c(scale(test_data$ORN))
# Predict on every column except the target (the last one).
testPred <- predict(output.tree, newdata = test_data[,1:(ncol(test_data) - 1)])
#table(testPred, test_data$win_bat_first)
#plot(output.tree)
#df <- data.frame(test_data$team1 , test_data$team2 , testPred , test_data$win)
#summary(df$testPred == df$test_data.team1 | df$testPred == df$test_data.team2)
#summary(testPred == test_data$team1 || testPred == test_data$team2)
count = 0
predictions <- data.frame(testPred)
#for (i in 1:length(predictions$testPred)){
# if((as.character(predictions$testPred[i]) == as.character(test_data$team1[i])) || (as.character(predictions$testPred[i]) == as.character(test_data$team2[i]))){
# }else{
# testPred[i] <- as.character(test_data$team1[i])
# count = count + 1
# }
#}
#count
result = summary(testPred == test_data$win_bat_first)
# NOTE(review): `c` is overwritten here, shadowing base::c for the rest
# of the script.
c = confusionMatrix(testPred, test_data$win_bat_first)
a = data.frame(c[2])$table.Freq
result
c
100*strtoi(result[3])/(strtoi(result[3]) + strtoi(result[2])) #---- Accuracy % on test data
jacard = a[4]/(a[4] + a[2] +a[3])
#write.csv(data.frame(test_data$file , testPred) , file = "/home/aditya9509/Cricket/prediction data generated/classification4.csv" )
| /scripts/R scripts/classification.R | no_license | kumarshivam675/Cricket-Insights | R | false | false | 5,079 | r | library(party)
library(ggplot2)
library(randomForest)
library(e1071)
library(caret)
setwd("C:/Users/siddartha/Desktop/sid/data_analatics/Cricket-Insights/scripts/R scripts")
segment_one <- read.csv("./../../prediction data generated/first_six_overs.csv")
segment_two <- read.csv("./../../prediction data generated/seven_fifteen_overs.csv")
segment_three <- read.csv("./../../prediction data generated/sixteen_twenty_overs.csv")
for(i in 1:nrow(segment_three)){
if(as.character(segment_three$team1[i]) == as.character(segment_three$win[i])){
segment_three$win_bat_first[i] = 1
}else{
segment_three$win_bat_first[i] = 0
}
if(as.character(segment_three$team1[i]) == as.character(segment_three$toss_win[i])){
segment_three$win_toss_1[i] = 1
}else{
segment_three$win_toss_1[i] = 0
}
}
segment_three$win_bat_first <- as.factor(segment_three$win_bat_first)
segment_three$win_toss_1 <- as.factor(segment_three$win_toss_1)
#mtcars$am <- as.factor(mtcars$am)
#segment_one <- segment_one[sample(nrow(segment_one)),]
#segment_two <- segment_two[sample(nrow(segment_two)),]
#100 106/156 a = 0.83 r = 8
#85 100/156 a = .57
#svm --> 382462346 107/156 a = 0.82 r = 9 ration075--> 71.538% acc , ratio70 ---> 71.333% acc ctree --> 382462312
set.seed(382462312)
segment_three <- segment_three[sample(nrow(segment_three)),]
#Split data into train and test data
train_data_ratio = 0.70
train_segment_one = segment_one[1:floor(train_data_ratio*nrow(segment_one)),]
test_segment_one = segment_one[(floor(train_data_ratio*nrow(segment_one))+1):nrow(segment_one),]
train_segment_two = segment_two[1:floor(train_data_ratio*nrow(segment_two)),]
test_segment_two = segment_two[(floor(train_data_ratio*nrow(segment_two))+1):nrow(segment_two),]
train_segment_three = segment_three[1:floor(train_data_ratio*nrow(segment_three)),]
test_segment_three = segment_three[(floor(train_data_ratio*nrow(segment_three))+1):nrow(segment_three),]
#Prediction
selectBit = 3
if(selectBit == 1){
train_data = train_segment_one
test_data = test_segment_one
}else if(selectBit == 2){
train_data = train_segment_two
test_data = test_segment_two
}else if(selectBit == 3){
train_data = train_segment_three
test_data = test_segment_three
}
a = 0.65
b = 1 - a
r = 9
train_data$MR <- a*c(scale(train_data$MR))
train_data$OR <- a*c(scale(train_data$OR))
train_data$MW <- b*c(scale(train_data$MW))
train_data$OW <- b*c(scale(train_data$OW))
train_data$MRN <- a*c(scale(train_data$MRN))
train_data$ORN <- a*c(scale(train_data$ORN))
#win_bat_first
#output.tree <- randomForest(win ~ team1 + team2 + (MW/MR) + (OW/OR) + (MRN - ORN) , data = train_data)
#output.tree <- ctree(win_bat_first ~ team1 + team2 + win_toss_1 + (MW/MR) + (OW/OR) + (MRN - ORN) , data = train_data)
#output.tree <- randomForest(win_bat_first ~ team1 + team2 + win_toss_1 + OW + MW + OR + ORN + MRN + MR , data = train_data)
#output.tree <- ctree(win_bat_first ~ team1 + team2 + win_toss_1 + OW + MW + OR + ORN + MRN + MR , data = train_data)
#plot(output.tree)
output.tree <- svm(win_bat_first ~ team1 + team2 + win_toss_1 + OW + MW + OR + ORN + MRN + MR , data = train_data)
test_data = data.frame(file = test_data$file, venue = test_data$venue, win_toss_1 = test_data$win_toss_1,team1 = test_data$team1, team2 = test_data$team2, MR = c(scale(test_data$MR)), MRN = c(scale(test_data$MRN)), MW = c(scale(test_data$MW)), OR = c(scale(test_data$OR)), ORN = c(scale(test_data$ORN)), OW = c(scale(test_data$OW)) ,win_bat_first = test_data$win_bat_first)
test_data$MR <- a*c(scale(test_data$MR))
test_data$OR <- a*c(scale(test_data$OR))
test_data$MW <- b*c(scale(test_data$MW))
test_data$OW <- b*c(scale(test_data$OW))
test_data$MRN <- a*c(scale(test_data$MRN))
test_data$ORN <- a*c(scale(test_data$ORN))
testPred <- predict(output.tree, newdata = test_data[,1:(ncol(test_data) - 1)])
#table(testPred, test_data$win_bat_first)
#plot(output.tree)
#df <- data.frame(test_data$team1 , test_data$team2 , testPred , test_data$win)
#summary(df$testPred == df$test_data.team1 | df$testPred == df$test_data.team2)
#summary(testPred == test_data$team1 || testPred == test_data$team2)
count = 0
predictions <- data.frame(testPred)
#for (i in 1:length(predictions$testPred)){
# if((as.character(predictions$testPred[i]) == as.character(test_data$team1[i])) || (as.character(predictions$testPred[i]) == as.character(test_data$team2[i]))){
# }else{
# testPred[i] <- as.character(test_data$team1[i])
# count = count + 1
# }
#}
#count
result = summary(testPred == test_data$win_bat_first)
c = confusionMatrix(testPred, test_data$win_bat_first)
a = data.frame(c[2])$table.Freq
result
c
100*strtoi(result[3])/(strtoi(result[3]) + strtoi(result[2])) #---- Accuracy % on test data
jacard = a[4]/(a[4] + a[2] +a[3])
#write.csv(data.frame(test_data$file , testPred) , file = "/home/aditya9509/Cricket/prediction data generated/classification4.csv" )
|
# Write a log10 HITChip data matrix (taxa x samples) into a file
# in the sparCC input format: a tab-separated table of pseudo-counts
# with an "OTU_id" header column and no quoting.
#
# dat:  numeric matrix of log10 signals, rownames = taxa, colnames = samples
# file: path of the output file (overwritten if it exists)
sparcc.write <- function (dat, file) {
  # sparCC expects absolute counts, so back-transform from log10 and
  # round to mimic integer counts.
  datc <- round(10^dat)
  # sparCC identifiers must not contain spaces.
  rownames(datc) <- gsub(" ", "_", rownames(datc))
  # Prepend the taxon names as an explicit "OTU_id" column ...
  datc <- cbind(rownames(datc), datc)
  colnames(datc)[[1]] <- "OTU_id"
  # ... and the column names as an explicit header row, so the table can
  # be written without R's own row/column-name handling.
  datc <- rbind(colnames(datc), datc)
  rownames(datc) <- colnames(datc) <- NULL
  # Use TRUE/FALSE rather than the reassignable shorthand T/F.
  write.table(datc, file = file, quote = FALSE, col.names = FALSE,
              row.names = FALSE, sep = "\t")
}
# Write log10 HITChip data matrix taxa x samples into a file
# in the sparCC format
sparcc.write <- function (dat, file) {
# Mimic absolute counts
datc <- round(10^dat)
rownames(datc) <- gsub(" ", "_", rownames(datc))
datc <- cbind(rownames(datc), datc)
colnames(datc)[[1]] <- "OTU_id"
datc <- rbind(colnames(datc), datc)
rownames(datc) <- colnames(datc) <- NULL
write.table(datc, file = file, quote = F, col.names = F, row.names = F, sep = "\t")
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{goa_cc_table}
\alias{goa_cc_table}
\title{GO cellular component annotations}
\format{
A data frame with 83,029 rows and 3 variables:
\describe{
\item{Gene.symbol}{HGNC gene symbol}
\item{GO.ID}{GO term ID}
\item{GO.name}{GO term name}
}
}
\source{
Homo sapiens EBI Gene Ontology Annotation Database protein dataset (2020-03-23 release) downloaded on 2020-03-30 from:
\url{http://current.geneontology.org/products/pages/downloads.html}
}
\usage{
goa_cc_table
}
\description{
A dataset containing annotations for 18,880 genes in 1,765 GO cellular component terms.
}
\references{
Ashburner M, Ball CA, Blake JA, et al. Gene ontology: tool for the unification of biology.
The Gene Ontology Consortium. Nat Genet. 2000;25(1):25-29. doi:10.1038/75556
The Gene Ontology Consortium. The Gene Ontology Resource: 20 years and still GOing strong.
Nucleic Acids Res. 2019;47(D1):D330-D338. doi:10.1093/nar/gky1055
}
\seealso{
Other datasets:
\code{\link{accession_gene_table}},
\code{\link{bioplex_table}},
\code{\link{example_data2}},
\code{\link{example_data3}},
\code{\link{example_data}},
\code{\link{genes_snps}},
\code{\link{gnomad_table}},
\code{\link{goa_bp_table}},
\code{\link{goa_mf_table}},
\code{\link{gtex_protein}},
\code{\link{gtex_rna}},
\code{\link{gwas_table}},
\code{\link{hgnc_group_table}},
\code{\link{hpa_rna}},
\code{\link{inweb_table}},
\code{\link{irefindex_table}},
\code{\link{msigdb_c1_table}},
\code{\link{msigdb_c2_table}},
\code{\link{msigdb_c3_table}},
\code{\link{msigdb_c4_table}},
\code{\link{msigdb_c5_table}},
\code{\link{msigdb_c6_table}},
\code{\link{msigdb_c7_table}},
\code{\link{msigdb_h_table}}
}
\concept{datasets}
\keyword{datasets}
| /man/goa_cc_table.Rd | permissive | lagelab/Genoppi | R | false | true | 1,784 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{goa_cc_table}
\alias{goa_cc_table}
\title{GO cellular component annotations}
\format{
A data frame with 83,029 rows and 3 variables:
\describe{
\item{Gene.symbol}{HGNC gene symbol}
\item{GO.ID}{GO term ID}
\item{GO.name}{GO term name}
}
}
\source{
Homo sapiens EBI Gene Ontology Annotation Database protein dataset (2020-03-23 release) downloaded on 2020-03-30 from:
\url{http://current.geneontology.org/products/pages/downloads.html}
}
\usage{
goa_cc_table
}
\description{
A dataset containing annotations for 18,880 genes in 1,765 GO cellular component terms.
}
\references{
Ashburner M, Ball CA, Blake JA, et al. Gene ontology: tool for the unification of biology.
The Gene Ontology Consortium. Nat Genet. 2000;25(1):25-29. doi:10.1038/75556
The Gene Ontology Consortium. The Gene Ontology Resource: 20 years and still GOing strong.
Nucleic Acids Res. 2019;47(D1):D330-D338. doi:10.1093/nar/gky1055
}
\seealso{
Other datasets:
\code{\link{accession_gene_table}},
\code{\link{bioplex_table}},
\code{\link{example_data2}},
\code{\link{example_data3}},
\code{\link{example_data}},
\code{\link{genes_snps}},
\code{\link{gnomad_table}},
\code{\link{goa_bp_table}},
\code{\link{goa_mf_table}},
\code{\link{gtex_protein}},
\code{\link{gtex_rna}},
\code{\link{gwas_table}},
\code{\link{hgnc_group_table}},
\code{\link{hpa_rna}},
\code{\link{inweb_table}},
\code{\link{irefindex_table}},
\code{\link{msigdb_c1_table}},
\code{\link{msigdb_c2_table}},
\code{\link{msigdb_c3_table}},
\code{\link{msigdb_c4_table}},
\code{\link{msigdb_c5_table}},
\code{\link{msigdb_c6_table}},
\code{\link{msigdb_c7_table}},
\code{\link{msigdb_h_table}}
}
\concept{datasets}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repl.R
\name{repl_python}
\alias{repl_python}
\title{Run a Python REPL}
\usage{
repl_python(module = NULL, quiet = getOption("reticulate.repl.quiet",
default = FALSE))
}
\arguments{
\item{module}{An (optional) Python module to be imported before
the REPL is launched.}
\item{quiet}{Boolean; print a startup banner when launching the REPL? If
\code{FALSE}, the banner will be suppressed.}
}
\description{
This function provides a Python REPL in the \R session, which can be used
to interactively run Python code. All code executed within the REPL is
run within the Python main module, and any generated Python objects will
persist in the Python session after the REPL is detached.
}
\details{
When working with R and Python scripts interactively, one can activate
the Python REPL with \code{repl_python()}, run Python code, and later run \code{exit}
to return to the \R console.
}
\examples{
\dontrun{
# enter the Python REPL, create a dictionary, and exit
repl_python()
dictionary = {'alpha': 1, 'beta': 2}
exit
# access the created dictionary from R
py$dictionary
# $alpha
# [1] 1
#
# $beta
# [1] 2
}
}
\seealso{
\link{py}, for accessing objects created using the Python REPL.
}
| /man/repl_python.Rd | permissive | rossholmberg/reticulate | R | false | true | 1,266 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repl.R
\name{repl_python}
\alias{repl_python}
\title{Run a Python REPL}
\usage{
repl_python(module = NULL, quiet = getOption("reticulate.repl.quiet",
default = FALSE))
}
\arguments{
\item{module}{An (optional) Python module to be imported before
the REPL is launched.}
\item{quiet}{Boolean; print a startup banner when launching the REPL? If
\code{FALSE}, the banner will be suppressed.}
}
\description{
This function provides a Python REPL in the \R session, which can be used
to interactively run Python code. All code executed within the REPL is
run within the Python main module, and any generated Python objects will
persist in the Python session after the REPL is detached.
}
\details{
When working with R and Python scripts interactively, one can activate
the Python REPL with \code{repl_python()}, run Python code, and later run \code{exit}
to return to the \R console.
}
\examples{
\dontrun{
# enter the Python REPL, create a dictionary, and exit
repl_python()
dictionary = {'alpha': 1, 'beta': 2}
exit
# access the created dictionary from R
py$dictionary
# $alpha
# [1] 1
#
# $beta
# [1] 2
}
}
\seealso{
\link{py}, for accessing objects created using the Python REPL.
}
|
# Render the course-project report: knit the R Markdown source and
# convert the resulting Markdown to standalone HTML.
# library() errors immediately if a package is missing, unlike require(),
# which only warns and returns FALSE.
library(knitr)
library(markdown)
setwd("~/Repositories/Coursera/GettingAndCleaning/")
knit("run_analysis.Rmd", encoding="ISO8859-1")
markdownToHTML("run_analysis.md", "run_analysis.html")
require(markdown)
setwd("~/Repositories/Coursera/GettingAndCleaning/")
knit("run_analysis.Rmd", encoding="ISO8859-1")
markdownToHTML("run_analysis.md", "run_analysis.html")
|
# Source of data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# ddply() is used in step 5; load plyr explicitly (the original relied
# on it having been attached elsewhere).
library(plyr)
# 1. Merge the training and the test sets to create one data set.
# assign subject ID for test and training data sets
subjectTest <- read.table('test/subject_test.txt', col.names=c('Subject'))
subjectTrain <- read.table('train/subject_train.txt', col.names=c('Subject'))
# combine subject records from test and training data sets
subjects <- rbind(subjectTest,subjectTrain)
# read in features measurements from test and training data sets
featuresTest <- read.table('test/X_test.txt')
featuresTrain <- read.table('train/X_train.txt')
# combine test and training sets for features
features <- rbind(featuresTest,featuresTrain)
# read feature labels and use them as the column names of `features`
feature_labels <- read.table('features.txt', col.names=c('index', 'labels'))
colnames(features) <- feature_labels$labels
# -----------------------------------------------------------------------------
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
# NOTE: the unescaped parentheses in 'mean()|std()' are empty regex groups,
# so this effectively matches 'mean|std' (meanFreq is removed just below).
feature_selected <- as.character(feature_labels$labels[grepl('mean()|std()', feature_labels$labels)])
# do not include meanFreq measurements
feature_selected <- as.character(feature_selected[!grepl('meanFreq()', feature_selected)])
features_means_stds <- features[,feature_selected]
# read in activities from test and training sets
activitiesTest <- read.table('test/y_test.txt')
colnames(activitiesTest) <- 'Activity'
activitiesTrain <- read.table('train/y_train.txt')
colnames(activitiesTrain) <- 'Activity'
# combine test and training sets for activities
activities <- rbind(activitiesTest, activitiesTrain)
# ------------------------------------------------------------------------
# 3. Use descriptive activity names to name the activities in the data set
# descriptive names according to activity_labels.txt
activities$Activity[activities$Activity=='1'] <- 'WALKING'
activities$Activity[activities$Activity=='2'] <- 'WALKING_UPSTAIRS'
activities$Activity[activities$Activity=='3'] <- 'WALKING_DOWNSTAIRS'
activities$Activity[activities$Activity=='4'] <- 'SITTING'
activities$Activity[activities$Activity=='5'] <- 'STANDING'
activities$Activity[activities$Activity=='6'] <- 'LAYING'
# ------------------------------------------------------------
# 4. Appropriately labels the data set with descriptive names.
# make variable names more descriptive by using full words and removing parentheses
names(features_means_stds) <- gsub('Acc','Acceleration', names(features_means_stds))
names(features_means_stds) <- gsub('Mag','Magnitude', names(features_means_stds))
names(features_means_stds) <- gsub('Freq','Frequency', names(features_means_stds))
names(features_means_stds) <- gsub('-mean','Mean', names(features_means_stds))
names(features_means_stds) <- gsub('-std','StandardDeviation', names(features_means_stds))
names(features_means_stds) <- gsub('\\(|\\)','', names(features_means_stds), perl=TRUE)
# --------------------------------------------------------------------------------------
# 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject
# combine all data sets
all_data <- cbind(subjects,activities,features_means_stds)
# get the average of each variable for each activity and subject (uses plyr)
averagevar <- ddply(all_data, c("Subject","Activity"), numcolwise(mean))
# save final dataset as average_variables.txt
write.table(averagevar, file = "Proyect_averagevar.txt")
| /run_analysis.R | no_license | jigarcian/Getting-and-Cleaning-Data | R | false | false | 3,751 | r | # Source of data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# 1. Merge the training and the test sets to create one data set.
# assign subject ID for test and training data sets
subjectTest <- read.table('test/subject_test.txt', col.names=c('Subject'))
subjectTrain <- read.table('train/subject_train.txt', col.names=c('Subject'))
# combine subject records from test and training data sets
subjects <- rbind(subjectTest,subjectTrain)
# read in features measurements from test and training data sets
featuresTest <- read.table('test/X_test.txt')
featuresTrain <- read.table('train/X_train.txt')
# combine test and training sets for features
features <- rbind(featuresTest,featuresTrain)
# copy feature labels
feature_labels <- read.table('features.txt', col.names=c('index', 'labels'))
# make feature_labels$labels the column name of features
names(feature_labels)
labels <- feature_labels$labels
colnames(features) <- labels
# -----------------------------------------------------------------------------
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
# subset the features table by selecting variables with labels that contain mean and standard deviation in their names
feature_selected <- as.character(feature_labels$labels[grepl('mean()|std()', feature_labels$labels)])
# do not include meanFreq measurements
feature_selected <- as.character(feature_selected[!grepl('meanFreq()', feature_selected)])
features_means_stds <- features[,feature_selected]
# read in activities from test and training sets
activitiesTest <- read.table('test/y_test.txt')
colnames(activitiesTest) <- 'Activity'
activitiesTrain <- read.table('train/y_train.txt')
colnames(activitiesTrain) <- 'Activity'
# combine test and training sets for activities
activities <- rbind(activitiesTest, activitiesTrain)
# ------------------------------------------------------------------------
# 3. Use descriptive activity names to name the activities in the data set
# descriptive names according to activity_labels.txt
activities$Activity[activities$Activity=='1'] <- 'WALKING'
activities$Activity[activities$Activity=='2'] <- 'WALKING_UPSTAIRS'
activities$Activity[activities$Activity=='3'] <- 'WALKING_DOWNSTAIRS'
activities$Activity[activities$Activity=='4'] <- 'SITTING'
activities$Activity[activities$Activity=='5'] <- 'STANDING'
activities$Activity[activities$Activity=='6'] <- 'LAYING'
# ------------------------------------------------------------
# 4. Appropriately labels the data set with descriptive names.
# make variable names more descriptive by using full words and removing parentheses
names(features_means_stds) <- gsub('Acc','Acceleration', names(features_means_stds))
names(features_means_stds) <- gsub('Mag','Magnitude', names(features_means_stds))
names(features_means_stds) <- gsub('Freq','Frequency', names(features_means_stds))
names(features_means_stds) <- gsub('-mean','Mean', names(features_means_stds))
names(features_means_stds) <- gsub('-std','StandardDeviation', names(features_means_stds))
names(features_means_stds) <- gsub('\\(|\\)','', names(features_means_stds), perl=TRUE)
# --------------------------------------------------------------------------------------
# 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject
# combine all data sets
all_data <- cbind(subjects,activities,features_means_stds)
# get the average of each variable for each activity and subject (uses plyr)
averagevar <- ddply(all_data, c("Subject","Activity"), numcolwise(mean))
# save final dataset as average_variables.txt
write.table(averagevar, file = "Proyect_averagevar.txt")
|
# Prepare a Spark DataFrame for MLlib model fitting: assemble the feature
# columns into a single vector column and make sure the response column
# (if any) is numeric (DoubleType).
#
# The generated column names (and, for string responses, the recovered
# factor labels) are reported back to the caller through `envir`.
ml_prepare_dataframe <- function(df, features, response = NULL, ...,
                                 envir = new.env(parent = emptyenv()))
{
  df <- spark_dataframe(df)
  schema <- sdf_schema(df)
  # default report for feature, response variable names
  envir$features <- random_string("features")
  envir$response <- response
  envir$labels <- NULL
  # ensure numeric response
  if (!is.null(response)) {
    responseType <- schema[[response]]$type
    if (responseType == "StringType") {
      # string labels are index-encoded into a fresh numeric column;
      # ft_string_indexer records the label mapping into envir$labels
      envir$response <- random_string("response")
      df <- ft_string_indexer(df, response, envir$response, envir)
    } else if (responseType != "DoubleType") {
      # any other non-double type (e.g. integer) is cast to double
      envir$response <- random_string("response")
      castedColumn <- df %>%
        invoke("col", response) %>%
        invoke("cast", "double")
      df <- df %>%
        invoke("withColumn", envir$response, castedColumn)
    }
  }
  # assemble features vector and return
  transformed <- ft_vector_assembler(df, features, envir$features)
  # return as vanilla spark dataframe
  spark_dataframe(transformed)
}
# Evaluate `expr`, returning NULL instead of signalling if an error occurs.
try_null <- function(expr) {
  tryCatch(
    expr,
    error = function(cond) NULL
  )
}
#' @export
predict.ml_model <- function(object, newdata, ...) {
  # Score `newdata` with the fitted Spark model and pull the
  # "prediction" column back into R.
  params <- object$model.parameters
  predicted <- sdf_predict(object, newdata, ...)
  column <- sdf_read_column(predicted, "prediction")
  # For classification on a string response the model predicts 0-based
  # label indices; map them back to the original labels.
  if (is.character(params$labels) && is.numeric(column))
    column <- params$labels[column + 1]
  column
}
#' @export
fitted.ml_model <- function(object, ...) {
  # Pull the fitted values (the "prediction" column) out of the Spark
  # model's training summary.
  model_summary <- invoke(object$.model, "summary")
  training_predictions <- invoke(model_summary, "predictions")
  sdf_read_column(training_predictions, "prediction")
}
#' @export
residuals.ml_model <- function(object, ...) {
  # Pull the training residuals out of the Spark model's summary.
  model_summary <- invoke(object$.model, "summary")
  residual_frame <- invoke(model_summary, "residuals")
  sdf_read_column(residual_frame, "residuals")
}
# Move the entry called `name` to the front of a named vector, keeping the
# relative order of the remaining entries. NULL, unnamed vectors, and
# vectors without a `name` entry are returned unchanged.
reorder_first <- function(vector, name) {
  if (is.null(vector)) {
    return(vector)
  }
  labels <- names(vector)
  if (is.null(labels) || !(name %in% labels)) {
    return(vector)
  }
  vector[c(name, base::setdiff(labels, name))]
}
# Move the "(Intercept)" coefficient (if present) to the front of a named
# vector, leaving the order of the remaining entries unchanged.
intercept_first <- function(vector) {
  reorder_first(vector, "(Intercept)")
}
# Read a Spark ML vector field from a Java object reference as an R vector.
read_spark_vector <- function(jobj, field) {
  spark_vec <- invoke(jobj, field)
  invoke(spark_vec, "toArray")
}
# Read a Spark ML matrix field from a Java object reference as an R matrix.
read_spark_matrix <- function(jobj, field) {
  spark_mat <- invoke(jobj, field)
  n_rows <- invoke(spark_mat, "numRows")
  n_cols <- invoke(spark_mat, "numCols")
  # toArray yields the values in column-major order, matching matrix().
  values <- invoke(spark_mat, "toArray")
  matrix(values, nrow = n_rows, ncol = n_cols)
}
# Validate that `object` is a length-one numeric value and coerce it to
# integer; stops with an informative error otherwise.
ensure_scalar_integer <- function(object) {
  if (!is.numeric(object) || length(object) != 1) {
    stop(sprintf("'%s' is not a length-one numeric value",
                 deparse(substitute(object))))
  }
  as.integer(object)
}
# Validate that `object` is a length-one numeric value and coerce it to
# double; stops with an informative error otherwise.
ensure_scalar_double <- function(object) {
  if (!is.numeric(object) || length(object) != 1) {
    stop(sprintf("'%s' is not a length-one numeric value",
                 deparse(substitute(object))))
  }
  as.double(object)
}
# Validate that `object` is a length-one logical value (after coercion)
# and return it. A NULL `object` is replaced by `default` when one is
# supplied; NA is rejected unless `allow.na` is TRUE.
ensure_scalar_boolean <- function(object, allow.na = FALSE, default = NULL) {
  if (is.null(object) && !is.null(default)) {
    object <- default
  }
  if (length(object) != 1) {
    stop(sprintf("'%s' is not a length-one logical value",
                 deparse(substitute(object))))
  }
  result <- as.logical(object)
  if (is.na(result) && !allow.na) {
    stop(sprintf("'%s' is NA (must be TRUE/FALSE)",
                 deparse(substitute(object))))
  }
  result
}
# Validate that `object` is a length-one character vector and return it;
# stops with an informative error otherwise.
ensure_scalar_character <- function(object) {
  if (!is.character(object) || length(object) != 1) {
    stop(sprintf("'%s' is not a length-one character vector",
                 deparse(substitute(object))))
  }
  as.character(object)
}
| /R/ml_utils.R | permissive | land23/sparklyr | R | false | false | 3,695 | r | ml_prepare_dataframe <- function(df, features, response = NULL, ...,
envir = new.env(parent = emptyenv()))
{
df <- spark_dataframe(df)
schema <- sdf_schema(df)
# default report for feature, response variable names
envir$features <- random_string("features")
envir$response <- response
envir$labels <- NULL
# ensure numeric response
if (!is.null(response)) {
responseType <- schema[[response]]$type
if (responseType == "StringType") {
envir$response <- random_string("response")
df <- ft_string_indexer(df, response, envir$response, envir)
} else if (responseType != "DoubleType") {
envir$response <- random_string("response")
castedColumn <- df %>%
invoke("col", response) %>%
invoke("cast", "double")
df <- df %>%
invoke("withColumn", envir$response, castedColumn)
}
}
# assemble features vector and return
transformed <- ft_vector_assembler(df, features, envir$features)
# return as vanilla spark dataframe
spark_dataframe(transformed)
}
try_null <- function(expr) {
tryCatch(expr, error = function(e) NULL)
}
#' @export
predict.ml_model <- function(object, newdata, ...) {
params <- object$model.parameters
predicted <- sdf_predict(object, newdata, ...)
column <- sdf_read_column(predicted, "prediction")
if (is.character(params$labels) && is.numeric(column))
column <- params$labels[column + 1]
column
}
#' @export
fitted.ml_model <- function(object, ...) {
object$.model %>%
invoke("summary") %>%
invoke("predictions") %>%
sdf_read_column("prediction")
}
#' @export
residuals.ml_model <- function(object, ...) {
object$.model %>%
invoke("summary") %>%
invoke("residuals") %>%
sdf_read_column("residuals")
}
reorder_first <- function(vector, name) {
if (is.null(vector))
return(vector)
nm <- names(vector)
if (is.null(nm) || !name %in% nm)
return(vector)
ordered <- c(name, base::setdiff(nm, name))
vector[ordered]
}
intercept_first <- function(vector) {
reorder_first(vector, "(Intercept)")
}
read_spark_vector <- function(jobj, field) {
object <- invoke(jobj, field)
invoke(object, "toArray")
}
# Read a Spark MLlib matrix-valued `field` from a Java object reference
# and materialize it as an R matrix of the reported dimensions.
# NOTE(review): assumes toArray() yields column-major order matching
# matrix()'s default byrow = FALSE — confirm against the Spark matrix type.
read_spark_matrix <- function(jobj, field) {
  matrix_ref <- invoke(jobj, field)
  matrix(
    invoke(matrix_ref, "toArray"),
    nrow = invoke(matrix_ref, "numRows"),
    ncol = invoke(matrix_ref, "numCols")
  )
}
# Validate that `object` is a single numeric value and coerce it to
# integer; stops with an error naming the caller's expression otherwise.
ensure_scalar_integer <- function(object) {
  valid <- length(object) == 1 && is.numeric(object)
  if (!valid) {
    stop(sprintf(
      "'%s' is not a length-one numeric value",
      deparse(substitute(object))
    ))
  }
  as.integer(object)
}
# Validate that `object` is a single numeric value and coerce it to
# double; stops with an error naming the caller's expression otherwise.
ensure_scalar_double <- function(object) {
  valid <- length(object) == 1 && is.numeric(object)
  if (!valid) {
    stop(sprintf(
      "'%s' is not a length-one numeric value",
      deparse(substitute(object))
    ))
  }
  as.double(object)
}
# Validate that `object` is a single logical-coercible value and return it
# as TRUE/FALSE (or NA when `allow.na = TRUE`). When `object` is NULL and
# a `default` is supplied, the default is used instead.
ensure_scalar_boolean <- function(object, allow.na = FALSE, default = NULL) {
  # Capture the caller's expression before `object` may be reassigned;
  # the original deparsed after reassignment, so error messages could show
  # the substituted default value rather than what the caller passed.
  deparsed <- deparse(substitute(object))
  if (!is.null(default) && is.null(object)) {
    object <- default
  }
  if (length(object) != 1) {
    stop(sprintf("'%s' is not a length-one logical value", deparsed))
  }
  value <- as.logical(object)
  if (!allow.na && is.na(value)) {
    stop(sprintf("'%s' is NA (must be TRUE/FALSE)", deparsed))
  }
  value
}
# Validate that `object` is a single character string and return it;
# stops with an error naming the caller's expression otherwise.
ensure_scalar_character <- function(object) {
  valid <- is.character(object) && length(object) == 1
  if (!valid) {
    stop(sprintf(
      "'%s' is not a length-one character vector",
      deparse(substitute(object))
    ))
  }
  as.character(object)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{numeric_to_vector_text}
\alias{numeric_to_vector_text}
\title{Convert numeric to string}
\usage{
numeric_to_vector_text(x)
}
\arguments{
\item{x}{Numeric vector.}
}
\description{
Convert a numeric vector to a string in which each number is separated by
\code{,}.
}
| /man/numeric_to_vector_text.Rd | permissive | DavidBarke/QWUtils | R | false | true | 357 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{numeric_to_vector_text}
\alias{numeric_to_vector_text}
\title{Convert numeric to string}
\usage{
numeric_to_vector_text(x)
}
\arguments{
\item{x}{Numeric vector.}
}
\description{
Convert a numeric vector to a string in which each number is separated by
\code{,}.
}
|
# Jake Yeung
# Date of Creation: 2019-08-16
# File: ~/projects/scchic/scripts/scripts_analysis/scrnaseq/zebrafish_marrow.R
# Zebrafish
rm(list=ls())
require(statmod)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(irlba)
library(umap)
library(scchicFuncs)
library(Seurat)
library(hash)
library(xlsx)
# Functions ---------------------------------------------------------------
# Elementwise p * log2(p) with the entropy convention 0 * log2(0) == 0.
# Fix: the original called Vectorize() but discarded its return value, so
# the wrapper was a no-op; ifelse() is already vectorized, so a plain
# function gives identical behavior.
plog2p <- function(p) {
  ifelse(p == 0, 0, p * log2(p))
}
# Shannon entropy (in bits) of a probability vector `p`, computed via the
# sibling helper plog2p() (which treats 0 * log2(0) as 0). When
# `normalize.p` is TRUE, `p` is first rescaled to sum to one.
CalculateEntropy <- function(p, normalize.p = FALSE){
  if (normalize.p) {
    p <- p / sum(p)
  }
  -sum(plog2p(p))
}
# Load -------------------------------------------------------------------
# Hard-coded absolute input paths: this is a single-machine analysis script.
inf <- "/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/the_massive_complete_zf_dataset.csv.gz"
inf.meta <- "/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/tsne_clusterID_zebrafish_GateID_dataset.csv"
inf.meta2 <- "/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/cluster_info_JY_edited.xlsx"
dat <- fread(inf, stringsAsFactors = FALSE)
meta <- fread(inf.meta, stringsAsFactors = TRUE)
# Cluster-to-celltype lookup table from the curated Excel sheet.
meta2 <- read.xlsx2(inf.meta2, sheetIndex = 1, header = FALSE, colClasses = c("integer", "character")); colnames(meta2) <- c("ClusterID", "celltype")
colnames(meta) <- c("rname", "V1", "V2", "ClusterID", "experi")
colnames(dat)[[1]] <- "gene"
# Attach celltype labels to the per-cell metadata (joined on ClusterID).
meta <- left_join(meta, meta2)
# add celltype from the Excel file
# Load GLM output ---------------------------------------------------------
# run a negbinom regression to estimate expression and remove count noise (analogous to DESeq2)
zf <- readRDS("/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/the_massive_complete_zf_dataset_PenalizedNegBinRegress.rds")
# Calculate entropy -------------------------------------------------------
# # do it on the raw counts... also try the denoised counts
# genes.keep <- gsub("-", "_", rownames(zf@assays$RNA)) # make rownames compatible with raw mat
# cells.keep <- colnames(zf@assays$RNA)
#
# genes.keep.i <- which(rownames(mat) %in% genes.keep)
# cells.keep.i <- which(colnames(mat) %in% cells.keep)
# mat.filt <- mat[sigVariedGene, cells.keep]
mat.filt <- zf@assays$RNA@counts[sigVariedGene, ]
# mat.filt <- sweep(exp(mat.filt), MARGIN = 2, STATS = colSums(exp(mat.filt)), FUN = "/")
S.vec <- apply(mat.filt, 2, function(jcell) CalculateEntropy(jcell, normalize.p = TRUE))
jmeta <- data.frame(S = S.vec)
rownames(jmeta) <- names(S.vec)
# Do umap -----------------------------------------------------------------
# add meta data
zf@meta.data$cell <- rownames(zf@meta.data)
# Barcode-keyed lookup tables; hash[[x]] is NULL for cells absent from
# `meta`, which the sapply calls below turn into NA.
jhash <- hash(meta$rname, meta$experi)
jhash2 <- hash(meta$rname, meta$ClusterID)
jhash3 <- hash(meta$rname, as.character(meta$celltype))
jhash.entropy <- hash(rownames(jmeta), jmeta$S)
zf@meta.data$experi <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash[[x]]), jhash[[x]], NA)
})
zf@meta.data$clusterid <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash2[[x]]), jhash2[[x]], NA)
})
zf@meta.data$celltype <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash3[[x]]), jhash3[[x]], NA)
})
zf@meta.data$entropy <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash.entropy[[x]]), jhash.entropy[[x]], NA)
})
# Standard Seurat pipeline: PCA, then UMAP/neighbors/clusters on 30 PCs.
zf <- RunPCA(zf, verbose = FALSE)
zf <- RunUMAP(zf, dims = 1:30, verbose = FALSE)
zf <- FindNeighbors(zf, dims = 1:30, verbose = FALSE)
zf <- FindClusters(zf, verbose = FALSE)
# Colorblind-friendly palette for celltype labels.
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#C0C0C0", "#32CD32", "#D3D3D3")
DimPlot(zf, label = TRUE, group.by = "celltype", cols = cbPalette)
FeaturePlot(object = zf, features = 'entropy') + scale_color_viridis_c()
# add the cell labels
# try my own variable gene implementation
# select variable features
# Highly-variable-gene selection by fitting CV^2 as a function of mean
# expression (gamma GLM), then chi-square testing each gene's variance
# against the fitted trend.
mat <- mat.filt
gene.mean <- rowMeans(mat)
gene.var <- apply(mat, 1, function(jrow) var(jrow))
# Squared coefficient of variation per gene.
gene.cv2 <- gene.var / gene.mean ^ 2
# plot
smoothScatter(x = log10(gene.mean), y = log10(gene.cv2), pch = 20)
abline(a = 0, b = -1)
plot(density(log10(Matrix::rowSums(mat))))
plot(density(log10(Matrix::colSums(mat))))
# Only fit the trend on genes above a minimal mean expression.
minMeanForFit <- 10^-3
useForFit <- gene.mean >= minMeanForFit # & spikeins
print(length(which(useForFit)))
smoothScatter(x = log10(gene.mean), y = log10(gene.cv2), pch = 20)
points(x = log10(gene.mean[useForFit]), y = log10(gene.cv2[useForFit]), pch = 20, col = 'red')
# Gamma GLM of CV^2 on 1/mean: CV^2 ~ a0 + a1/mean (glmgam.fit is from
# statmod, attached at the top of the script).
fit <- glmgam.fit( cbind( a0 = 1, a1tilde = 1/gene.mean[useForFit] ), gene.cv2[useForFit] )
a0 <- unname( fit$coefficients["a0"] )
a1 <- unname( fit$coefficients["a1tilde"])
fit$coefficients
# Evaluate the fitted trend on a log-spaced grid for plotting.
xg <- 10^(seq( min(log10(gene.mean)), max(log10(gene.mean)), length.out=1000 ))
vfit <- a1/xg + a0
smoothScatter(x = log10(gene.mean), y = log10(gene.cv2), pch = 20)
lines( log10(xg), log10(vfit), col="black", lwd=3 )
# add CI
# dof <- nrow(mat) - 1
# dof <-
dof <- ncol(mat) - 1
lines(log10(xg),log10(vfit * qchisq(0.95,dof)/dof),lty=1,col="black")
lines(log10(xg),log10(vfit * qchisq(0.05,dof)/dof),lty=1,col="black")
# Ratio of observed variance to trend-expected variance, chi-square tested.
afit <- a1/gene.mean+a0
varFitRatio <- gene.var/(afit*gene.mean^2)
pval <- pchisq(varFitRatio*dof,df=dof,lower.tail=F)
adj.pval <- p.adjust(pval,"fdr")
# NOTE(review): this defines `sigVariedGenes` (plural), while the earlier
# entropy section subscripts with `sigVariedGene` — likely the same object;
# confirm and unify the names.
sigVariedGenes <- adj.pval<1e-3;
table(sigVariedGenes)
#
#
# # Calculate entropy -------------------------------------------------------
#
#
#
#
# zf <- CreateSeuratObject(counts = mat[, cells.keep], project = "zf", min.cells = 3, min.features = 200, meta.data = jmeta)
# # zf <- CreateSeuratObject(counts = mat, project = "zf", min.cells = 3, min.features = 200, meta.data = jmeta)
#
# # zf@assays$RNA@var.features <- names(which(sigVariedGenes))
# VariableFeatures(zf) <- sigVariedGenes
#
# # zf <- FindVariableFeatures(zf, selection.method = "vst", nfeatures = 2000)
#
# # Identify the 10 most highly variable genes
# top10 <- head(VariableFeatures(zf), 10)
#
# # plot variable features with and without labels
# # plot1 <- VariableFeaturePlot(zf)
# # plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
# # CombinePlots(plots = list(plot1, plot2))
#
# # add labels?
# zf@meta.data$cell <- rownames(zf@meta.data)
# jhash <- hash(meta$rname, meta$experi)
# jhash2 <- hash(meta$rname, meta$ClusterID)
# jhash3 <- hash(meta$rname, as.character(meta$celltype))
# zf@meta.data$experi <- sapply(zf@meta.data$cell, function(x){
# ifelse(!is.null(jhash[[x]]), jhash[[x]], NA)
# })
# zf@meta.data$clusterid <- sapply(zf@meta.data$cell, function(x){
# ifelse(!is.null(jhash2[[x]]), jhash2[[x]], NA)
# })
# zf@meta.data$celltype <- sapply(zf@meta.data$cell, function(x){
# ifelse(!is.null(jhash3[[x]]), jhash3[[x]], NA)
# })
#
# all.genes <- rownames(zf)
# zf <- ScaleData(zf, features = all.genes)
# zf <- RunPCA(zf, features = VariableFeatures(object = zf))
# zf <- RunUMAP(zf, dims = 1:10)
# DimPlot(zf, reduction = "umap", label = FALSE, group.by = "clusterid", na.value = "#C0C0C0") + scale_color_viridis_d()
#
# # label by entropy
# # DimPlot(zf, reduction = "umap", label = FALSE, group.by = "clusterid", na.value = "#C0C0C0") + scale_color_viridis_d()
#
# cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#C0C0C0", "#32CD32", "#D3D3D3")
# DimPlot(zf, reduction = "umap", label = FALSE, group.by = "celltype", na.value = "#C0C0C0", cols = cbPalette)
#
# FeaturePlot(object = zf, features = 'S') + scale_color_viridis_c()
# cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#C0C0C0", "#32CD32", "D3D3D3")
# lsi.out <- scchicFuncs::RunLSI(t(as.matrix(mat[sigVariedGenes, cells.keep])))
#
# jsettings <- umap.defaults
# jsettings$n_neighbors <- 30
# jsettings$min_dist <- 0.15
# umap.out <- umap(as.matrix(lsi.out$v), config = jsettings)
# dat.umap.out <- data.frame(cell = rownames(umap.out$layout), umap1 = umap.out$layout[, 1], umap2 = umap.out$layout[, 2])
#
# ggplot(dat.umap.out, aes(x = umap1, y = umap2)) + geom_point() +
# theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# add labels
# zf <- CreateSeuratObject(counts = mat[, cells.keep])
# zf <- SCTransform(zf, verbose=TRUE)
# pbmc <- RunUMAP(pbmc, dims = 1:30, verbose = FALSE)
| /scripts/scripts_analysis/scrnaseq/zebrafish_marrow_entropy.R | no_license | jakeyeung/sortchicAllScripts | R | false | false | 8,313 | r | # Jake Yeung
# Date of Creation: 2019-08-16
# File: ~/projects/scchic/scripts/scripts_analysis/scrnaseq/zebrafish_marrow.R
# Zebrafish
rm(list=ls())
require(statmod)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(irlba)
library(umap)
library(scchicFuncs)
library(Seurat)
library(hash)
library(xlsx)
# Functions ---------------------------------------------------------------
# Elementwise p * log2(p) with the entropy convention 0 * log2(0) == 0.
# Fix: the original called Vectorize() but discarded its return value, so
# the wrapper was a no-op; ifelse() is already vectorized, so a plain
# function gives identical behavior.
plog2p <- function(p) {
  ifelse(p == 0, 0, p * log2(p))
}
# Shannon entropy (in bits) of a probability vector `p`, computed via the
# sibling helper plog2p() (which treats 0 * log2(0) as 0). When
# `normalize.p` is TRUE, `p` is first rescaled to sum to one.
CalculateEntropy <- function(p, normalize.p = FALSE){
  if (normalize.p) {
    p <- p / sum(p)
  }
  -sum(plog2p(p))
}
# Load -------------------------------------------------------------------
# Hard-coded absolute input paths: this is a single-machine analysis script.
inf <- "/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/the_massive_complete_zf_dataset.csv.gz"
inf.meta <- "/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/tsne_clusterID_zebrafish_GateID_dataset.csv"
inf.meta2 <- "/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/cluster_info_JY_edited.xlsx"
dat <- fread(inf, stringsAsFactors = FALSE)
meta <- fread(inf.meta, stringsAsFactors = TRUE)
# Cluster-to-celltype lookup table from the curated Excel sheet.
meta2 <- read.xlsx2(inf.meta2, sheetIndex = 1, header = FALSE, colClasses = c("integer", "character")); colnames(meta2) <- c("ClusterID", "celltype")
colnames(meta) <- c("rname", "V1", "V2", "ClusterID", "experi")
colnames(dat)[[1]] <- "gene"
# Attach celltype labels to the per-cell metadata (joined on ClusterID).
meta <- left_join(meta, meta2)
# add celltype from the Excel file
# Load GLM output ---------------------------------------------------------
# run a negbinom regression to estimate expression and remove count noise (analogous to DESeq2)
zf <- readRDS("/Users/yeung/data/scchic/public_data/Zebrafish_WKM/For_Jake/the_massive_complete_zf_dataset_PenalizedNegBinRegress.rds")
# Calculate entropy -------------------------------------------------------
# # do it on the raw counts... also try the denoised counts
# genes.keep <- gsub("-", "_", rownames(zf@assays$RNA)) # make rownames compatible with raw mat
# cells.keep <- colnames(zf@assays$RNA)
#
# genes.keep.i <- which(rownames(mat) %in% genes.keep)
# cells.keep.i <- which(colnames(mat) %in% cells.keep)
# mat.filt <- mat[sigVariedGene, cells.keep]
mat.filt <- zf@assays$RNA@counts[sigVariedGene, ]
# mat.filt <- sweep(exp(mat.filt), MARGIN = 2, STATS = colSums(exp(mat.filt)), FUN = "/")
S.vec <- apply(mat.filt, 2, function(jcell) CalculateEntropy(jcell, normalize.p = TRUE))
jmeta <- data.frame(S = S.vec)
rownames(jmeta) <- names(S.vec)
# Do umap -----------------------------------------------------------------
# add meta data
zf@meta.data$cell <- rownames(zf@meta.data)
# Barcode-keyed lookup tables; hash[[x]] is NULL for cells absent from
# `meta`, which the sapply calls below turn into NA.
jhash <- hash(meta$rname, meta$experi)
jhash2 <- hash(meta$rname, meta$ClusterID)
jhash3 <- hash(meta$rname, as.character(meta$celltype))
jhash.entropy <- hash(rownames(jmeta), jmeta$S)
zf@meta.data$experi <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash[[x]]), jhash[[x]], NA)
})
zf@meta.data$clusterid <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash2[[x]]), jhash2[[x]], NA)
})
zf@meta.data$celltype <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash3[[x]]), jhash3[[x]], NA)
})
zf@meta.data$entropy <- sapply(zf@meta.data$cell, function(x){
ifelse(!is.null(jhash.entropy[[x]]), jhash.entropy[[x]], NA)
})
# Standard Seurat pipeline: PCA, then UMAP/neighbors/clusters on 30 PCs.
zf <- RunPCA(zf, verbose = FALSE)
zf <- RunUMAP(zf, dims = 1:30, verbose = FALSE)
zf <- FindNeighbors(zf, dims = 1:30, verbose = FALSE)
zf <- FindClusters(zf, verbose = FALSE)
# Colorblind-friendly palette for celltype labels.
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#C0C0C0", "#32CD32", "#D3D3D3")
DimPlot(zf, label = TRUE, group.by = "celltype", cols = cbPalette)
FeaturePlot(object = zf, features = 'entropy') + scale_color_viridis_c()
# add the cell labels
# try my own variable gene implementation
# select variable features
# Highly-variable-gene selection by fitting CV^2 as a function of mean
# expression (gamma GLM), then chi-square testing each gene's variance
# against the fitted trend.
mat <- mat.filt
gene.mean <- rowMeans(mat)
gene.var <- apply(mat, 1, function(jrow) var(jrow))
# Squared coefficient of variation per gene.
gene.cv2 <- gene.var / gene.mean ^ 2
# plot
smoothScatter(x = log10(gene.mean), y = log10(gene.cv2), pch = 20)
abline(a = 0, b = -1)
plot(density(log10(Matrix::rowSums(mat))))
plot(density(log10(Matrix::colSums(mat))))
# Only fit the trend on genes above a minimal mean expression.
minMeanForFit <- 10^-3
useForFit <- gene.mean >= minMeanForFit # & spikeins
print(length(which(useForFit)))
smoothScatter(x = log10(gene.mean), y = log10(gene.cv2), pch = 20)
points(x = log10(gene.mean[useForFit]), y = log10(gene.cv2[useForFit]), pch = 20, col = 'red')
# Gamma GLM of CV^2 on 1/mean: CV^2 ~ a0 + a1/mean (glmgam.fit is from
# statmod, attached at the top of the script).
fit <- glmgam.fit( cbind( a0 = 1, a1tilde = 1/gene.mean[useForFit] ), gene.cv2[useForFit] )
a0 <- unname( fit$coefficients["a0"] )
a1 <- unname( fit$coefficients["a1tilde"])
fit$coefficients
# Evaluate the fitted trend on a log-spaced grid for plotting.
xg <- 10^(seq( min(log10(gene.mean)), max(log10(gene.mean)), length.out=1000 ))
vfit <- a1/xg + a0
smoothScatter(x = log10(gene.mean), y = log10(gene.cv2), pch = 20)
lines( log10(xg), log10(vfit), col="black", lwd=3 )
# add CI
# dof <- nrow(mat) - 1
# dof <-
dof <- ncol(mat) - 1
lines(log10(xg),log10(vfit * qchisq(0.95,dof)/dof),lty=1,col="black")
lines(log10(xg),log10(vfit * qchisq(0.05,dof)/dof),lty=1,col="black")
# Ratio of observed variance to trend-expected variance, chi-square tested.
afit <- a1/gene.mean+a0
varFitRatio <- gene.var/(afit*gene.mean^2)
pval <- pchisq(varFitRatio*dof,df=dof,lower.tail=F)
adj.pval <- p.adjust(pval,"fdr")
# NOTE(review): this defines `sigVariedGenes` (plural), while the earlier
# entropy section subscripts with `sigVariedGene` — likely the same object;
# confirm and unify the names.
sigVariedGenes <- adj.pval<1e-3;
table(sigVariedGenes)
#
#
# # Calculate entropy -------------------------------------------------------
#
#
#
#
# zf <- CreateSeuratObject(counts = mat[, cells.keep], project = "zf", min.cells = 3, min.features = 200, meta.data = jmeta)
# # zf <- CreateSeuratObject(counts = mat, project = "zf", min.cells = 3, min.features = 200, meta.data = jmeta)
#
# # zf@assays$RNA@var.features <- names(which(sigVariedGenes))
# VariableFeatures(zf) <- sigVariedGenes
#
# # zf <- FindVariableFeatures(zf, selection.method = "vst", nfeatures = 2000)
#
# # Identify the 10 most highly variable genes
# top10 <- head(VariableFeatures(zf), 10)
#
# # plot variable features with and without labels
# # plot1 <- VariableFeaturePlot(zf)
# # plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
# # CombinePlots(plots = list(plot1, plot2))
#
# # add labels?
# zf@meta.data$cell <- rownames(zf@meta.data)
# jhash <- hash(meta$rname, meta$experi)
# jhash2 <- hash(meta$rname, meta$ClusterID)
# jhash3 <- hash(meta$rname, as.character(meta$celltype))
# zf@meta.data$experi <- sapply(zf@meta.data$cell, function(x){
# ifelse(!is.null(jhash[[x]]), jhash[[x]], NA)
# })
# zf@meta.data$clusterid <- sapply(zf@meta.data$cell, function(x){
# ifelse(!is.null(jhash2[[x]]), jhash2[[x]], NA)
# })
# zf@meta.data$celltype <- sapply(zf@meta.data$cell, function(x){
# ifelse(!is.null(jhash3[[x]]), jhash3[[x]], NA)
# })
#
# all.genes <- rownames(zf)
# zf <- ScaleData(zf, features = all.genes)
# zf <- RunPCA(zf, features = VariableFeatures(object = zf))
# zf <- RunUMAP(zf, dims = 1:10)
# DimPlot(zf, reduction = "umap", label = FALSE, group.by = "clusterid", na.value = "#C0C0C0") + scale_color_viridis_d()
#
# # label by entropy
# # DimPlot(zf, reduction = "umap", label = FALSE, group.by = "clusterid", na.value = "#C0C0C0") + scale_color_viridis_d()
#
# cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#C0C0C0", "#32CD32", "#D3D3D3")
# DimPlot(zf, reduction = "umap", label = FALSE, group.by = "celltype", na.value = "#C0C0C0", cols = cbPalette)
#
# FeaturePlot(object = zf, features = 'S') + scale_color_viridis_c()
# cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#C0C0C0", "#32CD32", "D3D3D3")
# lsi.out <- scchicFuncs::RunLSI(t(as.matrix(mat[sigVariedGenes, cells.keep])))
#
# jsettings <- umap.defaults
# jsettings$n_neighbors <- 30
# jsettings$min_dist <- 0.15
# umap.out <- umap(as.matrix(lsi.out$v), config = jsettings)
# dat.umap.out <- data.frame(cell = rownames(umap.out$layout), umap1 = umap.out$layout[, 1], umap2 = umap.out$layout[, 2])
#
# ggplot(dat.umap.out, aes(x = umap1, y = umap2)) + geom_point() +
# theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# add labels
# zf <- CreateSeuratObject(counts = mat[, cells.keep])
# zf <- SCTransform(zf, verbose=TRUE)
# pbmc <- RunUMAP(pbmc, dims = 1:30, verbose = FALSE)
|
#!/usr/bin/env Rscript
#scriptdir = getSrcDirectory(function(x) {x})
# Resolve this script's directory. NOTE(review): sys.frame(1)$ofile is only
# populated when the file is loaded via source(); under Rscript it is NULL
# and dirname() errors — confirm the intended invocation method.
scriptdir <- dirname(sys.frame(1)$ofile)
message("ScriptDir: ", scriptdir)
# Entry point: parse the required --numA/--numB integer flags and report
# their sum via message() (stderr).
main = function() {
  suppressPackageStartupMessages(library("argparse"))
  cli <- ArgumentParser()
  cli$add_argument("--numA", help="numA", type="integer", required=TRUE, nargs=1)
  cli$add_argument("--numB", help="numB", type="integer", required=TRUE, nargs=1)
  opts <- cli$parse_args()
  # Hard-coded sanity example before echoing the user-supplied sum.
  demo_sum <- addme(11, 20)
  message("11 + 20 = ", demo_sum)
  message(opts$numA, "+", opts$numB, "=", addme(opts$numA, opts$numB))
}
# Add two numbers (vectorized elementwise by R's `+`).
addme = function(a, b) {
  a + b
}
# RUnit-style unit test for addme(); checkEquals comes from the RUnit
# package, assumed attached by whatever runner invokes this.
test.addme = function() {
checkEquals(addme(1,2), 3)
checkEquals(addme(4,5), 9)
}
# Auto-run main() only in an interactive session. NOTE(review): Rscript
# runs are non-interactive, so despite the shebang main() is never invoked
# from the command line — confirm this guard is intended.
if (interactive()) {
main()
}
| /R_test_fun/example.funcs.R | no_license | brianjohnhaas/misc | R | false | false | 778 | r | #!/usr/bin/env Rscript
#scriptdir = getSrcDirectory(function(x) {x})
# Resolve this script's directory. NOTE(review): sys.frame(1)$ofile is only
# populated when the file is loaded via source(); under Rscript it is NULL
# and dirname() errors — confirm the intended invocation method.
scriptdir <- dirname(sys.frame(1)$ofile)
message("ScriptDir: ", scriptdir)
# Entry point: parse the required --numA/--numB integer flags and report
# their sum via message() (stderr).
main = function() {
  suppressPackageStartupMessages(library("argparse"))
  cli <- ArgumentParser()
  cli$add_argument("--numA", help="numA", type="integer", required=TRUE, nargs=1)
  cli$add_argument("--numB", help="numB", type="integer", required=TRUE, nargs=1)
  opts <- cli$parse_args()
  # Hard-coded sanity example before echoing the user-supplied sum.
  demo_sum <- addme(11, 20)
  message("11 + 20 = ", demo_sum)
  message(opts$numA, "+", opts$numB, "=", addme(opts$numA, opts$numB))
}
# Add two numbers (vectorized elementwise by R's `+`).
addme = function(a, b) {
  a + b
}
# RUnit-style unit test for addme(); checkEquals comes from the RUnit
# package, assumed attached by whatever runner invokes this.
test.addme = function() {
checkEquals(addme(1,2), 3)
checkEquals(addme(4,5), 9)
}
# Auto-run main() only in an interactive session. NOTE(review): Rscript
# runs are non-interactive, so despite the shebang main() is never invoked
# from the command line — confirm this guard is intended.
if (interactive()) {
main()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_gene_info.R
\name{get_previous_gene}
\alias{get_previous_gene}
\title{Get previous genes bnumbers for a given vector of genes, with respect to strand orientation}
\usage{
get_previous_gene(list_genes)
}
\arguments{
\item{list_genes}{A character vector of gene names, bnumbers, symbols}
}
\value{
A character vector of the same length as \code{list_genes}.
}
\description{
Get previous genes bnumbers for a given vector of genes, with respect to strand orientation
}
| /man/get_previous_gene.Rd | no_license | PGC-CCG/EcoliGenes | R | false | true | 523 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_gene_info.R
\name{get_previous_gene}
\alias{get_previous_gene}
\title{Get previous genes bnumbers for a given vector of genes, with respect to strand orientation}
\usage{
get_previous_gene(list_genes)
}
\arguments{
\item{list_genes}{A character vector of gene names, bnumbers, symbols}
}
\value{
A character vector of the same length as \code{list_genes}.
}
\description{
Get previous genes bnumbers for a given vector of genes, with respect to strand orientation
}
|
# Load the weekly Google Trends export on software-development
# methodologies into `raw.week.data`.
#clean console
# "\014" is a form feed, which RStudio interprets as "clear console".
cat(c("\014"))
#google trends on methodologies: change file.to.read to your working directory
#file.to.read <- "D:/workspaceR/prove/hall-of-fame-methodologies/resources/week-search-about-software-development-methodologies.csv"
file.to.read <- "C:/repo-progetti-R/Hall-of-Fame-methodologies/input/week-search-about-software-development-methodologies.csv"
#file.to.read <- "../input/week-search-about-software-development-methodologies.csv"
# Semicolon-separated export with a header row.
raw.week.data <- read.table(header = TRUE, sep = ';', file = file.to.read)
#add ID column to dataframe
# Sequential row ID (1..n).
raw.week.data$ID <- seq.int(nrow(raw.week.data))
| /R/1-load-data.R | permissive | alepuzio/Hall-of-Fame-methodologies | R | false | false | 608 | r | #clean console
# "\014" is a form feed, which RStudio interprets as "clear console".
cat(c("\014"))
#google trends on methodologies: change file.to.read to your working directory
#file.to.read <- "D:/workspaceR/prove/hall-of-fame-methodologies/resources/week-search-about-software-development-methodologies.csv"
file.to.read <- "C:/repo-progetti-R/Hall-of-Fame-methodologies/input/week-search-about-software-development-methodologies.csv"
#file.to.read <- "../input/week-search-about-software-development-methodologies.csv"
# Semicolon-separated export with a header row.
raw.week.data <- read.table(header = TRUE, sep = ';', file = file.to.read)
#add ID column to dataframe
# Sequential row ID (1..n).
raw.week.data$ID <- seq.int(nrow(raw.week.data))
|
## Rank hospitals within every state on 30-day mortality for one outcome.
##
## outcome: one of "heart attack", "heart failure", "pneumonia".
## num:     "best", "worst", or an integer rank within each state.
## Returns a data frame with one row per state (columns: result, state).
##
## NOTE(review): relies on `arrange()` (dplyr) being attached by the caller;
## no library() call is visible in this file — confirm.
rankall <- function(outcome, num="best"){
  ## Read outcome data; "Not Available" entries become NA.
  data <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = FALSE, na.strings = "Not Available")
  relevant <- data[,c(2, 7, 11, 17, 23)]
  names(relevant) <- c("hospital", "state", "heart attack", "heart failure", "pneumonia")
  outcomes <- relevant[, c(3, 4, 5)]
  names(outcomes) <- c("heart attack", "heart failure", "pneumonia")
  if(!(outcome %in% names(outcomes)))
    stop("Invalid Outcome")
  ## Outcome columns start at position 3 of `relevant`.
  col_outcome <- which(names(outcomes) == outcome) +2
  relevant <- relevant[,c(1, 2, col_outcome)]
  relevant <- na.omit(relevant)
  names(relevant) <- c("hospital", "state", "outcomek")
  ## Sort by state, then mortality, then hospital name (alphabetical ties).
  result <- arrange(relevant, state, outcomek, hospital)
  result <- split(result, result$state)
  ## Pick the requested rank within one state's already-sorted data frame.
  ## Bug fix: the original used two independent `if` statements, so
  ## num == "best" fell through to `dat["best", "hospital"]` (a character
  ## row index that does not exist) and always returned NA.
  helper <- function(dat){
    if(num == "best")
      dat[1, "hospital"]
    else if(num == "worst")
      dat[nrow(dat), "hospital"]
    else
      dat[num, "hospital"]
  }
  result <- sapply(result, helper, USE.NAMES = TRUE)
  data.frame(result, state=names(result), row.names=names(result))
}
| /rankall.R | no_license | ArunimaSethi/datasciencecoursera | R | false | false | 1,135 | r | rankall <- function(outcome, num="best"){
data <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = FALSE, na.strings = "Not Available")
## Read outcome data
relevant <- data[,c(2, 7, 11, 17, 23)]
names(relevant) <- c("hospital", "state", "heart attack", "heart failure", "pneumonia")
outcomes <- relevant[, c(3, 4, 5)]
names(outcomes) <- c("heart attack", "heart failure", "pneumonia")
if(!(outcome %in% names(outcomes)))
stop("Invalid Outcome")
col_outcome <- which(names(outcomes) == outcome) +2 #heart_attack=first of names(outcomes) and third of all col (data)
relevant <- relevant[,c(1, 2, col_outcome)]
relevant <- na.omit(relevant)
names(relevant) <- c("hospital", "state", "outcomek")
result <- arrange(relevant, state, outcomek, hospital)
result <- split(result, result$state)
helper <- function(dat){
if(num == "best")
r <- dat[1, "hospital"]
if(num == "worst")
r <- dat[nrow(dat), "hospital"]
else
r <- dat[num, "hospital"]
}
result <- sapply(result, helper, USE.NAMES = TRUE)
result
data.frame(result, state=names(result), row.names=names(result))
}
|
# NOTE(review): `no_function()` is not defined anywhere visible and will
# error at runtime — looks like a leftover artifact; confirm and remove.
no_function()
library(tidyverse)
masstools::setwd_project()
# NOTE(review): rm(list = ls()) in a sourced script wipes the caller's
# workspace — intended for standalone runs only.
rm(list = ls())
source("code/tools.R")
source("code/modified_dtw.R")
source("code/lagged_correlation.R")
####load data
###CGM
load("data/24_7_study/cgm/data_preparation/sample_info")
load("data/24_7_study/cgm/data_preparation/variable_info")
load("data/24_7_study/cgm/data_preparation/expression_data")
# Rename the just-loaded generic objects so the next load() does not
# clobber them.
cgm_expression_data = expression_data
cgm_sample_info = sample_info
cgm_variable_info = variable_info
###total_protein
load("data/24_7_study/total_protein/data_preparation/sample_info")
load("data/24_7_study/total_protein/data_preparation/variable_info")
load("data/24_7_study/total_protein/data_preparation/expression_data")
total_protein_sample_info = sample_info
total_protein_variable_info = variable_info
total_protein_expression_data = expression_data
load("data/24_7_study/summary_info/day_night_df")
####this is for the day night time
# Annotate each day/night interval with clock times and a "Mon-8-15"-style
# week label; factor levels keep labels in first-appearance order.
day_night_df =
day_night_df %>%
dplyr::mutate(
start_time = as.POSIXct(hms::as_hms(start)),
end_time = as.POSIXct(hms::as_hms(end)),
week = format(day, "%a")
) %>%
dplyr::mutate(week = paste(
week,
lubridate::month(day),
lubridate::day(day),
sep = "-"
)) %>%
dplyr::mutate(week = factor(week, unique(week)))
######cgm vs total_protein
dir.create("data/24_7_study/wearable_omics_correlation/cgm_omics_correlation/cgm_total_protein")
# NOTE(review): setwd() changes session-global state; all paths below are
# relative to this directory.
setwd("data/24_7_study/wearable_omics_correlation/cgm_omics_correlation/cgm_total_protein")
#####---------------------------------------------------------------------------
#####---------------------------------------------------------------------------
###correlation between cgm and total_proteins
#global correlation
dir.create("lagged_correlation")
# Placeholders (one entry per protein); the cached results loaded below
# overwrite lagged_cor/global_cor and lagged_result.
lagged_cor = rep(NA, nrow(total_protein_expression_data))
global_cor = rep(NA, nrow(total_protein_expression_data))
lagged_result = vector(mode = "list", length = nrow(total_protein_expression_data))
# for(i in 1:nrow(total_protein_expression_data)){
# cat(i, " ")
# x = as.numeric(total_protein_expression_data[i, ])
# time1 = total_protein_sample_info$accurate_time
# y = as.numeric(cgm_expression_data[1, ])
# time2 = cgm_sample_info$accurate_time
#
# result = lagged_correlation(
# x = x,
# y = y,
# time1 = time1,
# time2 = time2,
# time_tol = 60/60,
# step = 5/60
# )
# lagged_result[[i]] = result
# }
#
# names(lagged_result) = rownames(total_protein_expression_data)
# save(lagged_result, file = "lagged_correlation/lagged_result")
# Load the precomputed per-protein lagged-correlation objects (the loop
# above is the code that originally produced them).
load("lagged_correlation/lagged_result")
# Best (maximum) correlation across all tested shifts, per protein.
lagged_cor =
lagged_result %>%
purrr::map(function(x){
x$max_cor
}) %>%
unlist()
# Correlation at zero shift, per protein.
global_cor =
lagged_result %>%
purrr::map(function(x){
x$global_cor
}) %>%
unlist()
# Midpoint of the shift window that maximized the correlation, parsed
# from an interval label of the form "(a,b]".
shift_time =
lagged_result %>%
purrr::map(function(x){
x$shift_time[x$which_max_idx] %>%
stringr::str_replace("\\(", "") %>%
stringr::str_replace("\\]", "") %>%
stringr::str_split(",") %>%
`[[`(1) %>%
as.numeric() %>%
mean()
}) %>%
unlist()
names(lagged_cor) = names(global_cor) =
total_protein_variable_info$variable_id
# One row per protein: annotation plus the three correlation summaries.
cor_data =
data.frame(wearable = "CGM",
total_protein_variable_info,
global_cor = global_cor,
lagged_cor = lagged_cor,
shift_time = shift_time)
# Recompute p-values for both the lagged (best-shift) and the global
# (zero-shift) correlations from the cached alignment indices.
# NOTE(review): mapping over t(cor_data) coerces every field to character;
# x[2] is taken as the variable_id — this silently breaks if the column
# order of cor_data changes. Confirm.
p_value =
cor_data %>%
t() %>%
as.data.frame() %>%
purrr::map(function(x){
# cat(x[2], " ")
x[!is.na(x)] = stringr::str_trim(x[!is.na(x)], side = "both")
result = lagged_result[[x[2]]]
###lagged correlation p value
x_value = result$x
y_value = result$y
# Average the wearable samples matched to each protein sample at the
# best shift, then drop proteins with no matched wearable points.
y_value =
result$max_idx %>%
purrr::map(function(idx){
mean(y_value[idx])
}) %>%
unlist()
x_value = x_value[!is.na(y_value)]
y_value = y_value[!is.na(y_value)]
lagged_cor_p =
cor.test(x = x_value, y = y_value, method = "pearson")$p.value
###global correlation p value
x_value = result$x
y_value = result$y
y_value =
result$global_idx %>%
purrr::map(function(idx){
mean(y_value[idx])
}) %>%
unlist()
x_value = x_value[!is.na(y_value)]
y_value = y_value[!is.na(y_value)]
global_cor_p =
cor.test(x = x_value, y = y_value, method = "pearson")$p.value
c(global_cor_p = global_cor_p,
lagged_cor_p = lagged_cor_p)
}) %>%
do.call(rbind, .) %>%
as.data.frame()
cor_data =
data.frame(cor_data, p_value)
# Benjamini-Hochberg FDR adjustment across all proteins.
cor_data$global_cor_p_adjust = p.adjust(cor_data$global_cor_p, method = "BH")
cor_data$lagged_cor_p_adjust = p.adjust(cor_data$lagged_cor_p, method = "BH")
library(openxlsx)
# wb <- createWorkbook()
# modifyBaseFont(wb, fontSize = 12, fontName = "Time New Roma")
# addWorksheet(wb, sheetName = "CGM total_protein global cor",
# gridLines = TRUE)
# freezePane(wb, sheet = 1, firstRow = TRUE, firstCol = TRUE)
# writeDataTable(wb, sheet = 1, x = cor_data,
# colNames = TRUE, rowNames = TRUE)
# saveWorkbook(wb, "lagged_correlation/cor_data.xlsx", overwrite = TRUE)
##output the top 10 negative and top 10 positive lagged correlations
pos_top_10 =
cor_data %>%
dplyr::arrange(lagged_cor) %>%
dplyr::filter(lagged_cor > 0) %>%
tail(10)
neg_top_10 =
cor_data %>%
dplyr::arrange(lagged_cor) %>%
dplyr::filter(lagged_cor < 0) %>%
head(10)
dir.create("cor_plot")
# Combined set used by the (commented-out) per-protein plotting loop below.
temp =
rbind(neg_top_10,
pos_top_10)
# for (i in 1:nrow(temp)) {
# cat(i, " ")
# plot1 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,1000),
# non_matched_point_size = 0.1,
# wearable_point_size = 0.5,
# internal_omics_point_size = 2,
# integrated = FALSE)
#
# plot2 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,10),
# non_matched_point_size = 0.1,
# wearable_point_size = 0.5,
# internal_omics_point_size = 2,
# integrated = FALSE)
#
# plot3 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,1000),
# non_matched_point_size = 0.1,
# wearable_point_size = 0.5,
# internal_omics_point_size = 2,
# integrated = TRUE)
#
#
# plot4 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,30),
# non_matched_point_size = 3,
# wearable_point_size = 3,
# internal_omics_point_size = 3,
# integrated = TRUE)
#
# name = paste("CGM vs",temp$mol_name[i])
# ggsave(plot1,
# filename = file.path("cor_plot", paste(name, "plot1.pdf", sep = "")),
# width = 20, height = 7)
#
# ggsave(plot2,
# filename = file.path("cor_plot", paste(name, "plot2.pdf", sep = "")),
# width = 20, height = 7)
#
# ggsave(plot3,
# filename = file.path("cor_plot", paste(name, "plot3.pdf", sep = "")),
# width = 20, height = 7)
#
# ggsave(plot4,
# filename = file.path("cor_plot", paste(name, "plot4.pdf", sep = "")),
# width = 20, height = 7)
# }
# Quick diagnostic scatter: global vs lagged correlation for every molecule
ggplot(cor_data, aes(x = global_cor, y = lagged_cor)) +
  geom_point()
# Global vs lagged correlation for every protein, coloured by whether the
# protein response lags behind CGM ("After"), leads it ("Before"), or is
# synchronous (shift_time == 0).
plot =
  cor_data %>%
  dplyr::mutate(direction = case_when(
    shift_time > 0 ~ "After",
    shift_time < 0 ~ "Before",
    shift_time == 0 ~ "Synchronization"
  )) %>%
  ggplot(aes(global_cor, lagged_cor)) +
  geom_hline(yintercept = 0) +
  geom_vline(xintercept = 0) +
  # y = x guide: points above the diagonal gain correlation from the lag
  geom_abline(slope = 1, intercept = 0) +
  geom_point(aes(fill = direction),
             shape = 21, size = 5) +
  labs(x = "Global Pearson correlation",
       y = "Lagged correlation") +
  scale_fill_manual(values = c(
    "After" = ggsci::pal_aaas()(n = 10)[1],
    "Before" = ggsci::pal_aaas()(n = 10)[2],
    "Synchronization" = "grey"
  )) +
  # Label only molecules in the outer quartiles of the positive / negative
  # lagged correlations; all others get NA and are left unlabelled
  shadowtext::geom_shadowtext(aes(label = ifelse(lagged_cor > quantile(cor_data$lagged_cor[cor_data$lagged_cor > 0], 0.75) |
                                                   lagged_cor < quantile(cor_data$lagged_cor[cor_data$lagged_cor < 0], 0.25),
                                                 mol_name, NA)),
                              check_overlap = TRUE,
                              bg.colour='white',
                              color = "black") +
  # base_theme: presumably defined in a sourced helper script — confirm
  base_theme
plot
# ggsave(plot, filename = "global_lagged_correlation.pdf", width = 9, height = 7)
# Shift time vs lagged correlation; point size encodes correlation strength.
plot =
  cor_data %>%
  dplyr::mutate(direction = case_when(
    shift_time > 0 ~ "After",
    shift_time < 0 ~ "Before",
    shift_time == 0 ~ "Synchronization"
  )) %>%
  ggplot(aes(shift_time, lagged_cor)) +
  geom_hline(yintercept = 0) +
  geom_vline(xintercept = 0) +
  # NOTE(review): this y = x diagonal appears carried over from the previous
  # plot; here x is a shift time in minutes, so the guide is probably
  # unintended — confirm before publication
  geom_abline(slope = 1, intercept = 0) +
  geom_point(aes(fill = direction,
                 size = abs(lagged_cor)),
             shape = 21,
             show.legend = TRUE) +
  labs(x = "Shift time",
       y = "Lagged correlation") +
  scale_fill_manual(values = c(
    "After" = ggsci::pal_aaas()(n = 10)[1],
    "Before" = ggsci::pal_aaas()(n = 10)[2],
    "Synchronization" = "grey"
  )) +
  # Same outer-quartile labelling rule as the previous plot
  ggrepel::geom_text_repel(aes(label = ifelse(lagged_cor > quantile(cor_data$lagged_cor[cor_data$lagged_cor > 0], 0.75) |
                                                lagged_cor < quantile(cor_data$lagged_cor[cor_data$lagged_cor < 0], 0.25),
                                              mol_name, NA))) +
  base_theme
plot
# ggsave(plot, filename = "shift_lagged_correlation.pdf", width = 9, height = 7)
####output the shift time vs lagged cor plot for the important proteins
# Currently all molecules are kept; the commented filter below would restrict
# to the outer quartiles of positive / negative lagged correlation.
important_total_protein =
  cor_data
# dplyr::filter(lagged_cor > quantile(cor_data$lagged_cor[cor_data$lagged_cor >= 0], 0.75) |
#                 lagged_cor < quantile(cor_data$lagged_cor[cor_data$lagged_cor <= 0], 0.25))
# showWarnings = FALSE keeps re-runs from warning when the folder already exists
dir.create("shift_time_vs_cor", showWarnings = FALSE)
# for(i in 1:nrow(important_total_protein)){
# cat(i, "")
# result = lagged_result[[important_total_protein$variable_id[i]]]
#
# temp_data =
# result[c("shift_time", "all_cor")] %>%
# do.call(cbind, .) %>%
# as.data.frame() %>%
# dplyr::mutate(shift_time = stringr::str_replace(shift_time, "\\(", "")) %>%
# dplyr::mutate(shift_time = stringr::str_replace(shift_time, "\\]", "")) %>%
# dplyr::mutate(shift_time = stringr::str_split(shift_time, ",")) %>%
# dplyr::mutate(all_cor = round(as.numeric(all_cor), 4))
#
# temp_data$shift_time =
# temp_data$shift_time %>%
# purrr::map(function(x){
# mean(as.numeric(x))
# }) %>%
# unlist()
#
# plot =
# temp_data %>%
# ggplot(aes(x = shift_time, y = all_cor)) +
# geom_vline(xintercept = temp_data$shift_time[result$which_max_idx],
# color = "red") +
# geom_hline(yintercept = 0) +
# annotate(geom = "text",
# x = temp_data$shift_time[result$which_max_idx],
# y = result$max_cor,
# label = round(result$max_cor, 4)) +
# annotate(geom = "text",
# x = temp_data$shift_time[result$which_global_idx],
# y = result$global_cor,
# label = round(result$global_cor, 4)) +
# geom_point() +
# geom_line(aes(group = 1)) +
# base_theme +
# labs(x = "Shift time (Omics - CGM, min)",
# y = "Pearsom correlation") +
# theme()
#
# name =
# proteomics_variable_info$mol_name[match(names(lagged_result)[i], proteomics_variable_info$variable_id)]
#
# ggsave(
# plot,
# file = file.path("shift_time_vs_cor", paste(name, ".pdf", sep = "")),
# width = 8,
# height = 7
# )
# }
##Pathway analysis for the positively vs negatively correlated proteins.
##Correlations are small overall, so no quantile cutoff is applied here
##(the commented filters would restrict to the outer quartiles).
library(clusterProfiler)
library(org.Hs.eg.db)
# showWarnings = FALSE keeps re-runs from warning when the folder exists
dir.create("pathway_enrichment", showWarnings = FALSE)
# Label every protein by the sign of its lagged correlation; proteins with
# lagged_cor == 0 (if any) are excluded by both filters.
important_total_protein =
  rbind(
    cor_data %>%
      dplyr::filter(lagged_cor > 0) %>%
      # dplyr::filter(lagged_cor > quantile(lagged_cor, 0.75)) %>%
      dplyr::mutate(class1 = "positive correlation"),
    cor_data %>%
      dplyr::filter(lagged_cor < 0) %>%
      # dplyr::filter(lagged_cor < quantile(lagged_cor, 0.25)) %>%
      dplyr::mutate(class1 = "negative correlation")
  )
save(important_total_protein, file = "important_total_protein")
| /code/24_7_study/wearable_omics_correlation/CGM/cgm_total_protein_correlation.R | permissive | jaspershen/microsampling_multiomics | R | false | false | 14,148 | r | no_function()
library(tidyverse)
# Set the working directory to the project root (masstools helper)
masstools::setwd_project()
# NOTE(review): rm(list = ls()) wipes the global environment — fine for a
# standalone analysis script, but hazardous when sourcing interactively
rm(list = ls())
# Project helpers: base_theme / class_color / wearable_color,
# lagged_correlation() and lagged_alignment_plot() come from these files
source("code/tools.R")
source("code/modified_dtw.R")
source("code/lagged_correlation.R")
####load data
###CGM: each load() restores sample_info / variable_info / expression_data,
###which are immediately renamed so the next load() does not clobber them
load("data/24_7_study/cgm/data_preparation/sample_info")
load("data/24_7_study/cgm/data_preparation/variable_info")
load("data/24_7_study/cgm/data_preparation/expression_data")
cgm_expression_data = expression_data
cgm_sample_info = sample_info
cgm_variable_info = variable_info
###total_protein
load("data/24_7_study/total_protein/data_preparation/sample_info")
load("data/24_7_study/total_protein/data_preparation/variable_info")
load("data/24_7_study/total_protein/data_preparation/expression_data")
total_protein_sample_info = sample_info
total_protein_variable_info = variable_info
total_protein_expression_data = expression_data
load("data/24_7_study/summary_info/day_night_df")
####this is for the day night time
# Add clock-time bounds per day and a "Mon-6-1"-style week label, kept as a
# factor in first-appearance order so facets/panels plot chronologically
day_night_df =
  day_night_df %>%
  dplyr::mutate(
    start_time = as.POSIXct(hms::as_hms(start)),
    end_time = as.POSIXct(hms::as_hms(end)),
    week = format(day, "%a")
  ) %>%
  dplyr::mutate(week = paste(
    week,
    lubridate::month(day),
    lubridate::day(day),
    sep = "-"
  )) %>%
  dplyr::mutate(week = factor(week, unique(week)))
######cgm vs total_protein
dir.create("data/24_7_study/wearable_omics_correlation/cgm_omics_correlation/cgm_total_protein")
setwd("data/24_7_study/wearable_omics_correlation/cgm_omics_correlation/cgm_total_protein")
#####---------------------------------------------------------------------------
#####---------------------------------------------------------------------------
###correlation between cgm and total_proteins
#global correlation
dir.create("lagged_correlation")
# Preallocate one slot per protein; these are filled by the commented-out
# computation loop below, or restored from disk by the load() that follows it
lagged_cor = rep(NA, nrow(total_protein_expression_data))
global_cor = rep(NA, nrow(total_protein_expression_data))
lagged_result = vector(mode = "list", length = nrow(total_protein_expression_data))
# for(i in 1:nrow(total_protein_expression_data)){
# cat(i, " ")
# x = as.numeric(total_protein_expression_data[i, ])
# time1 = total_protein_sample_info$accurate_time
# y = as.numeric(cgm_expression_data[1, ])
# time2 = cgm_sample_info$accurate_time
#
# result = lagged_correlation(
# x = x,
# y = y,
# time1 = time1,
# time2 = time2,
# time_tol = 60/60,
# step = 5/60
# )
# lagged_result[[i]] = result
# }
#
# names(lagged_result) = rownames(total_protein_expression_data)
# save(lagged_result, file = "lagged_correlation/lagged_result")
load("lagged_correlation/lagged_result")
# Pull the scalar summaries out of every per-protein lagged-correlation result
lagged_cor <- vapply(lagged_result, function(res) res$max_cor, numeric(1))
global_cor <- vapply(lagged_result, function(res) res$global_cor, numeric(1))
# The best-shift window is stored as an interval label like "(a,b]";
# convert it to the interval midpoint (minutes)
shift_time <- vapply(lagged_result, function(res) {
  label <- res$shift_time[res$which_max_idx]
  label <- sub("(", "", label, fixed = TRUE)
  label <- sub("]", "", label, fixed = TRUE)
  bounds <- as.numeric(strsplit(label, ",")[[1]])
  mean(bounds)
}, numeric(1))
names(lagged_cor) <- names(global_cor) <-
  total_protein_variable_info$variable_id
# One row per protein: annotation columns plus the correlation summaries
cor_data <-
  data.frame(wearable = "CGM",
             total_protein_variable_info,
             global_cor = global_cor,
             lagged_cor = lagged_cor,
             shift_time = shift_time)
# Per-protein p-values for both the global and the lagged correlation.
# cor_data is transposed so purrr::map iterates over proteins (one column of
# t(cor_data) per protein). NOTE(review): t() coerces every field to
# character; this works only because just x[2] is used, and x[2] is assumed
# to be the variable_id column (wearable is column 1, then
# total_protein_variable_info starts with variable_id) — confirm if the
# column order of total_protein_variable_info ever changes.
p_value =
  cor_data %>%
  t() %>%
  as.data.frame() %>%
  purrr::map(function(x){
    # cat(x[2], " ")
    x[!is.na(x)] = stringr::str_trim(x[!is.na(x)], side = "both")
    result = lagged_result[[x[2]]]
    ###lagged correlation p value
    # Average the wearable signal inside each lag-matched window, drop
    # unmatched samples, then Pearson-test protein values vs windowed means
    x_value = result$x
    y_value = result$y
    y_value =
      result$max_idx %>%
      purrr::map(function(idx){
        mean(y_value[idx])
      }) %>%
      unlist()
    x_value = x_value[!is.na(y_value)]
    y_value = y_value[!is.na(y_value)]
    lagged_cor_p =
      cor.test(x = x_value, y = y_value, method = "pearson")$p.value
    ###global correlation p value
    # Same test using the zero-shift (global) window matching
    x_value = result$x
    y_value = result$y
    y_value =
      result$global_idx %>%
      purrr::map(function(idx){
        mean(y_value[idx])
      }) %>%
      unlist()
    x_value = x_value[!is.na(y_value)]
    y_value = y_value[!is.na(y_value)]
    global_cor_p =
      cor.test(x = x_value, y = y_value, method = "pearson")$p.value
    c(global_cor_p = global_cor_p,
      lagged_cor_p = lagged_cor_p)
  }) %>%
  do.call(rbind, .) %>%
  as.data.frame()
cor_data =
  data.frame(cor_data, p_value)
# Benjamini-Hochberg FDR adjustment across all proteins
cor_data$global_cor_p_adjust = p.adjust(cor_data$global_cor_p, method = "BH")
cor_data$lagged_cor_p_adjust = p.adjust(cor_data$lagged_cor_p, method = "BH")
library(openxlsx)
# wb <- createWorkbook()
# modifyBaseFont(wb, fontSize = 12, fontName = "Time New Roma")
# addWorksheet(wb, sheetName = "CGM total_protein global cor",
# gridLines = TRUE)
# freezePane(wb, sheet = 1, firstRow = TRUE, firstCol = TRUE)
# writeDataTable(wb, sheet = 1, x = cor_data,
# colNames = TRUE, rowNames = TRUE)
# saveWorkbook(wb, "lagged_correlation/cor_data.xlsx", overwrite = TRUE)
##output the 10 strongest positive and 10 strongest negative lagged
##correlations (the original comment said "top 100"; the code takes 10)
pos_top_10 =
  cor_data %>%
  dplyr::arrange(lagged_cor) %>%
  dplyr::filter(lagged_cor > 0) %>%
  tail(10)   # sorted ascending, so the tail holds the largest values
neg_top_10 =
  cor_data %>%
  dplyr::arrange(lagged_cor) %>%
  dplyr::filter(lagged_cor < 0) %>%
  head(10)   # most negative correlations
dir.create("cor_plot")
# Combined table driving the (currently commented-out) alignment-plot loop below
temp =
  rbind(neg_top_10,
        pos_top_10)
# for (i in 1:nrow(temp)) {
# cat(i, " ")
# plot1 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,1000),
# non_matched_point_size = 0.1,
# wearable_point_size = 0.5,
# internal_omics_point_size = 2,
# integrated = FALSE)
#
# plot2 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,10),
# non_matched_point_size = 0.1,
# wearable_point_size = 0.5,
# internal_omics_point_size = 2,
# integrated = FALSE)
#
# plot3 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,1000),
# non_matched_point_size = 0.1,
# wearable_point_size = 0.5,
# internal_omics_point_size = 2,
# integrated = TRUE)
#
#
# plot4 =
# lagged_alignment_plot(object = lagged_result[[temp$variable_id[i]]],
# day_night_df = day_night_df,
# internal_omics_color = class_color["total_protein"],
# wearable_color = wearable_color["cgm"],
# internal_omics_name = temp$mol_name[i],
# warable_name = "CGM",
# which = "max",
# x_limit = c(1,30),
# non_matched_point_size = 3,
# wearable_point_size = 3,
# internal_omics_point_size = 3,
# integrated = TRUE)
#
# name = paste("CGM vs",temp$mol_name[i])
# ggsave(plot1,
# filename = file.path("cor_plot", paste(name, "plot1.pdf", sep = "")),
# width = 20, height = 7)
#
# ggsave(plot2,
# filename = file.path("cor_plot", paste(name, "plot2.pdf", sep = "")),
# width = 20, height = 7)
#
# ggsave(plot3,
# filename = file.path("cor_plot", paste(name, "plot3.pdf", sep = "")),
# width = 20, height = 7)
#
# ggsave(plot4,
# filename = file.path("cor_plot", paste(name, "plot4.pdf", sep = "")),
# width = 20, height = 7)
# }
# Quick diagnostic scatter: global vs lagged correlation for every molecule
ggplot(cor_data, aes(x = global_cor, y = lagged_cor)) +
  geom_point()
# Global vs lagged correlation for every protein, coloured by whether the
# protein response lags behind CGM ("After"), leads it ("Before"), or is
# synchronous (shift_time == 0).
plot =
  cor_data %>%
  dplyr::mutate(direction = case_when(
    shift_time > 0 ~ "After",
    shift_time < 0 ~ "Before",
    shift_time == 0 ~ "Synchronization"
  )) %>%
  ggplot(aes(global_cor, lagged_cor)) +
  geom_hline(yintercept = 0) +
  geom_vline(xintercept = 0) +
  # y = x guide: points above the diagonal gain correlation from the lag
  geom_abline(slope = 1, intercept = 0) +
  geom_point(aes(fill = direction),
             shape = 21, size = 5) +
  labs(x = "Global Pearson correlation",
       y = "Lagged correlation") +
  scale_fill_manual(values = c(
    "After" = ggsci::pal_aaas()(n = 10)[1],
    "Before" = ggsci::pal_aaas()(n = 10)[2],
    "Synchronization" = "grey"
  )) +
  # Label only molecules in the outer quartiles of the positive / negative
  # lagged correlations; all others get NA and are left unlabelled
  shadowtext::geom_shadowtext(aes(label = ifelse(lagged_cor > quantile(cor_data$lagged_cor[cor_data$lagged_cor > 0], 0.75) |
                                                   lagged_cor < quantile(cor_data$lagged_cor[cor_data$lagged_cor < 0], 0.25),
                                                 mol_name, NA)),
                              check_overlap = TRUE,
                              bg.colour='white',
                              color = "black") +
  # base_theme comes from code/tools.R, sourced at the top of this script
  base_theme
plot
# ggsave(plot, filename = "global_lagged_correlation.pdf", width = 9, height = 7)
# Shift time vs lagged correlation; point size encodes correlation strength.
plot =
  cor_data %>%
  dplyr::mutate(direction = case_when(
    shift_time > 0 ~ "After",
    shift_time < 0 ~ "Before",
    shift_time == 0 ~ "Synchronization"
  )) %>%
  ggplot(aes(shift_time, lagged_cor)) +
  geom_hline(yintercept = 0) +
  geom_vline(xintercept = 0) +
  # NOTE(review): this y = x diagonal appears carried over from the previous
  # plot; here x is a shift time in minutes, so the guide is probably
  # unintended — confirm before publication
  geom_abline(slope = 1, intercept = 0) +
  geom_point(aes(fill = direction,
                 size = abs(lagged_cor)),
             shape = 21,
             show.legend = TRUE) +
  labs(x = "Shift time",
       y = "Lagged correlation") +
  scale_fill_manual(values = c(
    "After" = ggsci::pal_aaas()(n = 10)[1],
    "Before" = ggsci::pal_aaas()(n = 10)[2],
    "Synchronization" = "grey"
  )) +
  # Same outer-quartile labelling rule as the previous plot
  ggrepel::geom_text_repel(aes(label = ifelse(lagged_cor > quantile(cor_data$lagged_cor[cor_data$lagged_cor > 0], 0.75) |
                                                lagged_cor < quantile(cor_data$lagged_cor[cor_data$lagged_cor < 0], 0.25),
                                              mol_name, NA))) +
  base_theme
plot
# ggsave(plot, filename = "shift_lagged_correlation.pdf", width = 9, height = 7)
####output the shift time vs lagged cor plot for the important proteins
# Currently all molecules are kept; the commented filter below would restrict
# to the outer quartiles of positive / negative lagged correlation.
important_total_protein =
  cor_data
# dplyr::filter(lagged_cor > quantile(cor_data$lagged_cor[cor_data$lagged_cor >= 0], 0.75) |
#                 lagged_cor < quantile(cor_data$lagged_cor[cor_data$lagged_cor <= 0], 0.25))
# showWarnings = FALSE keeps re-runs from warning when the folder already exists
dir.create("shift_time_vs_cor", showWarnings = FALSE)
# for(i in 1:nrow(important_total_protein)){
# cat(i, "")
# result = lagged_result[[important_total_protein$variable_id[i]]]
#
# temp_data =
# result[c("shift_time", "all_cor")] %>%
# do.call(cbind, .) %>%
# as.data.frame() %>%
# dplyr::mutate(shift_time = stringr::str_replace(shift_time, "\\(", "")) %>%
# dplyr::mutate(shift_time = stringr::str_replace(shift_time, "\\]", "")) %>%
# dplyr::mutate(shift_time = stringr::str_split(shift_time, ",")) %>%
# dplyr::mutate(all_cor = round(as.numeric(all_cor), 4))
#
# temp_data$shift_time =
# temp_data$shift_time %>%
# purrr::map(function(x){
# mean(as.numeric(x))
# }) %>%
# unlist()
#
# plot =
# temp_data %>%
# ggplot(aes(x = shift_time, y = all_cor)) +
# geom_vline(xintercept = temp_data$shift_time[result$which_max_idx],
# color = "red") +
# geom_hline(yintercept = 0) +
# annotate(geom = "text",
# x = temp_data$shift_time[result$which_max_idx],
# y = result$max_cor,
# label = round(result$max_cor, 4)) +
# annotate(geom = "text",
# x = temp_data$shift_time[result$which_global_idx],
# y = result$global_cor,
# label = round(result$global_cor, 4)) +
# geom_point() +
# geom_line(aes(group = 1)) +
# base_theme +
# labs(x = "Shift time (Omics - CGM, min)",
# y = "Pearsom correlation") +
# theme()
#
# name =
# proteomics_variable_info$mol_name[match(names(lagged_result)[i], proteomics_variable_info$variable_id)]
#
# ggsave(
# plot,
# file = file.path("shift_time_vs_cor", paste(name, ".pdf", sep = "")),
# width = 8,
# height = 7
# )
# }
##Pathway analysis for the positively vs negatively correlated proteins.
##Correlations are small overall, so no quantile cutoff is applied here
##(the commented filters would restrict to the outer quartiles).
library(clusterProfiler)
library(org.Hs.eg.db)
# showWarnings = FALSE keeps re-runs from warning when the folder exists
dir.create("pathway_enrichment", showWarnings = FALSE)
# Label every protein by the sign of its lagged correlation; proteins with
# lagged_cor == 0 (if any) are excluded by both filters.
important_total_protein =
  rbind(
    cor_data %>%
      dplyr::filter(lagged_cor > 0) %>%
      # dplyr::filter(lagged_cor > quantile(lagged_cor, 0.75)) %>%
      dplyr::mutate(class1 = "positive correlation"),
    cor_data %>%
      dplyr::filter(lagged_cor < 0) %>%
      # dplyr::filter(lagged_cor < quantile(lagged_cor, 0.25)) %>%
      dplyr::mutate(class1 = "negative correlation")
  )
save(important_total_protein, file = "important_total_protein")
|
library(data.table)
library(dplyr)
library(tidyr)
library(NbClust)
library(ggplot2)
library(gridExtra)
library(ggdendro)
library(cluster)
library(purrr)
library(tibble)
library(ggradar)
library(naniar)
###############################################################################
################################ TABLES LOADING ###############################
###############################################################################
#load tables - XXX placeholders to be replaced with the real paths as required
setwd("~/XXX")
df <- fread("~/XXX.csv")
df_diag <- fread("~/XXX.txt")
df_bio <- fread("~/XXX.csv")
df_genetic <- readRDS("~/XXX.rds")
df_genetic_dict <- fread("~/XXX.csv")
df_hes_main <- fread("~/XXX.txt")
df_hes_diag <- fread("~/XXX.txt")
df_death_main <- fread("~/XXX.txt")
df_death_cause <- fread("~/XXX.txt")
# fread() returns data.tables; downstream code assumes plain data.frames
df <- as.data.frame(df)
df_diag <- as.data.frame(df_diag)
df_bio <- as.data.frame(df_bio)
df_genetic <- as.data.frame(df_genetic)
df_hes_main <- as.data.frame(df_hes_main)
df_hes_diag <- as.data.frame(df_hes_diag)
df_death_main <- as.data.frame(df_death_main)
df_death_cause <- as.data.frame(df_death_cause)
df_genetic_dict <- as.data.frame(df_genetic_dict)
# Participant ids live in the rownames of the genotype matrix; move them into
# an "eid" column and force the genotype codes to integer
df_genetic <- rownames_to_column(df_genetic, var = "eid")
df_genetic[ ,-1] <- sapply(df_genetic[ ,-1], as.integer)
# Genotype code 0 is treated as a missing call and recoded to NA.
# NOTE(review): na_if() on a whole data frame relies on older dplyr
# behaviour — confirm it still works with the installed dplyr version
df_genetic <- df_genetic %>%
  na_if(0)
###############################################################################
################################# GET FUNCTIONS ###############################
###############################################################################
# Project helpers, including split_by_status() used below
source("~/functions.R")
###############################################################################
############################# GENES DATA PROCESSING ###########################
###############################################################################
# SNPs excluded from all downstream scores
snps_to_remove <- c("rs5215")
# df_genes starts as the genotype table (codes 1/2/3; 0 was recoded to NA
# above) and is rewritten column-by-column into risk-allele counts (0/1/2).
# all_of() makes the exclusion explicit instead of relying on bare-vector
# tidyselect semantics.
df_genes <- df_genetic %>%
  dplyr::select(!dplyr::all_of(snps_to_remove))
# Column 1 is "eid"; every other column is one SNP
for (j in seq_along(df_genes)[-1]) {
  # Dictionary row for this SNP; column 5 = risk allele, column 8 = minor allele
  dict <- df_genetic_dict %>%
    filter(snp == names(df_genes)[j])
  risk_allele <- dict[[5]][1]
  minor_allele <- dict[[8]][1]
  col <- df_genes[, j]
  # Map genotype code -> number of risk alleles with a single vectorized
  # lookup. BUG FIX: the original sequential replacement
  # (col[col == 1] = 2 followed by col[col == 2] = 1) re-hit the 2s it had
  # just written, so genotypes 1 and 2 both ended up as 1 whenever the minor
  # allele was the risk allele. The lookup below swaps atomically; NA
  # genotypes (missing calls) stay NA. Codes outside 1:3 are not expected.
  if (minor_allele == risk_allele) {
    col <- c(2L, 1L, 0L)[col]
  } else {
    col <- c(0L, 1L, 2L)[col]
  }
  df_genes[, j] <- col
}
#calculate scores
##select snps
# Secretion / resistance SNP panels with their insulin-effect weights.
# %in% makes the exclusion correct for any length of snps_to_remove
# (the original `snp != snps_to_remove` recycled and would silently
# mis-filter if more SNPs were added to the exclusion list).
df_genetic_dict_secr <- df_genetic_dict %>%
  filter(type == "Secretion") %>%
  filter(!(snp %in% snps_to_remove)) %>%
  dplyr::select(c("snp", "insulin_effect"))
df_genetic_dict_res <- df_genetic_dict %>%
  filter(type == "Resistance") %>%
  filter(!(snp %in% snps_to_remove)) %>%
  dplyr::select(c("snp", "insulin_effect"))
##weighted scores
# Weighted allele count, rescaled so the mean weight across the panel is 1
# and the score is comparable to the unweighted count
score_secr_weighted <- as.matrix(df_genes[ ,df_genetic_dict_secr$snp]) %*%
  as.matrix(df_genetic_dict_secr$insulin_effect)
score_secr_weighted <-
  score_secr_weighted/(sum(df_genetic_dict_secr$insulin_effect)) * nrow(df_genetic_dict_secr)
score_res_weighted <- as.matrix(df_genes[ ,df_genetic_dict_res$snp]) %*%
  as.matrix(df_genetic_dict_res$insulin_effect)
score_res_weighted <-
  score_res_weighted/(sum(df_genetic_dict_res$insulin_effect)) * nrow(df_genetic_dict_res)
##non-weighted scores
score_secr_nweighted <- rowSums(df_genes[ ,df_genetic_dict_secr$snp])
score_res_nweighted <- rowSums(df_genes[ ,df_genetic_dict_res$snp])
##save in dataframe
# drop() collapses the n x 1 matrix products to plain numeric vectors so the
# data frame gets ordinary columns rather than matrix columns
df_genes$score_secr_weighted <- drop(score_secr_weighted)
df_genes$score_res_weighted <- drop(score_res_weighted)
df_genes$score_secr_nweighted <- score_secr_nweighted
df_genes$score_res_nweighted <- score_res_nweighted
###############################################################################
############################### HES DATA PROCESSING ###########################
###############################################################################
# Attach admission dates and specialty codes to every diagnosis row; a left
# join keeps diagnoses even when no matching admission record exists
# (keys: participant id + admission instance index).
df_hes <-
  df_hes_diag %>%
  dplyr::select(eid, ins_index, arr_index, level, diag_icd10) %>%
  left_join(
    df_hes_main %>%
      dplyr::select(eid, ins_index, epistart, epiend,
                    mainspef_uni, tretspef_uni),
    by = c("eid", "ins_index")
  )
###############################################################################
############################## MULTI STATES MODEL #############################
###############################################################################
##################### 1.SELECT PATIENTS FOR STUDY
#####################################################
# Keep only participants present in the biochemistry table
df_msm <- df %>%
  filter(eid %in% df_bio$eid)
nrow(df_msm)
# Keep only participants with both assessment-centre visits
# (NOTE(review): fields 53-0.0 / 53-1.0 are assumed to be the visit
# dates — confirm against the field dictionary)
df_msm <- df_msm %>%
  filter((!(is.na(`53-0.0`))) & (!(is.na(`53-1.0`))))
nrow(df_msm)
# Keep only participants with glycated haemoglobin (field 30750) measured
# at both baseline (0.0) and follow-up (1.0)
eid_msm <- df_msm$eid
df_bio_msm <- df_bio %>%
  filter(eid %in% eid_msm)
nrow(df_bio_msm) #to check
df_bio_msm <- df_bio_msm %>%
  filter((!(is.na(`30750-0.0`))) & (!(is.na(`30750-1.0`))))
nrow(df_bio_msm)
df_msm <- df_msm %>%
  filter(eid %in% df_bio_msm$eid)
nrow(df_msm) #to check
##################### 2. CREATE GENES AND HES DATAFRAMES
#####################################################
# Restrict the genetic scores to the selected participants
df_genes_msm <- df_genes %>%
  filter(eid %in% unlist(df_bio_msm$eid))
nrow(df_genes_msm) #to check
# Then drop participants without genetic data from the other tables
df_bio_msm <- df_bio_msm %>%
  filter(eid %in% unlist(df_genes_msm$eid))
nrow(df_bio_msm)
df_msm <- df_msm %>%
  filter(eid %in% unlist(df_genes_msm$eid))
nrow(df_msm)
# Hospital-episode records for the selected participants
df_hes_msm <- df_hes %>%
  filter(eid %in% unlist(df_bio_msm$eid))
##################### 3. DATA FRAMES SAVING
#####################################################
# Persist the selected cohort; XXX placeholders to be replaced with real paths
write.csv(df_msm,"~/XXX.csv", row.names = FALSE)
write.csv(df_bio_msm,"~/XXX.csv", row.names = FALSE)
write.csv(df_genes_msm,"~/XXX.csv", row.names = FALSE)
write.csv(df_hes_msm,"~/XXX.csv", row.names = FALSE)
###############################################################################
############################### DIABETES CLUSTERING ###########################
###############################################################################
##################### 1. BASIC PATIENTS SELECTION
#####################################################
#filter out rows with no biochemistry data
df_main_dc <- df %>%
  filter(eid %in% df_bio$eid)
nrow(df_main_dc)
df_bio_dc <- df_bio %>%
  filter(eid %in% df_main_dc$eid)
nrow(df_bio_dc)
# Unlike the MSM cohort above, only the baseline (0.0) glycated haemoglobin
# measurement is required here
#filter out rows with missing glycated haemoglobin at initial
df_bio_dc <- df_bio_dc %>%
  filter((!(is.na(`30750-0.0`))))
nrow(df_bio_dc)
df_main_dc <- df_main_dc %>%
  filter(eid %in% df_bio_dc$eid)
nrow(df_main_dc) #to check
##################### 2. CREATE GENES AND HES DATAFRAMES
#####################################################
#create genes dataset
df_genes_dc <- df_genes %>%
  filter(eid %in% unlist(df_bio_dc$eid))
nrow(df_genes_dc) #to check
#filter out rows with no genes data
df_bio_dc <- df_bio_dc %>%
  filter(eid %in% unlist(df_genes_dc$eid))
nrow(df_bio_dc)
df_main_dc <- df_main_dc %>%
  filter(eid %in% unlist(df_genes_dc$eid))
nrow(df_main_dc)
#create hes dataset
df_hes_dc <- df_hes %>%
  filter(eid %in% unlist(df_bio_dc$eid))
##################### 3. GET STATUS
#####################################################
# split_by_status() comes from ~/functions.R (sourced above); the last
# argument selects the visit (0 = initial, 1 = follow-up). It returns one
# row per eid with the status columns joined in below — confirm its contract
# in functions.R.
#get info on patients at init and fu
info_init_dc <- split_by_status(df_main_dc, df_bio_dc, df_genes_dc, df_hes_dc, 0)
info_fu_dc <- split_by_status(df_main_dc, df_bio_dc, df_genes_dc, df_hes_dc, 1)
#add info columns in df_main
df_main_dc <- df_main_dc %>%
  inner_join(info_init_dc, by = "eid")
df_main_dc <- df_main_dc %>%
  inner_join(info_fu_dc, by = "eid")
##################### 4. SELECT PATIENTS WITH DIABETES
#####################################################
# Keep anyone flagged diabetic at either visit, then subset every table
df_main_dc_sel <- df_main_dc %>%
  filter(status_init == "diabetes" | status_fu == "diabetes")
df_bio_dc_sel <- df_bio_dc %>%
  filter(eid %in% unlist(df_main_dc_sel$eid))
df_genes_dc_sel <- df_genes_dc %>%
  filter(eid %in% unlist(df_main_dc_sel$eid))
df_hes_dc_sel <- df_hes_dc %>%
  filter(eid %in% unlist(df_main_dc_sel$eid))
##################### 5. DATA FRAMES SAVING
#####################################################
write.csv(df_main_dc_sel,"~/XXX/df_main_dc_sel.csv", row.names = FALSE)
write.csv(df_bio_dc_sel,"~/XXX/df_bio_dc_sel.csv", row.names = FALSE)
write.csv(df_genes_dc_sel,"~/XXX/df_genes_dc_sel.csv", row.names = FALSE)
write.csv(df_hes_dc_sel,"~/XXX/df_hes_dc_sel.csv", row.names = FALSE)
| /patients_selection/patient_selection.R | no_license | shalabysar/T2D_study | R | false | false | 8,745 | r | library(data.table)
library(dplyr)
library(tidyr)
library(NbClust)
library(ggplot2)
library(gridExtra)
library(ggdendro)
library(cluster)
library(purrr)
library(tibble)
library(ggradar)
library(naniar)
###############################################################################
################################ TABLES LOADING ###############################
###############################################################################
#load tables - XXX to be filled as required
setwd("~/XXX")
df <- fread("~/XXX.csv")
df_diag <- fread("~/XXX.txt")
df_bio <- fread("~/XXX.csv")
df_genetic <- readRDS("~/XXX.rds")
df_genetic_dict <- fread("~/XXX.csv")
df_hes_main <- fread("~/XXX.txt")
df_hes_diag <- fread("~/XXX.txt")
df_death_main <- fread("~/XXX.txt")
df_death_cause <- fread("~/XXX.txt")
df <- as.data.frame(df)
df_diag <- as.data.frame(df_diag)
df_bio <- as.data.frame(df_bio)
df_genetic <- as.data.frame(df_genetic)
df_hes_main <- as.data.frame(df_hes_main)
df_hes_diag <- as.data.frame(df_hes_diag)
df_death_main <- as.data.frame(df_death_main)
df_death_cause <- as.data.frame(df_death_cause)
df_genetic_dict <- as.data.frame(df_genetic_dict)
df_genetic <- rownames_to_column(df_genetic, var = "eid")
df_genetic[ ,-1] <- sapply(df_genetic[ ,-1], as.integer)
df_genetic <- df_genetic %>%
na_if(0)
###############################################################################
################################# GET FUNCTIONS ###############################
###############################################################################
source("~/functions.R")
###############################################################################
############################# GENES DATA PROCESSING ###########################
###############################################################################
# SNPs excluded from all downstream scores
snps_to_remove <- c("rs5215")
# df_genes starts as the genotype table (codes 1/2/3; 0 was recoded to NA
# above) and is rewritten column-by-column into risk-allele counts (0/1/2).
# all_of() makes the exclusion explicit instead of relying on bare-vector
# tidyselect semantics.
df_genes <- df_genetic %>%
  dplyr::select(!dplyr::all_of(snps_to_remove))
# Column 1 is "eid"; every other column is one SNP
for (j in seq_along(df_genes)[-1]) {
  # Dictionary row for this SNP; column 5 = risk allele, column 8 = minor allele
  dict <- df_genetic_dict %>%
    filter(snp == names(df_genes)[j])
  risk_allele <- dict[[5]][1]
  minor_allele <- dict[[8]][1]
  col <- df_genes[, j]
  # Map genotype code -> number of risk alleles with a single vectorized
  # lookup. BUG FIX: the original sequential replacement
  # (col[col == 1] = 2 followed by col[col == 2] = 1) re-hit the 2s it had
  # just written, so genotypes 1 and 2 both ended up as 1 whenever the minor
  # allele was the risk allele. The lookup below swaps atomically; NA
  # genotypes (missing calls) stay NA. Codes outside 1:3 are not expected.
  if (minor_allele == risk_allele) {
    col <- c(2L, 1L, 0L)[col]
  } else {
    col <- c(0L, 1L, 2L)[col]
  }
  df_genes[, j] <- col
}
#calculate scores
##select snps
# Secretion / resistance SNP panels with their insulin-effect weights.
# %in% makes the exclusion correct for any length of snps_to_remove
# (the original `snp != snps_to_remove` recycled and would silently
# mis-filter if more SNPs were added to the exclusion list).
df_genetic_dict_secr <- df_genetic_dict %>%
  filter(type == "Secretion") %>%
  filter(!(snp %in% snps_to_remove)) %>%
  dplyr::select(c("snp", "insulin_effect"))
df_genetic_dict_res <- df_genetic_dict %>%
  filter(type == "Resistance") %>%
  filter(!(snp %in% snps_to_remove)) %>%
  dplyr::select(c("snp", "insulin_effect"))
##weighted scores
# Weighted allele count, rescaled so the mean weight across the panel is 1
# and the score is comparable to the unweighted count
score_secr_weighted <- as.matrix(df_genes[ ,df_genetic_dict_secr$snp]) %*%
  as.matrix(df_genetic_dict_secr$insulin_effect)
score_secr_weighted <-
  score_secr_weighted/(sum(df_genetic_dict_secr$insulin_effect)) * nrow(df_genetic_dict_secr)
score_res_weighted <- as.matrix(df_genes[ ,df_genetic_dict_res$snp]) %*%
  as.matrix(df_genetic_dict_res$insulin_effect)
score_res_weighted <-
  score_res_weighted/(sum(df_genetic_dict_res$insulin_effect)) * nrow(df_genetic_dict_res)
##non-weighted scores
score_secr_nweighted <- rowSums(df_genes[ ,df_genetic_dict_secr$snp])
score_res_nweighted <- rowSums(df_genes[ ,df_genetic_dict_res$snp])
##save in dataframe
# drop() collapses the n x 1 matrix products to plain numeric vectors so the
# data frame gets ordinary columns rather than matrix columns
df_genes$score_secr_weighted <- drop(score_secr_weighted)
df_genes$score_res_weighted <- drop(score_res_weighted)
df_genes$score_secr_nweighted <- score_secr_nweighted
df_genes$score_res_nweighted <- score_res_nweighted
###############################################################################
############################### HES DATA PROCESSING ###########################
###############################################################################
#join the two tables
df_hes <- left_join(df_hes_diag[ ,c("eid", "ins_index", "arr_index",
"level", "diag_icd10")],
df_hes_main[ ,c("eid", "ins_index", "epistart",
"epiend", "mainspef_uni", "tretspef_uni")],
by = c("eid", "ins_index"))
###############################################################################
############################## MULTI STATES MODEL #############################
###############################################################################
##################### 1.SELECT PATIENTS FOR STUDY
#####################################################
#filter out rows with no bio data
df_msm <- df %>%
filter(eid %in% df_bio$eid)
nrow(df_msm)
#filter out rows with no 2 assessment centers
df_msm <- df_msm%>%
filter((!(is.na(`53-0.0`))) & (!(is.na(`53-1.0`))))
nrow(df_msm)
#filter out rows with missing glycated haemoglobin
eid_msm <- df_msm$eid
df_bio_msm <- df_bio %>%
filter(eid %in% eid_msm)
nrow(df_bio_msm) #to check
df_bio_msm <- df_bio_msm %>%
filter((!(is.na(`30750-0.0`))) & (!(is.na(`30750-1.0`))))
nrow(df_bio_msm)
df_msm <- df_msm %>%
filter(eid %in% df_bio_msm$eid)
nrow(df_msm) #to check
##################### 2. CREATE GENES AND HES DATAFRAMES
#####################################################
#create genes dataset
df_genes_msm <- df_genes %>%
filter(eid %in% unlist(df_bio_msm$eid))
nrow(df_genes_msm) #to check
#filter out rows with no genes data
df_bio_msm <- df_bio_msm %>%
filter(eid %in% unlist(df_genes_msm$eid))
nrow(df_bio_msm)
df_msm <- df_msm %>%
filter(eid %in% unlist(df_genes_msm$eid))
nrow(df_msm)
#create hes dataset
df_hes_msm <- df_hes %>%
filter(eid %in% unlist(df_bio_msm$eid))
##################### 3. DATA FRAMES SAVING
#####################################################
#save dataframes
write.csv(df_msm,"~/XXX.csv", row.names = FALSE)
write.csv(df_bio_msm,"~/XXX.csv", row.names = FALSE)
write.csv(df_genes_msm,"~/XXX.csv", row.names = FALSE)
write.csv(df_hes_msm,"~/XXX.csv", row.names = FALSE)
###############################################################################
############################### DIABETES CLUSTERING ###########################
###############################################################################
# Builds the patient cohort for the diabetes-clustering analysis: keeps only
# patients with biochemistry, HbA1c and genetic data, labels them by diabetes
# status at initial assessment and follow-up, then saves the selected frames.
# NOTE(review): df, df_bio, df_genes, df_hes and split_by_status() are defined
# earlier in this file / elsewhere -- not visible from this section.
##################### 1. BASIC PATIENTS SELECTION
#####################################################
#filter out rows with no biochemestry data
df_main_dc <- df %>%
filter(eid %in% df_bio$eid)
nrow(df_main_dc)
# keep biochemistry rows only for the retained patients
df_bio_dc <- df_bio %>%
filter(eid %in% df_main_dc$eid)
nrow(df_bio_dc)
#filter out rows with missing glycated haemoglobin at initial
# (`30750-0.0` is the UK-Biobank-style field id for HbA1c at instance 0)
df_bio_dc <- df_bio_dc %>%
filter((!(is.na(`30750-0.0`))))
nrow(df_bio_dc)
# propagate the HbA1c restriction back to the main frame
df_main_dc <- df_main_dc %>%
filter(eid %in% df_bio_dc$eid)
nrow(df_main_dc) #to check
##################### 2. CREATE GENES AND HES DATAFRAMES
#####################################################
#create genes dataset
df_genes_dc <- df_genes %>%
filter(eid %in% unlist(df_bio_dc$eid))
nrow(df_genes_dc) #to check
#filter out rows with no genes data
df_bio_dc <- df_bio_dc %>%
filter(eid %in% unlist(df_genes_dc$eid))
nrow(df_bio_dc)
df_main_dc <- df_main_dc %>%
filter(eid %in% unlist(df_genes_dc$eid))
nrow(df_main_dc)
#create hes dataset
df_hes_dc <- df_hes %>%
filter(eid %in% unlist(df_bio_dc$eid))
##################### 3. GET STATUS
#####################################################
#get info on patients at init and fu
# NOTE(review): last argument is presumably the assessment instance
# (0 = initial, 1 = follow-up) -- confirm against split_by_status().
info_init_dc <- split_by_status(df_main_dc, df_bio_dc, df_genes_dc, df_hes_dc, 0)
info_fu_dc <- split_by_status(df_main_dc, df_bio_dc, df_genes_dc, df_hes_dc, 1)
#add info columns in df_main
df_main_dc <- df_main_dc %>%
inner_join(info_init_dc, by = "eid")
df_main_dc <- df_main_dc %>%
inner_join(info_fu_dc, by = "eid")
##################### 4. SELECT PATIENTS WITH DIABETES
#####################################################
# keep patients with diabetes at either time point, then subset the
# companion frames to the same eids
df_main_dc_sel <- df_main_dc %>%
filter(status_init == "diabetes" | status_fu == "diabetes")
df_bio_dc_sel <- df_bio_dc %>%
filter(eid %in% unlist(df_main_dc_sel$eid))
df_genes_dc_sel <- df_genes_dc %>%
filter(eid %in% unlist(df_main_dc_sel$eid))
df_hes_dc_sel <- df_hes_dc %>%
filter(eid %in% unlist(df_main_dc_sel$eid))
##################### 5. DATA FRAMES SAVING
#####################################################
# one distinct (anonymised) output file per data frame
write.csv(df_main_dc_sel,"~/XXX/df_main_dc_sel.csv", row.names = FALSE)
write.csv(df_bio_dc_sel,"~/XXX/df_bio_dc_sel.csv", row.names = FALSE)
write.csv(df_genes_dc_sel,"~/XXX/df_genes_dc_sel.csv", row.names = FALSE)
write.csv(df_hes_dc_sel,"~/XXX/df_hes_dc_sel.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rm_repeated_characters.R
\name{rm_repeated_characters}
\alias{rm_repeated_characters}
\alias{ex_repeated_characters}
\title{Remove/Replace/Extract Words With Repeating Characters}
\usage{
rm_repeated_characters(text.var, trim = !extract, clean = TRUE,
pattern = "@rm_repeated_characters", replacement = "", extract = FALSE,
dictionary = getOption("regex.library"), ...)
ex_repeated_characters(text.var, trim = !extract, clean = TRUE,
pattern = "@rm_repeated_characters", replacement = "", extract = TRUE,
dictionary = getOption("regex.library"), ...)
}
\arguments{
\item{text.var}{The text variable.}
\item{trim}{logical. If \code{TRUE} removes leading and trailing white
spaces.}
\item{clean}{trim logical. If \code{TRUE} extra white spaces and escaped
character will be removed.}
\item{pattern}{A character string containing a regular expression (or
character string for \code{fixed = TRUE}) to be matched in the given
character vector. Default, \code{@rm_repeated_characters} uses the
\code{rm_repeated_characters} regex from the regular expression dictionary from
the \code{dictionary} argument.}
\item{replacement}{Replacement for matched \code{pattern}.}
\item{extract}{logical. If \code{TRUE} the words with repeating characters
are extracted into a list of vectors.}
\item{dictionary}{A dictionary of canned regular expressions to search within
if \code{pattern} begins with \code{"@rm_"}.}
\item{\dots}{Other arguments passed to \code{\link[base]{gsub}}.}
}
\value{
Returns a character string with words containing repeated characters removed.
}
\description{
Remove/replace/extract words with repeating characters. The word must
contain characters, each repeating at least 2 times.
}
\examples{
x <- "aaaahahahahaha that was a good joke peep and pepper and pepe"
rm_repeated_characters(x)
ex_repeated_characters(x)
}
\references{
\url{http://stackoverflow.com/a/29438461/1000343}
}
\seealso{
\code{\link[base]{gsub}},
\code{\link[stringi]{stri_extract_all_regex}}
Other rm_ functions: \code{\link{rm_abbreviation}},
\code{\link{rm_between}}, \code{\link{rm_bracket}},
\code{\link{rm_caps_phrase}}, \code{\link{rm_caps}},
\code{\link{rm_citation_tex}}, \code{\link{rm_citation}},
\code{\link{rm_city_state_zip}},
\code{\link{rm_city_state}}, \code{\link{rm_date}},
\code{\link{rm_default}}, \code{\link{rm_dollar}},
\code{\link{rm_email}}, \code{\link{rm_emoticon}},
\code{\link{rm_endmark}}, \code{\link{rm_hash}},
\code{\link{rm_nchar_words}}, \code{\link{rm_non_ascii}},
\code{\link{rm_non_words}}, \code{\link{rm_number}},
\code{\link{rm_percent}}, \code{\link{rm_phone}},
\code{\link{rm_postal_code}},
\code{\link{rm_repeated_phrases}},
\code{\link{rm_repeated_words}}, \code{\link{rm_tag}},
\code{\link{rm_time}}, \code{\link{rm_title_name}},
\code{\link{rm_url}}, \code{\link{rm_white}},
\code{\link{rm_zip}}
}
\author{
\href{http://stackoverflow.com/}{stackoverflow's} vks and Tyler Rinker <tyler.rinker@gmail.com>.
}
\keyword{characters}
\keyword{repeat}
| /man/rm_repeated_characters.Rd | no_license | dajxyz/qdapRegex | R | false | true | 3,089 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rm_repeated_characters.R
\name{rm_repeated_characters}
\alias{rm_repeated_characters}
\alias{ex_repeated_characters}
\title{Remove/Replace/Extract Words With Repeating Characters}
\usage{
rm_repeated_characters(text.var, trim = !extract, clean = TRUE,
pattern = "@rm_repeated_characters", replacement = "", extract = FALSE,
dictionary = getOption("regex.library"), ...)
ex_repeated_characters(text.var, trim = !extract, clean = TRUE,
pattern = "@rm_repeated_characters", replacement = "", extract = TRUE,
dictionary = getOption("regex.library"), ...)
}
\arguments{
\item{text.var}{The text variable.}
\item{trim}{logical. If \code{TRUE} removes leading and trailing white
spaces.}
\item{clean}{trim logical. If \code{TRUE} extra white spaces and escaped
character will be removed.}
\item{pattern}{A character string containing a regular expression (or
character string for \code{fixed = TRUE}) to be matched in the given
character vector. Default, \code{@rm_repeated_characters} uses the
\code{rm_repeated_characters} regex from the regular expression dictionary from
the \code{dictionary} argument.}
\item{replacement}{Replacement for matched \code{pattern}.}
\item{extract}{logical. If \code{TRUE} the words with repeating characters
are extracted into a list of vectors.}
\item{dictionary}{A dictionary of canned regular expressions to search within
if \code{pattern} begins with \code{"@rm_"}.}
\item{\dots}{Other arguments passed to \code{\link[base]{gsub}}.}
}
\value{
Returns a character string with words containing repeated characters removed.
}
\description{
Remove/replace/extract words with repeating characters. The word must
contain characters, each repeating at least 2 times.
}
\examples{
x <- "aaaahahahahaha that was a good joke peep and pepper and pepe"
rm_repeated_characters(x)
ex_repeated_characters(x)
}
\references{
\url{http://stackoverflow.com/a/29438461/1000343}
}
\seealso{
\code{\link[base]{gsub}},
\code{\link[stringi]{stri_extract_all_regex}}
Other rm_ functions: \code{\link{rm_abbreviation}},
\code{\link{rm_between}}, \code{\link{rm_bracket}},
\code{\link{rm_caps_phrase}}, \code{\link{rm_caps}},
\code{\link{rm_citation_tex}}, \code{\link{rm_citation}},
\code{\link{rm_city_state_zip}},
\code{\link{rm_city_state}}, \code{\link{rm_date}},
\code{\link{rm_default}}, \code{\link{rm_dollar}},
\code{\link{rm_email}}, \code{\link{rm_emoticon}},
\code{\link{rm_endmark}}, \code{\link{rm_hash}},
\code{\link{rm_nchar_words}}, \code{\link{rm_non_ascii}},
\code{\link{rm_non_words}}, \code{\link{rm_number}},
\code{\link{rm_percent}}, \code{\link{rm_phone}},
\code{\link{rm_postal_code}},
\code{\link{rm_repeated_phrases}},
\code{\link{rm_repeated_words}}, \code{\link{rm_tag}},
\code{\link{rm_time}}, \code{\link{rm_title_name}},
\code{\link{rm_url}}, \code{\link{rm_white}},
\code{\link{rm_zip}}
}
\author{
\href{http://stackoverflow.com/}{stackoverflow's} vks and Tyler Rinker <tyler.rinker@gmail.com>.
}
\keyword{characters}
\keyword{repeat}
|
# Linear regression demo (Pax ~ ATM) plus a Monte-Carlo study of the OLS
# sampling distribution and of confidence-interval coverage for the slope.
load("~/Dropbox/__HTWG/Statistic/Stat_HS2018/handouts/inference/LR/unique2010.rda")
res <- lm(Pax ~ ATM, data = unique2010)
predict(res, data.frame(ATM = c(20100, 24000, 500)))
plot(Pax ~ ATM, data = unique2010)
abline(res)

# One simulated dataset from the DGP y = beta0 + beta1 * x + eps
xi <- seq(1, 10, 0.5)
n <- length(xi)
beta0 <- 2.0
beta1 <- 1.0
yi <- rnorm(n, mean = beta0 + beta1 * xi, sd = 0.5)
plot(xi, yi)
res <- lm(yi ~ xi)

# Repeated simulation: empirical distribution of intercept/slope estimates and
# empirical coverage of a 60% CI for the true slope (= 1).
xi <- seq(1, 10, 0.5)
n <- length(xi)
n_sim <- 1000                            # number of replications (was hard-coded twice)
inters <- rep(NA_real_, n_sim)
slopes <- rep(NA_real_, n_sim)
in_ci <- 0
for (i in seq_along(inters)) {           # seq_along() instead of 1:length()
  yi <- rnorm(n, mean = 1 * xi + 2, sd = 0.5) # DGP
  # plot(xi, yi)                         # debug only -- rendering 1000 plots made the loop crawl
  res <- lm(yi ~ xi)
  # abline(res)
  # res
  inters[i] <- res$coefficients[1]
  slopes[i] <- res$coefficients[2]
  ci <- confint(res, level = 0.6)[2, ]   # compute the slope CI once (was two confint() calls)
  if (ci[1] < 1 && ci[2] > 1) {
    in_ci <- in_ci + 1
  }
}
# Change N, sd in DGP
hist(inters, 20)
mean(inters)
var(inters)
hist(slopes, 20)
mean(slopes)
sd(slopes)
in_ci / n_sim  # empirical coverage; divisor follows n_sim instead of a literal 1000
| /schliessende/LR.R | no_license | oduerr/stat | R | false | false | 896 | r | load("~/Dropbox/__HTWG/Statistic/Stat_HS2018/handouts/inference/LR/unique2010.rda")
# Assumes unique2010 has already been loaded (the load() call precedes this block).
res <- lm(Pax ~ ATM, data = unique2010)
predict(res, data.frame(ATM = c(20100, 24000, 500)))
plot(Pax ~ ATM, data = unique2010)
abline(res)

# One simulated dataset from the DGP y = beta0 + beta1 * x + eps
xi <- seq(1, 10, 0.5)
n <- length(xi)
beta0 <- 2.0
beta1 <- 1.0
yi <- rnorm(n, mean = beta0 + beta1 * xi, sd = 0.5)
plot(xi, yi)
res <- lm(yi ~ xi)

# Repeated simulation: empirical distribution of intercept/slope estimates and
# empirical coverage of a 60% CI for the true slope (= 1).
xi <- seq(1, 10, 0.5)
n <- length(xi)
n_sim <- 1000                            # number of replications (was hard-coded twice)
inters <- rep(NA_real_, n_sim)
slopes <- rep(NA_real_, n_sim)
in_ci <- 0
for (i in seq_along(inters)) {           # seq_along() instead of 1:length()
  yi <- rnorm(n, mean = 1 * xi + 2, sd = 0.5) # DGP
  # plot(xi, yi)                         # debug only -- rendering 1000 plots made the loop crawl
  res <- lm(yi ~ xi)
  # abline(res)
  # res
  inters[i] <- res$coefficients[1]
  slopes[i] <- res$coefficients[2]
  ci <- confint(res, level = 0.6)[2, ]   # compute the slope CI once (was two confint() calls)
  if (ci[1] < 1 && ci[2] > 1) {
    in_ci <- in_ci + 1
  }
}
# Change N, sd in DGP
hist(inters, 20)
mean(inters)
var(inters)
hist(slopes, 20)
mean(slopes)
sd(slopes)
in_ci / n_sim  # empirical coverage; divisor follows n_sim instead of a literal 1000
|
# Demo: column arithmetic on an ff-backed data frame via with().
src_df <- data.frame(x = 1:10, y = 10:1)
ff_frame <- as.ffdf(src_df)
with(ff_frame, x + y)
| /examples/with.R | no_license | edwindj/ffbase | R | false | false | 77 | r | dat <- data.frame(x=1:10, y=10:1)
ffdat <- as.ffdf(dat) # convert the in-memory data.frame to an ff-backed ffdf
with(ffdat, {x+y}) # evaluate x + y with the ffdf columns in scope
|
# prepareZiref.R
#
# Purpose: prepare (global) object ziRef with character information
#
# Precondition: The cedict dictionary source file exists in
#               "../cedict/cedict_ts.u8.txt"
#               ziFreq has been loaded
#
# Postcondition: (global) object ziRef exists and has been saved to
#                "../data/ziRef.RData"
#
# Notes:
#
#
# V 1.0
# Date: November 2016
# Author: Boris Steipe and Yi Chen
#
# ToDo
#
# V 1.0 First code
#
# ==============================================================================
setwd(WENJIDIR)
# ==== PACKAGES ================================================================
# ==== DEFINITIONS =============================================================
cedictFile <- "../cedict/cedict_ts.u8.txt"
# ==== FUNCTIONS ===============================================================
# ==== PROCESS =================================================================
cedict <- readLines(cedictFile)
# FIX: use grepl()-based logical masks instead of x[- grep(...)]:
# when the pattern matches nothing, grep() returns integer(0) and
# x[-integer(0)] silently selects ZERO elements, dropping everything.
cedict <- cedict[!grepl("^#", cedict)]          # discard comments
cedict <- cedict[substr(cedict, 2, 2) == " "]   # keep single character entries
# cedict <- cedict[!grepl(" \\[[A-Z]", cedict)] # drop surnames (capital PY)
# cedict <- cedict[!grepl("variant of", cedict)] # drop variants
l <- length(ziFreq)
ziRef <- data.frame(S   = character(l),
                    T   = character(l),
                    PY  = character(l),
                    def = character(l),
                    stringsAsFactors = FALSE)
# cedict line format: "TRAD SIMP [pin1 yin1] /def 1/def 2/..."
for (i in seq_len(l)) {            # seq_len() is safe for l == 0 (1:l is not)
  if (! i %% 500) { print(i) }     # progress indicator
  # look up by simplified character (position 3) ...
  s <- cedict[substr(cedict, 3, 3) == names(ziFreq)[i]]
  if (length(s) == 0) { # character not found - is it a traditional one?
    s <- cedict[substr(cedict, 1, 1) == names(ziFreq)[i]]
  }
  if (length(s) > 1) { # more than one entry - put the most relevant one first
    # (condition was `> 0`, but ordering only matters for multiple entries)
    priority <- rep(1L, length(s))
    priority[grep(" \\[[A-Z]", s)] <- 0        # capital pinyin = surname
    priority[grep("/\\(classical\\)", s)] <- 0 # old use
    priority[grep("/\\(archaic\\)", s)] <- 0   # old use
    priority[grep("/\\(arch.\\)", s)] <- 0     # old use
    priority[grep("/\\(onom.\\)", s)] <- 0     # onomatopoetic, not meaning
    priority[grep("/variant", s)] <- 0
    priority[grep("/old variant", s)] <- 0
    priority[grep("/erroneous variant", s)] <- 0
    priority[grep("/see ", s)] <- 0
    s <- s[order(priority, decreasing = TRUE)]
  }
  if (length(s) > 0) { # process the best entry
    ziRef$S[i] <- substr(s[1], 3, 3)
    ziRef$T[i] <- substr(s[1], 1, 1)
    m <- regexec("(\\[.+?\\])", s[1])           # pinyin in square brackets
    ziRef$PY[i] <- unlist(regmatches(s[1], m))[1]
    m <- regexec("(/.+/$)", s[1])               # slash-delimited definitions
    ziRef$def[i] <- unlist(regmatches(s[1], m))[1]
    if (length(s) > 1) { # keep the remaining entries, "+"-separated
      ziRef$def[i] <- paste(c(ziRef$def[i], s[-1]),
                            collapse = "+")
    }
  } else { # not found: fall back to the character itself with "?" markers
    ziRef$S[i] <- names(ziFreq)[i]
    ziRef$T[i] <- names(ziFreq)[i]
    ziRef$PY[i] <- "[?]"
    ziRef$def[i] <- "/?/"
  }
}
row.names(ziRef) <- names(ziFreq)
# check: print the first entries (guard against fewer than 300 rows,
# which previously produced NA output lines)
for (i in seq_len(min(300L, nrow(ziRef)))) {
  cat(sprintf("%s %s %8s\t%s\n",
              ziRef$S[i],
              ziRef$T[i],
              ziRef$PY[i],
              substr(ziRef$def[i], 1, 50)))
}
# save(ziRef, file = "../data/ziRef.RData")
rm(cedictFile)
rm(cedict)
rm(l)
rm(i)
rm(s)
rm(m)
#
# ==== TESTS ===================================================================
# [END] | /prepareZiRef.R | no_license | scylense/wenji | R | false | false | 3,358 | r | # prepareZiref.R
#
# Purpose: prepare (global) object ziRef with character information
#
# Precondition: The cedict dictionary source file exists in
#               "../cedict/cedict_ts.u8.txt"
#               ziFreq has been loaded
#
# Postcondition: (global) object ziRef exists and has been saved to
#                "../data/ziRef.RData"
#
# Notes:
#
#
# V 1.0
# Date: November 2016
# Author: Boris Steipe and Yi Chen
#
# ToDo
#
# V 1.0 First code
#
# ==============================================================================
setwd(WENJIDIR)
# ==== PACKAGES ================================================================
# ==== DEFINITIONS =============================================================
cedictFile <- "../cedict/cedict_ts.u8.txt"
# ==== FUNCTIONS ===============================================================
# ==== PROCESS =================================================================
cedict <- readLines(cedictFile)
# FIX: use grepl()-based logical masks instead of x[- grep(...)]:
# when the pattern matches nothing, grep() returns integer(0) and
# x[-integer(0)] silently selects ZERO elements, dropping everything.
cedict <- cedict[!grepl("^#", cedict)]          # discard comments
cedict <- cedict[substr(cedict, 2, 2) == " "]   # keep single character entries
# cedict <- cedict[!grepl(" \\[[A-Z]", cedict)] # drop surnames (capital PY)
# cedict <- cedict[!grepl("variant of", cedict)] # drop variants
l <- length(ziFreq)
ziRef <- data.frame(S   = character(l),
                    T   = character(l),
                    PY  = character(l),
                    def = character(l),
                    stringsAsFactors = FALSE)
# cedict line format: "TRAD SIMP [pin1 yin1] /def 1/def 2/..."
for (i in seq_len(l)) {            # seq_len() is safe for l == 0 (1:l is not)
  if (! i %% 500) { print(i) }     # progress indicator
  # look up by simplified character (position 3) ...
  s <- cedict[substr(cedict, 3, 3) == names(ziFreq)[i]]
  if (length(s) == 0) { # character not found - is it a traditional one?
    s <- cedict[substr(cedict, 1, 1) == names(ziFreq)[i]]
  }
  if (length(s) > 1) { # more than one entry - put the most relevant one first
    # (condition was `> 0`, but ordering only matters for multiple entries)
    priority <- rep(1L, length(s))
    priority[grep(" \\[[A-Z]", s)] <- 0        # capital pinyin = surname
    priority[grep("/\\(classical\\)", s)] <- 0 # old use
    priority[grep("/\\(archaic\\)", s)] <- 0   # old use
    priority[grep("/\\(arch.\\)", s)] <- 0     # old use
    priority[grep("/\\(onom.\\)", s)] <- 0     # onomatopoetic, not meaning
    priority[grep("/variant", s)] <- 0
    priority[grep("/old variant", s)] <- 0
    priority[grep("/erroneous variant", s)] <- 0
    priority[grep("/see ", s)] <- 0
    s <- s[order(priority, decreasing = TRUE)]
  }
  if (length(s) > 0) { # process the best entry
    ziRef$S[i] <- substr(s[1], 3, 3)
    ziRef$T[i] <- substr(s[1], 1, 1)
    m <- regexec("(\\[.+?\\])", s[1])           # pinyin in square brackets
    ziRef$PY[i] <- unlist(regmatches(s[1], m))[1]
    m <- regexec("(/.+/$)", s[1])               # slash-delimited definitions
    ziRef$def[i] <- unlist(regmatches(s[1], m))[1]
    if (length(s) > 1) { # keep the remaining entries, "+"-separated
      ziRef$def[i] <- paste(c(ziRef$def[i], s[-1]),
                            collapse = "+")
    }
  } else { # not found: fall back to the character itself with "?" markers
    ziRef$S[i] <- names(ziFreq)[i]
    ziRef$T[i] <- names(ziFreq)[i]
    ziRef$PY[i] <- "[?]"
    ziRef$def[i] <- "/?/"
  }
}
row.names(ziRef) <- names(ziFreq)
# check: print the first entries (guard against fewer than 300 rows,
# which previously produced NA output lines)
for (i in seq_len(min(300L, nrow(ziRef)))) {
  cat(sprintf("%s %s %8s\t%s\n",
              ziRef$S[i],
              ziRef$T[i],
              ziRef$PY[i],
              substr(ziRef$def[i], 1, 50)))
}
# save(ziRef, file = "../data/ziRef.RData")
rm(cedictFile)
rm(cedict)
rm(l)
rm(i)
rm(s)
rm(m)
#
# ==== TESTS ===================================================================
# [END] |
# Fixtures: an 18x12 SummarizedExperiment with two assays plus row/column
# annotations, used by the filter() tests below.
.rowdata <- DataFrame(
A=rep(letters[1:3], each=6),
B=rep(letters[1:6], each=3),
C=1:18
)
rownames(.rowdata) = letters[1:18]
.coldata <- DataFrame(
D=rep(letters[1:2], each=6),
E=rep(letters[1:3], each=4),
F=1:12
)
rownames(.coldata) = month.abb[1:12]
# Two assays with reversed value order so they can be told apart.
.a1 <- matrix(1:216, nrow = 18)
.a2 <- matrix(216:1, nrow = 18)
.se <- SummarizedExperiment::SummarizedExperiment(
assays = list(A1=.a1, A2=.a2),
rowData = .rowdata,
colData = .coldata
)
test_that("filter works on SE", {
# rowData-based filtering (.dim="row") must match plain row subsetting
SE <- .se %>% filter(C>4, .dim="row")
expect_equal(SE, .se[5:18, ])
# `F` is backtick-quoted because F is also base R's FALSE alias
SE <- .se %>% filter(`F`>4, `F`<10, .dim="col")
expect_equal(SE, .se[, 5:9])
SE <- .se %>% filter(D %in% "b", `F`<10, .dim="col")
expect_equal(SE, .se[, 7:9])
# Reference other dims
SE <- .se %>% filter(rowSums(A1) > 1300, .dim="row")
expect_equal(SE, .se[10:18, ])
SE <- .se %>% filter(colSums(A1) > 2000, .dim="col")
expect_equal(SE, .se[, 7:12])
# Reference multiple dims
SE <- .se %>% filter(rowSums(A1[, `F` > 5]) > 1071, .dim="row")
expect_equal(SE, .se[10:18, ])
SE <- .se %>% filter(colSums(A1[C > 5, ]) > 1400, .dim="col")
expect_equal(SE, .se[, 7:12])
})
# Disabled grouped-filter tests; presumably kept pending group_by support.
# test_that("filter works on grouped SErame (single grouping var)", {
#   GSE <- group_by(.se, A)
#   SE <- GSE %>% filter(C>4)
#   expect_equal(SE, GSE[5:12, ] %>% copy_groups(SE))
#   expect_equal(as.list(group_data(SE)$.rows), list(1:2,3:8))
#
#   SE <- GSE %>% filter(C>4, C<10)
#   expect_equal(SE, GSE[5:9, ] %>% copy_groups(SE))
#   expect_equal(as.list(group_data(SE)$.rows), list(1:2,3:5))
#
#   SE <- GSE %>% filter(A %in% "b", C<10)
#   expect_equal(SE, GSE[7:9, ] %>% copy_groups(SE))
#   expect_equal(as.list(group_data(SE)$.rows), list(1:3))
# })
| /tests/testthat/test-filter-se.R | no_license | ahmohamed/slyr | R | false | false | 1,729 | r | .rowdata <- DataFrame(
A=rep(letters[1:3], each=6),
B=rep(letters[1:6], each=3),
C=1:18
)
rownames(.rowdata) = letters[1:18]
.coldata <- DataFrame(
D=rep(letters[1:2], each=6),
E=rep(letters[1:3], each=4),
F=1:12
)
rownames(.coldata) = month.abb[1:12]
# Two assays with reversed value order so they can be told apart.
.a1 <- matrix(1:216, nrow = 18)
.a2 <- matrix(216:1, nrow = 18)
# 18x12 SummarizedExperiment fixture used by the filter() tests below.
.se <- SummarizedExperiment::SummarizedExperiment(
assays = list(A1=.a1, A2=.a2),
rowData = .rowdata,
colData = .coldata
)
test_that("filter works on SE", {
# rowData-based filtering (.dim="row") must match plain row subsetting
SE <- .se %>% filter(C>4, .dim="row")
expect_equal(SE, .se[5:18, ])
# `F` is backtick-quoted because F is also base R's FALSE alias
SE <- .se %>% filter(`F`>4, `F`<10, .dim="col")
expect_equal(SE, .se[, 5:9])
SE <- .se %>% filter(D %in% "b", `F`<10, .dim="col")
expect_equal(SE, .se[, 7:9])
# Reference other dims
SE <- .se %>% filter(rowSums(A1) > 1300, .dim="row")
expect_equal(SE, .se[10:18, ])
SE <- .se %>% filter(colSums(A1) > 2000, .dim="col")
expect_equal(SE, .se[, 7:12])
# Reference multiple dims
SE <- .se %>% filter(rowSums(A1[, `F` > 5]) > 1071, .dim="row")
expect_equal(SE, .se[10:18, ])
SE <- .se %>% filter(colSums(A1[C > 5, ]) > 1400, .dim="col")
expect_equal(SE, .se[, 7:12])
})
# Disabled grouped-filter tests; presumably kept pending group_by support.
# test_that("filter works on grouped SErame (single grouping var)", {
#   GSE <- group_by(.se, A)
#   SE <- GSE %>% filter(C>4)
#   expect_equal(SE, GSE[5:12, ] %>% copy_groups(SE))
#   expect_equal(as.list(group_data(SE)$.rows), list(1:2,3:8))
#
#   SE <- GSE %>% filter(C>4, C<10)
#   expect_equal(SE, GSE[5:9, ] %>% copy_groups(SE))
#   expect_equal(as.list(group_data(SE)$.rows), list(1:2,3:5))
#
#   SE <- GSE %>% filter(A %in% "b", C<10)
#   expect_equal(SE, GSE[7:9, ] %>% copy_groups(SE))
#   expect_equal(as.list(group_data(SE)$.rows), list(1:3))
# })
|
# caret regression-test harness for method = "ordinalNet": trains the model
# through several resampling schemes (CV / LOOCV / none), with and without
# case weights, via both the x/y and recipe interfaces, and saves all
# resulting objects for comparison against a previous release.
timestamp <- Sys.time()
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "ordinalNet"
## In case the package or one of its dependencies uses random numbers
## on startup so we'll pre-load the required libraries:
for(i in getModelInfo(model)[[1]]$library)
do.call("requireNamespace", list(package = i))
#########################################################################
# Simulated ordinal two-class data; fixed seed keeps results reproducible.
set.seed(2)
training <- twoClassSim(100, ordinal = TRUE)
testing <- twoClassSim(500, ordinal = TRUE)
trainX <- training[, -ncol(training)]
trainY <- training$Class
wts <- runif(nrow(trainX))
# Recipe equivalent of preProc = c("center", "scale").
rec_cls <- recipe(Class ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
# Custom summaryFunction used to exercise case weights during resampling.
weight_test <- function (data, lev = NULL, model = NULL) {
# NOTE(review): the mean() value below is discarded; presumably it is only
# meant to touch data$weights -- confirm before removing.
mean(data$weights)
postResample(data[, "pred"], data[, "obs"])
}
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all",
classProbs = TRUE)
cctrl2 <- trainControl(method = "LOOCV",
classProbs = TRUE)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE)
cctrl4 <- trainControl(method = "cv", number = 3,
summaryFunction = weight_test)
cctrl5 <- trainControl(method = "LOOCV", summaryFunction = weight_test)
# x/y interface, 3-fold CV
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa",
preProc = c("center", "scale"))
# formula interface, same control -- should match the x/y fit
set.seed(849)
test_class_cv_form <- train(Class ~ ., data = training,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa",
preProc = c("center", "scale"))
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob")
test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)])
test_class_prob_form <- predict(test_class_cv_form, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl2,
metric = "Kappa",
preProc = c("center", "scale"))
# method = "none": single fit at the CV-selected tuning parameters
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "Kappa",
preProc = c("center", "scale"))
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_class_none_prob <- predict(test_class_none_model, testing[, -ncol(testing)], type = "prob")
# case-weighted fits
set.seed(849)
test_class_cv_weight <- train(trainX, trainY,
weights = wts,
method ="ordinalNet",
trControl = cctrl4,
tuneLength = 1,
metric = "Accuracy",
preProc = c("center", "scale"))
set.seed(849)
test_class_loo_weight <- train(trainX, trainY,
weights = wts,
method ="ordinalNet",
trControl = cctrl5,
tuneLength = 1,
metric = "Accuracy",
preProc = c("center", "scale"))
# Refit with the same seed so the recipe-interface fits below can be compared
# against freshly trained x/y-interface results.
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa",
preProc = c("center", "scale"))
set.seed(849)
test_class_cv_form_rec <- train(rec_cls, data = training,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa")
# NOTE(review): the stop() messages below are generic copies -- they fire on
# recipe-vs-formula/x-y mismatches as well as on weight mismatches.
if(
!isTRUE(
all.equal(test_class_cv_form$results,
test_class_cv_form_rec$results))
)
stop("CV weights not giving the same results")
set.seed(849)
test_class_loo_model_rec <- train(rec_cls, data = training,
method ="ordinalNet",
trControl = cctrl2,
metric = "Kappa")
if(
!isTRUE(
all.equal(test_class_loo_model_rec$results,
test_class_loo_model$results))
)
stop("CV weights not giving the same results")
# recipe with an explicit case-weight role
tmp <- training
tmp$wts <- wts
weight_rec <- recipe(Class ~ ., data = tmp) %>%
add_role(wts, new_role = "case weight") %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
set.seed(849)
test_class_cv_weight_rec <- train(weight_rec, data = tmp,
method ="ordinalNet",
trControl = cctrl4,
tuneLength = 1,
metric = "Accuracy")
if(
!isTRUE(
all.equal(test_class_cv_weight_rec$results,
test_class_cv_weight$results))
)
stop("CV weights not giving the same results")
# class levels must survive training intact
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
#########################################################################
test_class_predictors1 <- predictors(test_class_cv_model)
#########################################################################
test_class_imp <- varImp(test_class_cv_model)
#########################################################################
# Persist every test_* object plus session info for cross-version diffing.
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
| /RegressionTests/Code/ordinalNet.R | no_license | tymenschreuder/caret | R | false | false | 6,142 | r | timestamp <- Sys.time()
# caret regression-test harness for method = "ordinalNet" (timestamp is set
# on the preceding line): trains through several resampling schemes, with and
# without case weights, via the x/y and recipe interfaces, then saves results.
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "ordinalNet"
## In case the package or one of its dependencies uses random numbers
## on startup so we'll pre-load the required libraries:
for(i in getModelInfo(model)[[1]]$library)
do.call("requireNamespace", list(package = i))
#########################################################################
# Simulated ordinal two-class data; fixed seed keeps results reproducible.
set.seed(2)
training <- twoClassSim(100, ordinal = TRUE)
testing <- twoClassSim(500, ordinal = TRUE)
trainX <- training[, -ncol(training)]
trainY <- training$Class
wts <- runif(nrow(trainX))
# Recipe equivalent of preProc = c("center", "scale").
rec_cls <- recipe(Class ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
# Custom summaryFunction used to exercise case weights during resampling.
weight_test <- function (data, lev = NULL, model = NULL) {
# NOTE(review): the mean() value below is discarded; presumably it is only
# meant to touch data$weights -- confirm before removing.
mean(data$weights)
postResample(data[, "pred"], data[, "obs"])
}
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all",
classProbs = TRUE)
cctrl2 <- trainControl(method = "LOOCV",
classProbs = TRUE)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE)
cctrl4 <- trainControl(method = "cv", number = 3,
summaryFunction = weight_test)
cctrl5 <- trainControl(method = "LOOCV", summaryFunction = weight_test)
# x/y interface, 3-fold CV
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa",
preProc = c("center", "scale"))
# formula interface, same control -- should match the x/y fit
set.seed(849)
test_class_cv_form <- train(Class ~ ., data = training,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa",
preProc = c("center", "scale"))
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob")
test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)])
test_class_prob_form <- predict(test_class_cv_form, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl2,
metric = "Kappa",
preProc = c("center", "scale"))
# method = "none": single fit at the CV-selected tuning parameters
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "Kappa",
preProc = c("center", "scale"))
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_class_none_prob <- predict(test_class_none_model, testing[, -ncol(testing)], type = "prob")
# case-weighted fits
set.seed(849)
test_class_cv_weight <- train(trainX, trainY,
weights = wts,
method ="ordinalNet",
trControl = cctrl4,
tuneLength = 1,
metric = "Accuracy",
preProc = c("center", "scale"))
set.seed(849)
test_class_loo_weight <- train(trainX, trainY,
weights = wts,
method ="ordinalNet",
trControl = cctrl5,
tuneLength = 1,
metric = "Accuracy",
preProc = c("center", "scale"))
# Refit with the same seed so the recipe-interface fits below can be compared
# against freshly trained x/y-interface results.
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa",
preProc = c("center", "scale"))
set.seed(849)
test_class_cv_form_rec <- train(rec_cls, data = training,
method ="ordinalNet",
trControl = cctrl1,
metric = "Kappa")
# NOTE(review): the stop() messages below are generic copies -- they fire on
# recipe-vs-formula/x-y mismatches as well as on weight mismatches.
if(
!isTRUE(
all.equal(test_class_cv_form$results,
test_class_cv_form_rec$results))
)
stop("CV weights not giving the same results")
set.seed(849)
test_class_loo_model_rec <- train(rec_cls, data = training,
method ="ordinalNet",
trControl = cctrl2,
metric = "Kappa")
if(
!isTRUE(
all.equal(test_class_loo_model_rec$results,
test_class_loo_model$results))
)
stop("CV weights not giving the same results")
# recipe with an explicit case-weight role
tmp <- training
tmp$wts <- wts
weight_rec <- recipe(Class ~ ., data = tmp) %>%
add_role(wts, new_role = "case weight") %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
set.seed(849)
test_class_cv_weight_rec <- train(weight_rec, data = tmp,
method ="ordinalNet",
trControl = cctrl4,
tuneLength = 1,
metric = "Accuracy")
if(
!isTRUE(
all.equal(test_class_cv_weight_rec$results,
test_class_cv_weight$results))
)
stop("CV weights not giving the same results")
# class levels must survive training intact
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
#########################################################################
test_class_predictors1 <- predictors(test_class_cv_model)
#########################################################################
test_class_imp <- varImp(test_class_cv_model)
#########################################################################
# Persist every test_* object plus session info for cross-version diffing.
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
|
### Atividade prática
## Vamos começar carregando um ambiente previamente criado para esta aula.
## Nas aulas seguintes trabalharemos com fontes de dados em arquivos de formatos diversos.
load("aula-02/data/dados_exercicio.RData")
### 1 ####
## Inicie mostrando uma prévia do conteúdo da variável acessos_alunos
##
## Dica 1: No material sobre estruturas de dados vimos como exibir uma prévia do conteúdo de uma variável com 2 funções diferentes
## Dica 2: Na primeira aula vimos uma função do RStudio que permite visualizar o conteúdo de uma variável, mas neste caso
## quero ver uma saída na Console.
### # ####
str(acessos_alunos)
### 2 ###
## Quantos elementos a variável acessos_alunos possui? Utilize uma função do R que retorna o tamanho da variável.
## Dica: Vimos um exemplo no mesmo material sobre estruturas de dados
### # ###
length(acessos_alunos)
### 3 ###
## Utilizando o seu código de aluno da Uniritter como nome de um valor da lista, imprima uma linha informando quantos acessos
## você fez. A linha deve ser impressa na Console, com um texto que diga o seu código de aluno e o valor conforme o seguinte exemplo:
## "O aluno <alu...> realizou N acessos."
## Dica 1: Utilize a função paste() para composição do texto que será impresso.
## Dica 2: Vimos exemplos disto nos materiais dos tipos numéricos e das estruturas de dados.
### # ###
print(paste("O aluno alu201830282 realizou " , acessos_alunos["alu201830282"] , " acessos."))
### 4 ###
## A operação abaixo cria um vetor com todas as quantidades de acessos por aluno.
acessos <- unlist(acessos_alunos)
## Após a criação deste vetor, determine quantos colegas fizeram mais acessos que você.
## Faça isso em 3 etapas:
## 1. Crie uma variável com o resultado de um teste de comparação (relacional) entre o seu número de acessos e os demais.
## 2. Com uma operação de indexação, crie um outro vetor contendo somente os valores maiores
## 3. Determine o tamanho do vetor da operação 2, imprimindo o resultado na Console
### # ###
str(acessos)
x <- acessos > acessos_alunos$alu201830282
y <- acessos[x]
paste("alu com + acessos: ", length(y))
### 5 ###
## Combine todas as etapas acima em uma única chamada, sem a criação dos vetores auxiliares
### # ###
# FIX: the previous answer, length(acessos > ...), took the length of the
# whole logical vector (i.e. the number of students), not the number of TRUEs.
# Subset first and then take the length (equivalently: sum(acessos > ...)).
length(acessos[acessos > acessos_alunos[["alu201830282"]]])
### 6 ###
## Agora determine quantos colegas fizeram menos acessos que você.
## Faça isso utilizando a função sum!
## Dica: Lembre que falamos sobre como o R faz conversões implícitas entre o tipo lógico e tipos numéricos
### # ###
sum(acessos_alunos < acessos_alunos$alu201830282)
### 7 ###
## Supondo que eu quero atribuir uma nota de participação baseada na quantidade de acessos, com a seguinte definição:
## - Alunos que não acessaram não recebem nota de participação
## - Alunos que acessaram, mas menos que 10 vezes, recebem 1 ponto
## - Alunos que acessaram 10 vezes ou mais recebem 2 pontos
## Crie um vetor chamado notas com a nota de cada aluno, na mesma ordem do vetor de acessos criado para o exercício 4.
## Dica: Pode ser mais fácil se iniciar o vetor notas como uma cópia do vetor acessos, modificando os valores conforme as regras
## OBSERVAÇÃO :: Não avaliarei participação na forma do enunciado deste exercício.
### # ###
notas <- acessos
notas[which(notas < 1)] <- 0
notas[which(notas > 0 & notas < 10)] <- 1
notas[which(notas > 9)] <- 2
### 8 ###
## Visualização da quantidade de alunos com cada nota de participação. Esta não é uma atividade, apenas uma ilustração de como
## criar uma tabela com esta contagem
table(notas)
### 9 ###
## Abaixo, criei uma versão modificada da lista acessos_alunos, com a inclusão de um acesso convidado.
## Não foi possível determinar o número de acessos por não existir um login para este tipo de acesso.
acessos_alunos_e_guest <- acessos_alunos
acessos_alunos_e_guest$guest <- NA
## Repita as atividades 4, 5, 6, e 7 utilizando o acessos_com_guest no lugar da lista acessos_alunos.
## Tome o devido cuidado de sempre criar variáveis com nomes diferentes das já utilizadas!
acessos_alunos_guest <- acessos_alunos
acessos_alunos_guest$guest <- NA
guest <- unlist(acessos_alunos_guest)
comparacao <- guest[guest != acessos_alunos["alu201830282"]]
comparacao
maior <- guest[guest > acessos_alunos["alu201830282"]]
maior
length(maior)
length(guest[guest > acessos_alunos["alu201830282"]])
menor <- guest[guest < acessos_alunos["alu201830282"]]
sum(menor < acessos_alunos["alu201830282"] )
notag <- guest
notag[which(notag == 0)] <- NA # row numbers
notag[which(notag > 0 & notag < 10)] <- 1
notag[which(notag >= 10)] <- 2
notag
### 10 ###
## Responda as seguintes perguntas:
# 1. Houve modificação no número de alunos com mais e com menos acessos que você?
# Sim, houve alteração.
# 2. Como você conclui que o R trata comparações (operações relacionais) entre valores numéricos e NA?
# O R se tras resultato tanto para valores numéricos quanto pra NA
# 3. Qual o resultado do uso da função sum na presença de NA? Trará o NA no resultado
# O que você conclui sobre a operação de soma de todos os valores de um vetor na presença de NA? O valor NA será retornado
#
# 4. Execute o comando abaixo para ler a documentação da função sum e veja se há como modificar a chamada da função sum na presença
# de NAs. Teste os exemplos da página de help da função sum.
help(sum)
| /aula-02/05_atividade.R | no_license | mauriciolena/data-analysis_with_R-201801 | R | false | false | 5,479 | r | ### Atividade prática
## Vamos começar carregando um ambiente previamente criado para esta aula.
## Nas aulas seguintes trabalharemos com fontes de dados em arquivos de formatos diversos.
load("aula-02/data/dados_exercicio.RData")
### 1 ####
## Inicie mostrando uma prévia do conteúdo da variável acessos_alunos
##
## Dica 1: No material sobre estruturas de dados vimos como exibir uma prévia do conteúdo de uma variável com 2 funções diferentes
## Dica 2: Na primeira aula vimos uma função do RStudio que permite visualizar o conteúdo de uma variável, mas neste caso
## quero ver uma saída na Console.
### # ####
str(acessos_alunos)
### 2 ###
## Quantos elementos a variável acessos_alunos possui? Utilize uma função do R que retorna o tamanho da variável.
## Dica: Vimos um exemplo no mesmo material sobre estruturas de dados
### # ###
length(acessos_alunos)
### 3 ###
## Utilizando o seu código de aluno da Uniritter como nome de um valor da lista, imprima uma linha informando quantos acessos
## você fez. A linha deve ser impressa na Console, com um texto que diga o seu código de aluno e o valor conforme o seguinte exemplo:
## "O aluno <alu...> realizou N acessos."
## Dica 1: Utilize a função paste() para composição do texto que será impresso.
## Dica 2: Vimos exemplos disto nos materiais dos tipos numéricos e das estruturas de dados.
### # ###
print(paste("O aluno alu201830282 realizou " , acessos_alunos["alu201830282"] , " acessos."))
### 4 ###
## A operação abaixo cria um vetor com todas as quantidades de acessos por aluno.
acessos <- unlist(acessos_alunos)
## Após a criação deste vetor, determine quantos colegas fizeram mais acessos que você.
## Faça isso em 3 etapas:
## 1. Crie uma variável com o resultado de um teste de comparação (relacional) entre o seu número de acessos e os demais.
## 2. Com uma operação de indexação, crie um outro vetor contendo somente os valores maiores
## 3. Determine o tamanho do vetor da operação 2, imprimindo o resultado na Console
### # ###
str(acessos)
x <- acessos > acessos_alunos$alu201830282
y <- acessos[x]
paste("alu com + acessos: ", length(y))
### 5 ###
## Combine todas as etapas acima em uma única chamada, sem a criação dos vetores auxiliares
### # ###
length(acessos > acessos_alunos["alu201830282"])
### 6 ###
## Agora determine quantos colegas fizeram menos acessos que você.
## Faça isso utilizando a função sum!
## Dica: Lembre que falamos sobre como o R faz conversões implícitas entre o tipo lógico e tipos numéricos
### # ###
sum(acessos_alunos < acessos_alunos$alu201830282)
### 7 ###
## Supondo que eu quero atribuir uma nota de participação baseada na quantidade de acessos, com a seguinte definição:
## - Alunos que não acessaram não recebem nota de participação
## - Alunos que acessaram, mas menos que 10 vezes, recebem 1 ponto
## - Alunos que acessaram 10 vezes ou mais recebem 2 pontos
## Crie um vetor chamado notas com a nota de cada aluno, na mesma ordem do vetor de acessos criado para o exercício 4.
## Dica: Pode ser mais fácil se iniciar o vetor notas como uma cópia do vetor acessos, modificando os valores conforme as regras
## OBSERVAÇÃO :: Não avaliarei participação na forma do enunciado deste exercício.
### # ###
notas <- acessos
notas[which(notas < 1)] <- 0
notas[which(notas > 0 & notas < 10)] <- 1
notas[which(notas > 9)] <- 2
### 8 ###
## Visualização da quantidade de alunos com cada nota de participação. Esta não é uma atividade, apenas uma ilustração de como
## criar uma tabela com esta contagem
table(notas)
### 9 ###
## Abaixo, criei uma versão modificada da lista acessos_alunos, com a inclusão de um acesso convidado.
## Não foi possível determinar o número de acessos por não existir um login para este tipo de acesso.
acessos_alunos_e_guest <- acessos_alunos
acessos_alunos_e_guest$guest <- NA
## Repita as atividades 4, 5, 6, e 7 utilizando o acessos_com_guest no lugar da lista acessos_alunos.
## Tome o devido cuidado de sempre criar variáveis com nomes diferentes das já utilizadas!
acessos_alunos_guest <- acessos_alunos
acessos_alunos_guest$guest <- NA
guest <- unlist(acessos_alunos_guest)
comparacao <- guest[guest != acessos_alunos["alu201830282"]]
comparacao
maior <- guest[guest > acessos_alunos["alu201830282"]]
maior
length(maior)
length(guest[guest > acessos_alunos["alu201830282"]])
menor <- guest[guest < acessos_alunos["alu201830282"]]
sum(menor < acessos_alunos["alu201830282"] )
notag <- guest
notag[which(notag == 0)] <- NA # row numbers
notag[which(notag > 0 & notag < 10)] <- 1
notag[which(notag >= 10)] <- 2
notag
### 10 ###
## Responda as seguintes perguntas:
# 1. Houve modificação no número de alunos com mais e com menos acessos que você?
# Sim, houve alteração.
# 2. Como você conclui que o R trata comparações (operações relacionais) entre valores numéricos e NA?
# O R se tras resultato tanto para valores numéricos quanto pra NA
# 3. Qual o resultado do uso da função sum na presença de NA? Trará o NA no resultado
# O que você conclui sobre a operação de soma de todos os valores de um vetor na presença de NA? O valor NA será retornado
#
# 4. Execute o comando abaixo para ler a documentação da função sum e veja se há como modificar a chamada da função sum na presença
# de NAs. Teste os exemplos da página de help da função sum.
help(sum)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.wafregional_operations.R
\name{get_byte_match_set}
\alias{get_byte_match_set}
\title{Returns the ByteMatchSet specified by ByteMatchSetId}
\usage{
get_byte_match_set(ByteMatchSetId)
}
\arguments{
\item{ByteMatchSetId}{[required] The \code{ByteMatchSetId} of the ByteMatchSet that you want to get. \code{ByteMatchSetId} is returned by CreateByteMatchSet and by ListByteMatchSets.}
}
\description{
Returns the ByteMatchSet specified by \code{ByteMatchSetId}.
}
\section{Accepted Parameters}{
\preformatted{get_byte_match_set(
ByteMatchSetId = "string"
)
}
}
\examples{
# The following example returns the details of a byte match set with the
# ID exampleIDs3t-46da-4fdb-b8d5-abc321j569j5.
\donttest{get_byte_match_set(
ByteMatchSetId = "exampleIDs3t-46da-4fdb-b8d5-abc321j569j5"
)}
}
| /service/paws.wafregional/man/get_byte_match_set.Rd | permissive | CR-Mercado/paws | R | false | true | 871 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.wafregional_operations.R
\name{get_byte_match_set}
\alias{get_byte_match_set}
\title{Returns the ByteMatchSet specified by ByteMatchSetId}
\usage{
get_byte_match_set(ByteMatchSetId)
}
\arguments{
\item{ByteMatchSetId}{[required] The \code{ByteMatchSetId} of the ByteMatchSet that you want to get. \code{ByteMatchSetId} is returned by CreateByteMatchSet and by ListByteMatchSets.}
}
\description{
Returns the ByteMatchSet specified by \code{ByteMatchSetId}.
}
\section{Accepted Parameters}{
\preformatted{get_byte_match_set(
ByteMatchSetId = "string"
)
}
}
\examples{
# The following example returns the details of a byte match set with the
# ID exampleIDs3t-46da-4fdb-b8d5-abc321j569j5.
\donttest{get_byte_match_set(
ByteMatchSetId = "exampleIDs3t-46da-4fdb-b8d5-abc321j569j5"
)}
}
|
##' Equivalence t-Test Summary Function
##'
##' \code{summary.equivttest} is a summary function for objects of the class \code{equivttest}. This summary function should be paired with the \code{equiv.t.test} function.
##'
##' @seealso \code{\link{equiv.t.test}}
##'
##' @examples
##' # Wellek p 124
##' x=c(10.3,11.3,2,-6.1,6.2,6.8,3.7,-3.3,-3.6,-3.5,13.7,12.6)
##' y=c(3.3,17.7,6.7,11.1,-5.8,6.9,5.8,3,6,3.5,18.7,9.6)
##' eps=c(.5,1)
##' res=equiv.t.test(x,y,eps_sub=eps)
##' summary(res)
##' @export
summary.equivttest <- function(res, ...){
  # S3 summary method for objects of class "equivttest" (see equiv.t.test).
  # Prints: input type and standard error, the critical interval for the
  # t-statistic, the substantive and standardized equivalence confidence
  # intervals, the test decision with its p-value, and (when computed) power.
  #
  # Fixes vs. original: the signature now accepts `...` so the method is
  # consistent with the summary() generic, and the object is returned
  # invisibly (the S3 convention for print/summary-style methods).
  cat("Equivalence t-test \n")
  cat(paste0("Input: ", res$intype, ", ", res$std.err.type, " SE = ", round(res$SE,3), "\n"))
  cat(paste("T-statistic critical interval:", round(res$critical.const[1],3),"to", round(res$critical.const[2],3)), "\n")
  cat(paste("Substantive equivalence CI:", round(res$CI_sub[1],3),"to", round(res$CI_sub[2],3)), "\n")
  cat(paste("Standardized equivalence CI:", round(res$CI_std[1],3),"to", round(res$CI_std[2],3)), "\n")
  # res$rej is already printable; the original wrapped it in a redundant paste0()
  cat(paste0("Reject the null hypothesis? ", res$rej, ", p-value of ", round(res$p,3), "\n"))
  if(!is.na(res$power)) cat(paste("Power of the test =", round(res$power,3)), "\n")
  invisible(res)
}
| /R/summary_equivttest.R | permissive | jwbowers/equivtest | R | false | false | 1,202 | r | ##' Equivalence t-Test Summary Function
##'
##' \code{summary.equivttest} is a summary function for objects of the class \code{equivttest}. This summary function should be paired with the \code{equiv.t.test} function.
##'
##' @seealso \code{\link{equiv.t.test}}
##'
##' @examples
##' # Wellek p 124
##' x=c(10.3,11.3,2,-6.1,6.2,6.8,3.7,-3.3,-3.6,-3.5,13.7,12.6)
##' y=c(3.3,17.7,6.7,11.1,-5.8,6.9,5.8,3,6,3.5,18.7,9.6)
##' eps=c(.5,1)
##' res=equiv.t.test(x,y,eps_sub=eps)
##' summary(res)
##' @export
summary.equivttest <- function(res){
cat("Equivalence t-test \n")
cat(paste0("Input: ", res$intype, ", ", res$std.err.type, " SE = ", round(res$SE,3), "\n"))
cat(paste("T-statistic critical interval:", round(res$critical.const[1],3),"to", round(res$critical.const[2],3)), "\n")
cat(paste("Substantive equivalence CI:", round(res$CI_sub[1],3),"to", round(res$CI_sub[2],3)), "\n")
cat(paste("Standardized equivalence CI:", round(res$CI_std[1],3),"to", round(res$CI_std[2],3)), "\n")
cat(paste0("Reject the null hypothesis? ", paste0(res$rej), ", p-value of ",round(res$p,3), "\n"))
if(!is.na(res$power)) cat(paste("Power of the test =", round(res$power,3)), "\n")
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
# UI definition: sidebar with a year slider (animatable over 2010-2018), a
# demographic selector, and a statistic selector; main panel shows a caption
# (output "text"), a map (output "map"), and a table (output "table") — all
# three presumably rendered by matching render* calls in server.R (not shown here).
shinyUI(fluidPage(
# Application title
titlePanel("Wilmington Arrests by Census Tract 2010-2018"),
# Sidebar controls: animated year slider plus two drop-down selectors
sidebarLayout(
sidebarPanel(
sliderInput("year", label = "Year of Arrest",
min = 2010, max = 2018, value = 2010, sep = "", animate = animationOptions(interval = 500, loop = TRUE)),
selectInput("data",label = "Demographic", c("Total Arrests", "Black Arrests", "White Arrests")),
selectInput("stat", label = "Statistic", c("None", "Percent Population Arrested","Percent of Total Arrests", "SIR","Poisson Regression"))
),
# Main panel: caption, choropleth/plot, and summary table
mainPanel(
textOutput("text"),
plotOutput("map"),
tableOutput("table")
)
)
))
| /ui.R | no_license | jbm5582/case_study_4 | R | false | false | 1,177 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Wilmington Arrests by Census Tract 2010-2018"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("year", label = "Year of Arrest",
min = 2010, max = 2018, value = 2010, sep = "", animate = animationOptions(interval = 500, loop = TRUE)),
selectInput("data",label = "Demographic", c("Total Arrests", "Black Arrests", "White Arrests")),
selectInput("stat", label = "Statistic", c("None", "Percent Population Arrested","Percent of Total Arrests", "SIR","Poisson Regression"))
),
# Show a plot of the generated distribution
mainPanel(
textOutput("text"),
plotOutput("map"),
tableOutput("table")
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tl_labels.R
\name{tl_labels}
\alias{tl_labels}
\title{tl_labels}
\usage{
tl_labels(data, spss = FALSE)
}
\arguments{
\item{data}{Dataframe from which you want to draw labels}
\item{spss}{Whether the dataset is from SPSS or not. Defaults to FALSE.}
}
\value{
Returns a data object of variable names and associated labels to the environment for other functions to use
}
\description{
Create a dataframe of variables and labels to print in tables
}
\examples{
tl_labels(omnibus0319, spss = TRUE)
}
| /man/tl_labels.Rd | no_license | willdebras/topliner | R | false | true | 576 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tl_labels.R
\name{tl_labels}
\alias{tl_labels}
\title{tl_labels}
\usage{
tl_labels(data, spss = FALSE)
}
\arguments{
\item{data}{Dataframe from which you want to draw labels}
\item{spss}{Whether the dataset is from SPSS or not. Defaults to FALSE.}
}
\value{
Returns a data object of variable names and associated labels to the environment for other functions to use
}
\description{
Create a dataframe of variables and labels to print in tables
}
\examples{
tl_labels(omnibus0319, spss = TRUE)
}
|
##Version Updates
#Version 1: everything is new!
#Someday, I will describe the model here
###Notes for Alexander###
##Things that need fixing:
#~~noticed that detour sometimes goes to a resource further away than AG destinations. This should not be happening.
##Things to improve:
#~~Additive gravity rule (weight of j vs. G)
#~~Equation for when primates leave a patch
#~~Clustering and other spatial statistics of resources
#~~Magic numbers removed and added as parameters (extraction_rate)
##Things to add:
#~~resource birth/death
#~~new rules for starting locations
#~~limited LTM, LTM updating by sight
#~~increase functionality with multiple primates
#~~rank order behaviors and other group behavior rules
##Things to consider
#~~Detour will go to any resource closer than AG, consider requiring detour to move toward AG
#~~implementation of NNR could be informative, could also unnecessarily slow processing time
#####################################SET-UP##################################################
library(plyr)
library(calibrate)
library(graphics)
defaultEnergy = 0 #energy that primates start with by default. Can be overwritten during primate creation
fieldSize = 10 #maximum absolute value for x and y coordinates of a resource. Model currently only supports square fields
resourceAvail = 600 #average total resource availability. Expected mean resource size will be this value divided by resource num
resSizeRange = 7 #size by which any resource is able to deviate from the expected mean. Resource size is a random uniform draw.
resourceNum = 20 #number of resources generated
cluster = 0.2 #probability that a new resource will be created between 1 and 2 units away from the previous resource
depletionIndex = 0 #scales the diminishing return factor of resources. At 0, resource density has no impact on foraging return. At 1, foraging return is multiplied by the density
k = 1 #distance exponent in gravity model. The simple gravity score is Size/ Distance ^ k
primateSpeed = 1 #how many units a primate can move per step
groupSizes = c(1) #each element of the vector represents a group, the value is the size of the group. Currently, all individuals are created on the same point, making more than one individual indistinguisable from increasing the foraging rate of a single individual. plotEnergy function does not handle multiple primates yet
moveCost = 2 #energy decrease each step a primate moves
metabCost = 2 #energy decrease each step. If primate moves, this is added to moveCost. Also currently used to determine if a primate should leave current resource (primate leaves when expected foraging return is less than this value)
rRegen = c(min = 0, max = 0) #determines density increase of resources for each step. Resources are assigned a value uniformly drawn from this range during environment creation. To scale regen rate to resource value, manually change in fillRegen_rate function
types = c("AG", "hybrid") #movement models to simulate across all created environments. Model can handle vector of any length, but currently only has methods for "AG" and "hybrid"
P = list(defEn = defaultEnergy, fieldSize = fieldSize, resAvail = resourceAvail, resSR = resSizeRange, resNum = resourceNum, clus = cluster, depI = depletionIndex, k = k, pSpeed = 1, grSizes = groupSizes, mCost = moveCost, metabCost = metabCost, rRegen = rRegen, types = types) #saves all parameters to a list that can easily be passed to functions
###################################FUNCTIONS####################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Environment Creation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
createEnviron = function(P) {
#input: List of parameters
#output: data frame with number of rows = resourceNum and capital letters for row names.
#data frame created contains all resource information including coordinates, return_value, extract_rate, density (starts at 1), and regen_rate. If more than 26 resources, row-names will become NA which will affect some functions in model
#Columns: coordinates (list of named c(x, y) vectors), return_value (uniform draw centered on resAvail/resNum +/- resSR), extract_rate (constant 1), density (starts at 1, depleted by foraging), regen_rate (per-step density increase drawn by fillRegen_rate).
environ = data.frame(matrix(0, nrow = P$resNum, ncol = 5, dimnames = list(LETTERS[1:P$resNum], c("coordinates", "return_value", "extract_rate", "density", "regen_rate"))))
environ$coordinates = vector("list", length(environ$coordinates)) #changes coordinates column to a list (allows mapply to fill each element with a vector)
#coordinates filled sequentially so fillCoords can reference earlier resources (clustering / minimum-spacing rules)
for (i in seq(nrow(environ))) environ$coordinates[[i]] = fillCoords(environ$coordinates, i, P)[[i]]
environ$return_value = sapply(environ$return_value, function(x) round(runif(n = 1, min = P$resAvail/P$resNum - P$resSR, max = P$resAvail/P$resNum + P$resSR), digits = 0))
environ$extract_rate = 1 #If changed from one, add as a parameter
environ$density = 1
environ$regen_rate = mapply(fillRegen_rate, environ$regen_rate, MoreArgs = list(P = P))
environ
}
fillCoords = function(coords, i, P) {
#input: a list of coordinates (can be empty), index to fill, and list of parameters
#output: new list of coordinates
#only generates one set of coordinates, but takes whole list as input so previous coordinates can be referenced. element of list prior to i should be filled
#note* The output is a whole list of coordinates, so be sure to index the result if you only want the coordinates that were generated.
if (i == 1) {coords[[i]] = c(x = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2), y = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2)) #First element of list is generated randomly. Absolute value of generated coordinates must be less than field size
} else {
#Rejection sampling: keep redrawing coords[[i]] until it is more than 1 unit from every already-placed resource. The "== Inf" branch handles the first pass, when coords[[i]] is still unset and min() over NAs returns Inf.
while(! min(mapply(getDist, c1 = coords[-i], MoreArgs = list(c2 = coords[i])), na.rm = TRUE) > 1 || min(mapply(getDist, c1 = coords[-i], MoreArgs = list(c2 = coords[i])), na.rm = TRUE) == Inf) { #Prevents resources from being created too close to others. If the minumum distance of the current coordinate from any other coordinate is < 1(works if current coordinate has not been assigned) assign a new value to the current coordinate (Second condition allows for vector of NA's)
if (runif(n = 1) >= P$clus) { coords[[i]] = c(x = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2), y = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2)) #**NEEDS ATTENTION**runs probability of clustering, and if TRUE new resource x AND y coordinates must be between 1 and 2 units from previous resource. Should limit total distance instead
}else coords[[i]] = c(round(coords[[i-1]]["x"] + 1 + runif(n = 1, min = -1, max = 1), digits = 2), round(coords[[i-1]]["y"] + 1 + runif(n = 1, min = -1, max = 1), digits = 2)) #clustered placement: offset each axis by 1 +/- 1 from the previous resource
}
}
coords
}
fillRegen_rate = function(x, P) {
  # Draw one per-step density regeneration rate, uniform on
  # [P$rRegen["min"], P$rRegen["max"]], rounded to 4 digits.
  # The incoming value of x is ignored; the argument exists only so mapply
  # can sweep this function over a data-frame column.
  # To scale regen rate to resource value, modify this function (see header notes).
  round(runif(n = 1, min = P$rRegen[["min"]], max = P$rRegen[["max"]]), digits = 4)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Primate Creation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
primateValidity = function(object) {
  # Validity function for S4 class "primate" (run automatically by new()).
  # Returns TRUE when the object is well formed, otherwise a character string
  # describing the first problem found — the S4 validity convention.
  #
  # Bug fix: in the original, the energy check was placed after return()
  # inside the location branch, so it could never execute. Both checks now run.
  # NEEDS EXPANDING (per original author: more slot checks could be added).
  if (length(object@location) != 2) return("location contains more than 2 coordinates")
  if (length(object@energy) != 1) return("energy is not a single value")
  TRUE
}
# S4 class "primate": location (named x/y pair), LTM (long-term memory data
# frame of known resources), energy balance, forageEf (fraction of a patch
# consumed per forage step), speed (units/step), and movement type
# ("AG" or "hybrid"). newPrimate() is the generator function from setClass.
newPrimate = setClass("primate", slots = c(location = "numeric", LTM = "data.frame", energy = "numeric", forageEf = "numeric", speed = "numeric", type = "character"), validity = primateValidity) #creates the primate class of objects
#*note there are currently no primate specific methods. This exists as a class mostly to allow expansion of subclasses by movement type or dominance in future versions
buildPrimate = function(location, environ, P, type) {
#input: location create a new primate, environment in which to create primate, model parameters, and type of movement primate should use
#output: an object of class primate
#most slots are filled from the parameters list. If forage efficiency is changed, add as a parameter.
#LTM filled from environment, with columns added for gravity and j for location. *NOTE* Future version may implement limited memory size, which will need a more sophistacted LTM creation
#NOTE(review): the gravity/j/AG initialization below duplicates updateLTM(); consider sharing one helper so the two cannot drift apart.
primate = newPrimate(location = location, LTM = environ, energy = P$defEn, forageEf = 0.2, speed = P$pSpeed, type = type)
#gravity: attraction of each resource from the primate's current location
primate@LTM$gravity = mapply(calcGravity, coordinates = primate@LTM$coordinates,
return_value = primate@LTM$return_value, density = primate@LTM$density, MoreArgs= list(location = primate@location, k = P$k))
#j: total pull each resource receives from all other resources
primate@LTM$j = mapply(calc_j, i = 1:nrow(primate@LTM),
MoreArgs = list(coords = primate@LTM$coordinates, values = primate@LTM$return_value, dens = primate@LTM$density, k = P$k))
#AG: additive-gravity score = own gravity + mean neighborhood pull
primate@LTM = transform(primate@LTM, AG = gravity + j/nrow(primate@LTM))
primate
}
createGroup = function(grSize, environ, P, type) {
#allows groups of primates to be created in seperate lists. This currently has no functionality, but allows for easier addition of group behavior rules in later versions
#input: grSize = number of primates in the group; returns a list of primate objects
createGroup_result = NULL # (placeholder comment removed — see replicate below)
group = replicate(grSize, buildPrimate(location = environ$coordinates[[which(environ$return_value == max(environ$return_value))[1]]], environ = environ, P = P, type = type)) #places all primates on most valueable resource
group
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Calculations and Tools~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
calcForage = function(primate, environ, P) {
#Expected energy return from one forage step at the primate's current patch.
#NOTE(review): assumes the primate is standing exactly on a resource; if not,
#`patch` is integer(0) and the if() below errors — callers (decideAction/runStep)
#appear to guarantee this, but confirm before reusing elsewhere.
patch = which(environ$coordinates %in% list(primate@location))
if (environ$density[patch] < primate@forageEf) { 0 #disallows foraging if patch density is lower than primate's foraging efficiency
#diminishing-returns factor: at depI = 0 density has no effect, at depI = 1 return scales linearly with density
} else environ$return_value[patch] * primate@forageEf * environ$extract_rate[patch] * (environ$density[patch]/(P$depI + (1 - P$depI) * environ$density[patch]))
}
calcGravity = function(location, coordinates, return_value, density, k) {
  # Simple gravity score of one resource as seen from `location`:
  # (return_value * density) / distance^k.
  # Takes raw coordinates (not a primate object) so the same routine can score
  # resource-to-resource attraction when computing j.
  d = getDist(location, coordinates)
  mass = return_value * density
  if (d == 0) {
    NA            # a resource exerts no pull on itself
  } else if (d < 1) {
    mass          # reaching it still costs a full step, so never divide by < 1
  } else {
    mass / d^k
  }
}
calc_j = function(i, coords, values, dens, k) {
  # Total gravity exerted on resource i by every other resource: score all
  # resources from resource i's coordinates and sum, dropping the NA that
  # calcGravity returns for resource i itself.
  pulls = mapply(calcGravity, coordinates = coords, return_value = values,
                 density = dens, MoreArgs = list(location = coords[[i]], k = k))
  sum(pulls, na.rm = TRUE)
}
calcDetour = function(primate, possDetours, AGPatch, P) {
#Choose a detour destination on the way to the additive-gravity target.
#possDetours: row names of candidate patches; AGPatch: row name of the AG target.
#A detour is accepted when its resource value (addRes) covers the extra travel
#cost (addTrav) of visiting it before continuing to the AG patch.
#NOTE(review): errors via min() if possDetours becomes empty — callers should
#guarantee AGPatch itself is always a candidate, so recursion terminates there.
LTM = primate@LTM[possDetours,]
target = sampleMod(which(LTM$dist == min(LTM$dist))) #closest candidate, ties broken at random
addRes = LTM$return_value[target] * LTM$density[target]
#extra travel = (here -> detour -> AG patch) minus the direct (here -> AG patch) leg, priced per step
addTrav = (P$mCost + P$metabCost) * (getDist(primate@location, LTM$coordinates[[target]]) + getDist(LTM$coordinates[[target]], LTM$coordinates[[which(row.names(LTM) == AGPatch)]]) - getDist(primate@location, LTM$coordinates[[which(row.names(LTM) == AGPatch)]]))
if (addRes >= addTrav) {return(LTM$coordinates[[target]])
} else calcDetour(primate, possDetours[-which(possDetours == row.names(LTM[target,]))], AGPatch, P) #Recursion! If closest patch not an acceptable detour, try next closest patch
}
getDist = function(c1, c2) {
  # Euclidean distance between two points. Each input may be a named numeric
  # vector or a list carrying elements "x" and "y". Returns NA for
  # non-numeric input and 0 when the two points are identical.
  p1 = unlist(c1)
  p2 = unlist(c2)
  if (!(is.numeric(p1) && is.numeric(p2))) return(NA)
  if (identical(p1, p2)) return(0)
  unname(sqrt((p1["x"] - p2["x"])^2 + (p1["y"] - p2["y"])^2)) #pythagorean theorem
}
sampleMod = function(x) {
  # Safe wrapper around sample(): sample(x, 1) on a length-one numeric x
  # would draw from 1:x instead of returning x, so a single candidate is
  # passed through unchanged.
  if (length(x) == 1) x else sample(x, size = 1)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Step Actions~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
runStep = function(prevStep) {
#fully updates data by one step
#input/output: a list(allPrimates, environ, P); each call advances the model one time step
#*NOTE* returns NA if no valid action available (most commonly meaning all resources are depleted past the point that foraging outweighs metabolism cost)
environ = prevStep$environ
allPrimates = prevStep$allPrimates
P = prevStep$P
for (i in seq(length(allPrimates))) { #execute step action for each primate
for (j in seq(length(allPrimates[[i]]))) {
primate = allPrimates[[i]][[j]]
action = decideAction(primate, environ, P)
if (action == "move") {primate = move(primate, environ, P)
primate@energy = primate@energy - P$mCost
} else {
if (action == "forage") {
forageReturn = calcForage(primate, environ, P)
primate@energy = primate@energy + forageReturn
patch = which(environ$coordinates %in% list(primate@location))
environ$density[patch] = round(environ$density[patch] - primate@forageEf, digits = 3) #*NOTE* depI and extraction rate are calculated in forage return, but not here for density depletion. Consider updating
} else stop("invalid action")
}
allPrimates[[i]][[j]] = primate
}
}
#second pass: LTM/energy updates are deferred until all primates have acted, so no primate reacts to within-step changes out of order
for (i in seq(length(allPrimates))) { #updates each primate's LTM and energy after all primates have taken action
for (j in seq(length(allPrimates[[i]]))) {
allPrimates[[i]][[j]]@energy = allPrimates[[i]][[j]]@energy - P$metabCost #this line could be moved to previous set of for loops
allPrimates[[i]][[j]]@LTM = updateLTM(allPrimates[[i]][[j]], environ)
}
}
environ = transform(environ, density = round(density + regen_rate, digits = 4)) #resource regeneration
environ$density = sapply(environ$density, function(x) if (x > 1) 1 else x) #limits density to 1 *NOTE* may also need statement to not allow density below 0 if new rules are implemented for staying at a resource
list(allPrimates = allPrimates, environ = environ, P = P)
}
decideAction = function(primate, environ, P) {
#Returns "move" or "forage" for the current step.
#A primate moves when it is not standing on a known resource, or when the
#expected forage return there no longer covers the per-step metabolic cost.
if (! list(primate@location) %in% environ$coordinates) return("move")
if (calcForage(primate, environ, P) < P$metabCost) { return("move")
} else return("forage")
}
move = function(primate, environ, P) {
#Advance the primate one step toward the destination chosen by its movement
#type ("AG" = pure additive gravity, "hybrid" = gravity plus detours).
#If the destination is within one step it is reached exactly; otherwise the
#primate moves `speed` units along the heading (see updateLoc).
if (primate@type == "AG") {dest = getDestAG(primate)
} else { if (primate@type == "hybrid") {dest = getDestHybrid(primate, P)
} else stop("invalid movement type")
}
if (getDist(primate@location, dest) <= 1) {primate@location = dest
} else primate@location = updateLoc(primate, dest)
primate #returns the updated primate object
}
getDestAG = function(primate) {
  # Destination under the pure additive-gravity rule: the coordinates of the
  # resource with the highest AG score in the primate's long-term memory,
  # ties broken by a uniform random draw.
  best = which(primate@LTM$AG == max(primate@LTM$AG, na.rm = TRUE))
  primate@LTM$coordinates[[sampleMod(best)]]
}
getDestHybrid = function(primate, P) {
#Destination under the hybrid rule: pick the additive-gravity target, then
#consider profitable detours to closer patches via calcDetour.
AGPatch = row.names(primate@LTM[sampleMod(which(primate@LTM$AG == max(primate@LTM$AG, na.rm = TRUE))),])
primate@LTM$dist = mapply(getDist, c1 = primate@LTM$coordinates, MoreArgs = list(c2 = primate@location))
rCurLoc = primate@LTM[which(primate@LTM$dist > 0),] #LTM without resource of current location
rLowDens = rCurLoc[which(rCurLoc$density > primate@forageEf),] #drop patches too depleted to forage
possDetours = row.names(rLowDens[which(rLowDens$dist <= rLowDens[AGPatch, "dist"]),])
#NOTE(review): possDetours only requires a candidate to be no farther than the
#AG patch from the current location — it does not require it to lie toward the
#AG patch. This looks like the source of the "detour farther than AG
#destinations" issue flagged in the file header; confirm before relying on it.
dest = calcDetour(primate, possDetours, AGPatch, P)
dest
}
updateLoc = function(primate, dest) {
#Move the primate `speed` units along the straight line toward dest.
#atan2's formal arguments are (y, x); passing them by name here yields the
#standard angle from the positive x-axis, so cos(heading) -> x displacement
#and sin(heading) -> y displacement are consistent.
heading = unname(atan2(x = dest["x"] - primate@location["x"], y = dest["y"] - primate@location["y"]))
moveVector = c(x = cos(heading) * primate@speed, y = sin(heading) * primate@speed)
primate@location + moveVector
}
updateLTM = function(primate, environ) {
#Refresh the primate's long-term memory after a step: copy current resource
#densities from the environment, then recompute gravity, j, and AG (same
#formulas as in buildPrimate).
#NOTE(review): P is not a parameter here — P$k below reads the global P.
#Works in this script but will break if updateLTM is reused with a different
#parameter set; consider passing P explicitly.
primate@LTM$density = environ$density
primate@LTM$gravity = mapply(calcGravity, coordinates = primate@LTM$coordinates,
return_value = primate@LTM$return_value, density = primate@LTM$density, MoreArgs= list(location = primate@location, k = P$k))
primate@LTM$j = mapply(calc_j, i = 1:nrow(primate@LTM),
MoreArgs = list(coords = primate@LTM$coordinates, values = primate@LTM$return_value, dens = primate@LTM$density, k = P$k))
primate@LTM = transform(primate@LTM, AG = gravity + j/nrow(primate@LTM))
primate@LTM
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Run Functions~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
initialize = function(P, environ, type) {
#Builds the step-0 state for one movement type: one group of primates per
#entry in P$grSizes, all placed by createGroup, bundled with the environment
#and parameters in the list(allPrimates, environ, P) shape runStep expects.
#NOTE(review): this masks methods::initialize (the S4 generic); harmless in
#this script since S4 objects are built via newPrimate(), but renaming
#(e.g. initSimulation) would be safer.
allPrimates = as.list(P$grSizes)
allPrimates = mapply(createGroup, allPrimates, MoreArgs = list(environ = environ, P = P, type = type), SIMPLIFY = FALSE)
initialStep = list(allPrimates = allPrimates, environ = environ, P = P)
initialStep
}
runSimulation = function(P, nsteps) {
  # Run one simulation per movement type in P$types, all sharing the same
  # randomly generated environment so the types are directly comparable.
  # Each type's run is a list of per-step states (see runStep); a run ends
  # early once every resource is depleted below the primate's foraging
  # efficiency, so trailing elements may be NULL.
  #
  # Bug fix: the original sized allTypes with the *global* variable `types`
  # rather than P$types, silently coupling the function to script state.
  allTypes = vector("list", length(P$types))
  names(allTypes) = P$types
  initEnviron = createEnviron(P)
  for (type in P$types) {
    allTypes[[type]] = vector("list", nsteps)
    allTypes[[type]][[1]] = initialize(P, initEnviron, type)
    n = 2
    # stop early when no patch is dense enough for the (single) primate to forage
    while (n <= nsteps && max(allTypes[[type]][[n - 1]]$environ$density) >= allTypes[[type]][[n - 1]]$allPrimates[[1]][[1]]@forageEf) {
      allTypes[[type]][[n]] = runStep(allTypes[[type]][[n - 1]])
      n = n + 1
    }
  }
  allTypes
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Plotting and Data Analysis~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
plotRun = function(run) {
#Plots an entire data set one step at a time. One plot in graphics window for each movement type simulated plus another for trace of energy in each type
#Stops if input is NA, generally indicating the primates have consumed all available resources in a time step.
#Side effects only: draws to the active graphics device and sleeps 1s per step to animate.
par(mfcol = c(1, length(run) + 1)) #one column per movement type + one for the energy trace
for (i in seq(length(run[[1]]))) {
for (j in seq(length(run))) {
plotStep(run[[j]][[i]], i)
}
plotEnergy(run, i)
Sys.sleep(1) #pause so the animation is visible
}
}
plotStep = function(step, stepNum) {
#Scatter plot of one step: resources as open circles, primates as red crosses
#labelled with their current energy. Title = "<type> step <stepNum>".
#extract resource x/y coordinates from the list column
resX = vector("list", nrow(step$environ))
resX = mapply(function(x, coords) coords["x"], x = resX, coords = step$environ$coordinates)
resY = vector("list", nrow(step$environ))
resY = mapply(function(y, coords) coords["y"], y = resY, coords = step$environ$coordinates)
#extract primate positions and energies (flattening the group structure)
primX = vector("list", length(unlist(step$allPrimates)))
primX = mapply(function(x, prim) prim@location["x"], x = primX, prim = unlist(step$allPrimates))
primY = vector("list", length(unlist(step$allPrimates)))
primY = mapply(function(y, prim) prim@location["y"], y = primY, prim = unlist(step$allPrimates))
energies = vector("list", length(unlist(step$allPrimates)))
energies = mapply(function(x, prim) prim@energy, x = energies, prim = unlist(step$allPrimates))
plot(x = resX, y = resY, main = paste(step$allPrimates[[1]][[1]]@type, "step", stepNum, sep = " "))
points(x = primX, y = primY, col = 2, pch = 4)
textxy(primX, primY, energies, col = 2, cex = 1) #energy labels (calibrate package)
}
plotEnergy = function(run, steps = 0) {
#Line plot of primate energy over time, one line per movement type (colour = type index).
#steps = 0 plots the whole run; otherwise only the first `steps` steps.
#Does not work for more than one primate per step. Consider using sum(unlist(allPrimates)) to implement this functionality
if (steps == 0) steps = length(run[[1]])
if (steps == 1) {plot(x = seq(2), y = c(0, 0), type = "l") #flat placeholder: a trace needs at least 2 points
} else {
energies = vector("list", length(run))
run = lapply(run, function(run) run[1:steps]) #truncate every type to the requested window
energies = mapply(mapply, step = run, MoreArgs = list(FUN = function(step) step$allPrimates[[1]][[1]]@energy))
plot(energies[,1], type = "l")
for (i in 2:length(run)) lines(x = energies[,i], col = i)
}
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
####################################Run Scripts################################################
#~~~~~~Simple~~~~~~~~#
#NOTE(review): these lines are NOT commented out, so sourcing this file runs a
#full 100-step simulation and plots it; comment them out to load functions only.
nsteps = 100
run = runSimulation(P, nsteps)
plotRun(run)
| /navigation sim v1.r | no_license | aqvining/Animal-nav-abm | R | false | false | 20,230 | r | ##Version Updates
#Version 1: everything is new!
#Someday, I will describe the model here
###Notes for Alexander###
##Things that need fixing:
#~~noticed that detour sometimes goes to a resource further away than AG destinations. This should not be happening.
##Things to improve:
#~~Additive gravity rule (weight of j vs. G)
#~~Equation for when primates leave a patch
#~~Clustering and other spatial statistics of resources
#~~Magic numbers removed and added as parameters (extraction_rate)
##Things to add:
#~~resource birth/death
#~~new rules for starting locations
#~~limited LTM, LTM updating by sight
#~~increase functionality with multiple primates
#~~rank order behaviors and other group behavior rules
##Things to consider
#~~Detour will go to any resource closer than AG, consider requiring detour to move toward AG
#~~implementation of NNR could be informative, could also unnecessarily slow processing time
#####################################SET-UP##################################################
library(plyr)
library(calibrate)
library(graphics)
#Model parameters. All are bundled into the list P (bottom) so they can be
#passed to functions as one argument; edit the values here, not inside P.
defaultEnergy = 0 #energy primates start with by default; can be overwritten during primate creation
fieldSize = 10 #maximum absolute x/y coordinate of a resource (field is a square)
resourceAvail = 600 #average total resource availability; expected mean resource size = this / resourceNum
resSizeRange = 7 #maximum deviation of a resource from the expected mean size (uniform draw)
resourceNum = 20 #number of resources generated
cluster = 0.2 #probability a new resource is created 1-2 units from the previous resource
depletionIndex = 0 #scales diminishing returns: at 0 density has no impact on return, at 1 return is multiplied by density
k = 1 #distance exponent in the gravity model; simple gravity score is Size / Distance^k
primateSpeed = 1 #how many units a primate can move per step
groupSizes = c(1) #one element per group; the value is the group size. plotEnergy does not yet handle multiple primates
moveCost = 2 #energy decrease each step a primate moves
metabCost = 2 #energy decrease every step (added to moveCost when moving); also the leave-patch threshold for expected foraging return
rRegen = c(min = 0, max = 0) #range of per-step density regeneration assigned per resource at environment creation
types = c("AG", "hybrid") #movement models to simulate; only "AG" and "hybrid" currently have methods
#Fix: pSpeed was hard-coded to 1, silently ignoring primateSpeed above; it now
#takes the parameter so changing primateSpeed actually reaches the simulation.
P = list(defEn = defaultEnergy, fieldSize = fieldSize, resAvail = resourceAvail, resSR = resSizeRange, resNum = resourceNum, clus = cluster, depI = depletionIndex, k = k, pSpeed = primateSpeed, grSizes = groupSizes, mCost = moveCost, metabCost = metabCost, rRegen = rRegen, types = types) #saves all parameters to a list that can easily be passed to functions
###################################FUNCTIONS####################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Environment Creation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
createEnviron = function(P) {
  #input: List of parameters
  #output: data frame with number of rows = P$resNum and capital letters for row names.
  #Columns: coordinates (list column of named c(x, y) vectors), return_value,
  #extract_rate, density (starts at 1), and regen_rate.
  #NOTE(review): with more than 26 resources LETTERS runs out and row names
  #become NA, which breaks functions that look rows up by name (e.g. calcDetour).
  environ = data.frame(matrix(0, nrow = P$resNum, ncol = 5, dimnames = list(LETTERS[1:P$resNum], c("coordinates", "return_value", "extract_rate", "density", "regen_rate"))))
  environ$coordinates = vector("list", length(environ$coordinates)) #changes coordinates column to a list (allows mapply to fill each element with a vector)
  #seq_len is safe when resNum could be 0 (1:nrow would yield c(1, 0));
  #fillCoords sees the whole list so it can keep spacing from earlier resources
  for (i in seq_len(nrow(environ))) environ$coordinates[[i]] = fillCoords(environ$coordinates, i, P)[[i]]
  environ$return_value = sapply(environ$return_value, function(x) round(runif(n = 1, min = P$resAvail/P$resNum - P$resSR, max = P$resAvail/P$resNum + P$resSR), digits = 0))
  environ$extract_rate = 1 #If changed from one, add as a parameter
  environ$density = 1
  environ$regen_rate = mapply(fillRegen_rate, environ$regen_rate, MoreArgs = list(P = P))
  environ
}
fillCoords = function(coords, i, P) {
  #input: a list of coordinates (can be empty), index to fill, and list of parameters
  #output: new list of coordinates with element i generated
  #only generates one set of coordinates, but takes the whole list as input so previous coordinates can be referenced; elements of the list prior to i should already be filled
  #note* The output is the whole list of coordinates, so be sure to index the result if you only want the coordinates that were generated.
  if (i == 1) {coords[[i]] = c(x = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2), y = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2)) #First element of list is generated randomly. Absolute value of generated coordinates must be less than field size
  } else {
    #re-draw until the candidate is at least 1 unit from every existing resource;
    #the Inf branch keeps the loop alive while coords[[i]] is still unset (all-NA distances)
    while(! min(mapply(getDist, c1 = coords[-i], MoreArgs = list(c2 = coords[i])), na.rm = TRUE) > 1 || min(mapply(getDist, c1 = coords[-i], MoreArgs = list(c2 = coords[i])), na.rm = TRUE) == Inf) { #Prevents resources from being created too close to others. If the minimum distance of the current coordinate from any other coordinate is < 1 (works if current coordinate has not been assigned) assign a new value to the current coordinate (Second condition allows for vector of NA's)
      if (runif(n = 1) >= P$clus) { coords[[i]] = c(x = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2), y = round(runif(n = 1, min = -P$fieldSize, max = P$fieldSize), digits = 2)) #**NEEDS ATTENTION**runs probability of clustering, and if TRUE new resource x AND y coordinates must be between 1 and 2 units from previous resource. Should limit total distance instead
      }else coords[[i]] = c(round(coords[[i-1]]["x"] + 1 + runif(n = 1, min = -1, max = 1), digits = 2), round(coords[[i-1]]["y"] + 1 + runif(n = 1, min = -1, max = 1), digits = 2)) #clustered draw: offset 0-2 units from the previous resource on each axis
    }
  }
  coords
}
fillRegen_rate = function(x, P) {
  #Draws one per-step regeneration rate uniformly between P$rRegen["min"] and
  #P$rRegen["max"], rounded to 4 decimal places. The x argument is ignored and
  #exists only so the function can be mapped over a column (see createEnviron).
  bounds <- P$rRegen
  round(runif(n = 1, min = bounds[["min"]], max = bounds[["max"]]), digits = 4)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Primate Creation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
primateValidity = function(object) {
  #Validity checker for the "primate" S4 class: returns TRUE when the object is
  #well formed, otherwise a string describing the first problem found.
  #Fix: the energy check was nested AFTER the return() inside the location
  #branch and could never execute; the checks are now sequential. The location
  #message also fires for fewer than 2 coordinates, so it now says "exactly 2".
  #NEEDS EXPANDING
  if (length(object@location) != 2) return("location does not contain exactly 2 coordinates")
  if (length(object@energy) != 1) return("energy is not a single value")
  TRUE
}
#Defines the "primate" S4 class and its generator. Slots: location (named x/y
#vector), LTM (long-term memory data frame mirroring the environment plus
#gravity/j/AG columns), energy, forageEf, speed, and movement type.
newPrimate = setClass("primate", slots = c(location = "numeric", LTM = "data.frame", energy = "numeric", forageEf = "numeric", speed = "numeric", type = "character"), validity = primateValidity) #creates the primate class of objects
#*note there are currently no primate specific methods. This exists as a class mostly to allow expansion of subclasses by movement type or dominance in future versions
buildPrimate = function(location, environ, P, type) {
  #input: location at which to create a new primate, environment in which to create it, model parameters, and type of movement the primate should use
  #output: an object of class primate
  #most slots are filled from the parameters list. If forage efficiency is changed, add it as a parameter.
  #LTM is filled from the environment, with columns added for gravity and j per location. *NOTE* Future versions may implement limited memory size, which will need a more sophisticated LTM creation
  primate = newPrimate(location = location, LTM = environ, energy = P$defEn, forageEf = 0.2, speed = P$pSpeed, type = type)
  #gravity: pull of each patch as felt from the primate's current location
  primate@LTM$gravity = mapply(calcGravity, coordinates = primate@LTM$coordinates,
                               return_value = primate@LTM$return_value, density = primate@LTM$density, MoreArgs= list(location = primate@location, k = P$k))
  #j: summed pull felt AT each patch from every other patch
  primate@LTM$j = mapply(calc_j, i = 1:nrow(primate@LTM),
                         MoreArgs = list(coords = primate@LTM$coordinates, values = primate@LTM$return_value, dens = primate@LTM$density, k = P$k))
  #AG: additive-gravity score combining direct pull and neighborhood value
  primate@LTM = transform(primate@LTM, AG = gravity + j/nrow(primate@LTM))
  primate
}
createGroup = function(grSize, environ, P, type) {
  #Builds one group of grSize primates, all placed on the most valuable
  #resource. Groups are kept as separate lists purely to ease adding
  #group-behavior rules in later versions.
  richest <- which(environ$return_value == max(environ$return_value))[1]
  start <- environ$coordinates[[richest]]
  lapply(seq_len(grSize), function(member) {
    buildPrimate(location = start, environ = environ, P = P, type = type)
  })
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Calculations and Tools~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
calcForage = function(primate, environ, P) {
  #Expected foraging return for the primate at its current patch. Returns 0
  #when the patch density has fallen below the primate's foraging efficiency.
  patch = which(environ$coordinates %in% list(primate@location))
  if (environ$density[patch] < primate@forageEf) return(0) #patch too depleted to forage
  #depletion factor: scales return by density according to the depletion index P$depI
  depletion = environ$density[patch] / (P$depI + (1 - P$depI) * environ$density[patch])
  environ$return_value[patch] * primate@forageEf * environ$extract_rate[patch] * depletion
}
calcGravity = function(location, coordinates, return_value, density, k) {
  #Gravity score of one resource as seen from `location`:
  #(return_value * density) / distance^k. Returns NA for the resource the
  #observer stands on (distance 0); distances under one unit are not divided,
  #because reaching that resource still takes a full step.
  #*note* takes raw coordinates rather than a primate object so resource
  #coordinates can also serve as the origin when computing j.
  d = getDist(location, coordinates)
  m = return_value * density
  if (d == 0) {
    NA
  } else if (d < 1) {
    m
  } else {
    m / (d ** k)
  }
}
calc_j = function(i, coords, values, dens, k) {
  #Total gravity ("j") felt AT resource i from every other resource: the sum of
  #calcGravity scores with resource i's coordinates as the origin. The NA that
  #calcGravity returns for resource i itself is dropped by na.rm.
  sum(mapply(calcGravity, coordinates = coords, return_value = values, density = dens, MoreArgs = list(location = coords[[i]], k = k)), na.rm = TRUE)
}
calcDetour = function(primate, possDetours, AGPatch, P) {
  #Recursively chooses a detour destination. Starting from the closest
  #candidate in possDetours (row names into the primate's LTM), a detour is
  #accepted when the resources gained (value * density) at least cover the
  #additional travel cost of visiting it before the AG destination; otherwise
  #the candidate is dropped and the next closest is tried.
  #NOTE(review): per the file-top to-do list, detours can end up farther away
  #than the AG destination; acceptance compares only cost vs. gain, not direction.
  LTM = primate@LTM[possDetours,]
  target = sampleMod(which(LTM$dist == min(LTM$dist))) #closest candidate, ties broken at random
  addRes = LTM$return_value[target] * LTM$density[target] #resources gained by taking the detour
  #extra travel = (here -> detour) + (detour -> AG patch) - (here -> AG patch), costed per step
  addTrav = (P$mCost + P$metabCost) * (getDist(primate@location, LTM$coordinates[[target]]) + getDist(LTM$coordinates[[target]], LTM$coordinates[[which(row.names(LTM) == AGPatch)]]) - getDist(primate@location, LTM$coordinates[[which(row.names(LTM) == AGPatch)]]))
  if (addRes >= addTrav) {return(LTM$coordinates[[target]])
  } else calcDetour(primate, possDetours[-which(possDetours == row.names(LTM[target,]))], AGPatch, P) #Recursion! If closest patch not an acceptable detour, try next closest patch
}
getDist = function(c1, c2) {
  #Euclidean distance between two points supplied as (possibly list-wrapped)
  #coordinate sets with named elements "x" and "y". Returns NA when either
  #input is not numeric after unlisting, and 0 for identical points.
  p1 = unlist(c1)
  p2 = unlist(c2)
  if (!is.numeric(p1) || !is.numeric(p2)) return(NA)
  if (identical(p1, p2)) return(0)
  dx = p1["x"] - p2["x"]
  dy = p1["y"] - p2["y"]
  unname(sqrt(dx * dx + dy * dy)) #pythagorean theorem
}
sampleMod = function(x) {
  #sample() treats a single number n as the population 1:n; this wrapper
  #returns a length-1 input unchanged so a random draw only happens when there
  #is a genuine choice to make.
  if (length(x) == 1) x else sample(x, size = 1)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Step Actions~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
runStep = function(prevStep) {
  #fully updates the simulation by one step: each primate acts (move or
  #forage), then metabolism and memories are updated, then resources regenerate
  #prevStep: list(allPrimates, environ, P) as produced by initialize()/runStep()
  #*NOTE* returns NA if no valid action available (most commonly meaning all resources are depleted past the point that foraging outweighs metabolism cost)
  environ = prevStep$environ
  allPrimates = prevStep$allPrimates
  P = prevStep$P
  for (i in seq(length(allPrimates))) { #execute step action for each primate
    for (j in seq(length(allPrimates[[i]]))) {
      primate = allPrimates[[i]][[j]]
      action = decideAction(primate, environ, P)
      if (action == "move") {primate = move(primate, environ, P)
      primate@energy = primate@energy - P$mCost
      } else {
        if (action == "forage") {
          forageReturn = calcForage(primate, environ, P)
          primate@energy = primate@energy + forageReturn
          patch = which(environ$coordinates %in% list(primate@location)) #index of the patch the primate stands on
          environ$density[patch] = round(environ$density[patch] - primate@forageEf, digits = 3) #*NOTE* depI and extraction rate are calculated in forage return, but not here for density depletion. Consider updating
        } else stop("invalid action")
      }
      allPrimates[[i]][[j]] = primate
    }
  }
  for (i in seq(length(allPrimates))) { #updates each primate's LTM and energy after all primates have taken action
    for (j in seq(length(allPrimates[[i]]))) {
      allPrimates[[i]][[j]]@energy = allPrimates[[i]][[j]]@energy - P$metabCost #this line could be moved to previous set of for loops
      allPrimates[[i]][[j]]@LTM = updateLTM(allPrimates[[i]][[j]], environ)
    }
  }
  environ = transform(environ, density = round(density + regen_rate, digits = 4)) #resource regeneration
  environ$density = sapply(environ$density, function(x) if (x > 1) 1 else x) #limits density to 1 *NOTE* may also need a statement to not allow density below 0 if new rules are implemented for staying at a resource
  list(allPrimates = allPrimates, environ = environ, P = P)
}
decideAction = function(primate, environ, P) {
  #Returns "forage" when the primate stands on a known patch whose expected
  #return covers its metabolic cost, otherwise "move". The && short-circuits,
  #so calcForage is never evaluated off-patch.
  onPatch = list(primate@location) %in% environ$coordinates
  if (onPatch && calcForage(primate, environ, P) >= P$metabCost) "forage" else "move"
}
move = function(primate, environ, P) {
  #Advances the primate one step toward the destination chosen by its movement
  #model, snapping exactly onto the destination once it is within one unit.
  #(environ is unused but kept for call-site compatibility with the action loop.)
  dest = switch(primate@type,
                AG = getDestAG(primate),
                hybrid = getDestHybrid(primate, P),
                stop("invalid movement type"))
  if (getDist(primate@location, dest) <= 1) {
    primate@location = dest
  } else {
    primate@location = updateLoc(primate, dest)
  }
  primate
}
getDestAG = function(primate) {
  #Coordinates of the patch with the highest additive-gravity (AG) score;
  #ties are broken by a random draw via sampleMod.
  best = which(primate@LTM$AG == max(primate@LTM$AG, na.rm = TRUE))
  dest = primate@LTM$coordinates[[sampleMod(best)]]
}
getDestHybrid = function(primate, P) {
  #Hybrid movement: pick the best additive-gravity patch, then allow a detour
  #through a nearer, sufficiently dense patch when the extra resources cover
  #the extra travel cost (see calcDetour).
  AGPatch = row.names(primate@LTM[sampleMod(which(primate@LTM$AG == max(primate@LTM$AG, na.rm = TRUE))),]) #row name of the additive-gravity destination, ties broken at random
  primate@LTM$dist = mapply(getDist, c1 = primate@LTM$coordinates, MoreArgs = list(c2 = primate@location)) #distance from the primate to every known patch
  rCurLoc = primate@LTM[which(primate@LTM$dist > 0),] #LTM without resource of current location
  rLowDens = rCurLoc[which(rCurLoc$density > primate@forageEf),] #keep only patches dense enough to forage on
  possDetours = row.names(rLowDens[which(rLowDens$dist <= rLowDens[AGPatch, "dist"]),]) #candidate detours: no farther away than the AG destination
  dest = calcDetour(primate, possDetours, AGPatch, P)
  dest
}
updateLoc = function(primate, dest) {
  #Moves the primate's location one speed-length unit along the straight line
  #toward dest. atan2(y, x) yields the heading angle from the positive x-axis.
  dx = dest[["x"]] - primate@location[["x"]]
  dy = dest[["y"]] - primate@location[["y"]]
  heading = atan2(y = dy, x = dx)
  step = c(x = cos(heading), y = sin(heading)) * primate@speed
  primate@location + step
}
updateLTM = function(primate, environ, P = get("P", envir = globalenv())) {
  #Refreshes a primate's long-term memory after a step: copies current patch
  #densities from the environment, then recomputes gravity, j, and the
  #combined AG score for every patch.
  #Fix: the body previously read the gravity exponent from the GLOBAL variable
  #P (via P$k), silently ignoring the per-run parameter list held by runStep.
  #P is now an argument defaulting to the global for backward compatibility
  #with existing two-argument calls; callers should pass their own P.
  primate@LTM$density = environ$density
  primate@LTM$gravity = mapply(calcGravity, coordinates = primate@LTM$coordinates,
                               return_value = primate@LTM$return_value, density = primate@LTM$density, MoreArgs= list(location = primate@location, k = P$k))
  primate@LTM$j = mapply(calc_j, i = seq_len(nrow(primate@LTM)),
                         MoreArgs = list(coords = primate@LTM$coordinates, values = primate@LTM$return_value, dens = primate@LTM$density, k = P$k))
  primate@LTM = transform(primate@LTM, AG = gravity + j/nrow(primate@LTM))
  primate@LTM
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Run Functions~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
initialize = function(P, environ, type) {
  #Builds step 1 of a simulation: one list of primate groups (sizes taken from
  #P$grSizes, all of the given movement type) plus the starting environment.
  #NOTE(review): this name shadows the S4 generic methods::initialize.
  groups = lapply(P$grSizes, createGroup, environ = environ, P = P, type = type)
  list(allPrimates = groups, environ = environ, P = P)
}
runSimulation = function(P, nsteps) {
  #Runs one simulation of at most nsteps steps for every movement type in
  #P$types, each type starting from the same generated environment. A type
  #stops early once no patch is dense enough for its first primate to forage.
  #Fix: the result list was sized with the GLOBAL variable `types` instead of
  #P$types (the very next line already used P$types), so a parameter list with
  #a different type set would mis-size or crash; both now use P$types.
  allTypes = vector("list", length(P$types))
  names(allTypes) = P$types
  initEnviron = createEnviron(P)
  for (type in P$types) {
    allTypes[[type]] = vector("list", nsteps)
    allTypes[[type]][[1]] = initialize(P, initEnviron, type)
    n = 2
    while (n <= nsteps && max(allTypes[[type]][[n - 1]]$environ$density) >= allTypes[[type]][[n - 1]]$allPrimates[[1]][[1]]@forageEf) {
      allTypes[[type]][[n]] = runStep(allTypes[[type]][[n - 1]])
      n = n + 1
    }
  }
  allTypes
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Plotting and Data Analysis~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
plotRun = function(run) {
  #Plots an entire data set one step at a time: one panel per movement type
  #simulated, plus a final panel tracing the energy of each type.
  #run: named list (one element per movement type) of per-step state lists,
  #as returned by runSimulation().
  #NOTE(review): the original comment claimed this "stops if input is NA", but
  #no such check exists; steps never filled in (NULL) will error in plotStep.
  #Fix: seq(length(x)) replaced with seq_along(x), which is safe for empty runs.
  par(mfcol = c(1, length(run) + 1))
  for (i in seq_along(run[[1]])) {
    for (j in seq_along(run)) {
      plotStep(run[[j]][[i]], i)
    }
    plotEnergy(run, i)
    Sys.sleep(1) #pause so each step stays visible before the next draw
  }
}
plotStep = function(step, stepNum) {
  #Draws a single simulation step: resources as open points, primates as red
  #crosses labelled with their current energy (calibrate::textxy).
  #step: list with $environ (resource data frame) and $allPrimates (list of
  #groups of primate objects); stepNum: step index, used only in the title.
  #Fix: the dummy preallocated lists fed through mapply added nothing; vapply
  #over the real data is equivalent, shorter, and type-checked.
  resX = vapply(step$environ$coordinates, function(coords) coords[["x"]], numeric(1))
  resY = vapply(step$environ$coordinates, function(coords) coords[["y"]], numeric(1))
  primates = unlist(step$allPrimates) #flatten groups into one list of primates
  primX = vapply(primates, function(prim) prim@location[["x"]], numeric(1))
  primY = vapply(primates, function(prim) prim@location[["y"]], numeric(1))
  energies = vapply(primates, function(prim) prim@energy, numeric(1))
  plot(x = resX, y = resY, main = paste(primates[[1]]@type, "step", stepNum, sep = " "))
  points(x = primX, y = primY, col = 2, pch = 4)
  textxy(primX, primY, energies, col = 2, cex = 1) #energy labels next to each primate
}
plotEnergy = function(run, steps = 0) {
  #Plots an energy trace (one line per movement type) over the first `steps` steps.
  #run: named list of per-step state lists, one element per movement type.
  #steps: number of steps to include; 0 (default) means the whole run.
  #Does not work for more than one primate per step. Consider using sum(unlist(allPrimates)) to implement this functionality
  if (steps == 0) steps = length(run[[1]])
  if (steps == 1) {plot(x = seq(2), y = c(0, 0), type = "l") #placeholder flat line: one step has no trace to draw
  } else {
    energies = vector("list", length(run))
    run = lapply(run, function(run) run[1:steps]) #truncate every type to the requested number of steps
    #inner mapply pulls the first primate's energy from each step; the outer
    #mapply repeats that per movement type, giving a steps x types matrix
    energies = mapply(mapply, step = run, MoreArgs = list(FUN = function(step) step$allPrimates[[1]][[1]]@energy))
    plot(energies[,1], type = "l")
    for (i in 2:length(run)) lines(x = energies[,i], col = i) #overlay remaining types in distinct colors
  }
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
####################################Run Scripts################################################
#~~~~~~Simple~~~~~~~~#
#NOTE(review): these lines are NOT commented out, so sourcing this file runs a
#full 100-step simulation and plots it; comment them out to load functions only.
nsteps = 100
run = runSimulation(P, nsteps)
plotRun(run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_table.R
\name{clean_byGroup}
\alias{clean_byGroup}
\title{clean table by the group}
\usage{
clean_byGroup(meta_table, group)
}
\arguments{
\item{meta_table}{tibble}
\item{group}{Unquoted name of the column in \code{meta_table} to group by before cleaning (e.g. \code{treatment}, as in the example).}
}
\value{
tibble
}
\description{
clean table by the group
}
\examples{
data(combined_table)
clean_table_result <- clean_byGroup(combined_table, treatment)
}
| /man/clean_byGroup.Rd | permissive | luoboqingcai81/metaNIE | R | false | true | 436 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_table.R
\name{clean_byGroup}
\alias{clean_byGroup}
\title{clean table by the group}
\usage{
clean_byGroup(meta_table, group)
}
\arguments{
\item{meta_table}{tibble}
\item{group}{Unquoted name of the column in \code{meta_table} to group by before cleaning (e.g. \code{treatment}, as in the example).}
}
\value{
tibble
}
\description{
clean table by the group
}
\examples{
data(combined_table)
clean_table_result <- clean_byGroup(combined_table, treatment)
}
|
# Keeps only the mean() and std() features from the raw data and names the
# resulting columns after those features.
# data: raw measurement data.frame; featureDim: vector of feature names,
# aligned with the columns of `data`.
# Fix: added drop = FALSE so a single matching feature still yields a
# data.frame (previously it collapsed to a bare vector and the names<-
# assignment mislabelled the vector's ELEMENTS instead of the column).
FilterDataColumns <- function(data, featureDim) {
  indices <- which(grepl("mean()", featureDim, fixed = TRUE) |
                     grepl("std()", featureDim, fixed = TRUE))
  newData <- data[, indices, drop = FALSE]
  names(newData) <- featureDim[indices]
  newData
}
# Replace activity ids by their human-readable names
CleanActivity <- function(fact, dim) {
  # Join the observed ids against the lookup table, then keep only the name
  # column as a one-column data.frame. Note merge() sorts rows by the join key.
  merged <- merge(fact, dim, by.x = "activityId", by.y = "id")
  merged[, 2, drop = FALSE]
}
# Reads one data split ("train"/"test"), keeps the mean/std features, and
# attaches the subject id and activity name to every observation.
# Fix: the previous merge(..., by = 0) joins on row names as CHARACTER strings
# ("1", "10", "100", ...), which reorders rows lexicographically between the
# two merges and misaligns subjects with their measurements; CleanActivity's
# output is additionally sorted by activity id, so it was never row-aligned
# with the data to begin with. The activity names are now looked up
# order-preservingly with match(), and the row-aligned frames are cbind-ed.
ReadData <- function(dirName, activityDim, featureDim) {
  dataFile <- paste(dirName, "/X_", dirName, ".txt", sep = "")
  activityFile <- paste(dirName, "/y_", dirName, ".txt", sep = "")
  subjectFile <- paste(dirName, "/subject_", dirName, ".txt", sep = "")
  data <- FilterDataColumns(read.table(dataFile), featureDim)
  activityIds <- read.table(activityFile, col.names = "activityId")
  # map each observation's id to its label without disturbing row order
  activity <- data.frame(
    activity = activityDim$activity[match(activityIds$activityId, activityDim$id)])
  subject <- read.table(subjectFile, col.name = "subjectId")
  # all three frames are row-aligned with the raw files; cbind keeps it that way
  cbind(subject, activity, data)
}
RunAnalysis <- function() {
  # Produces the tidy data set: reads the label files and both data splits,
  # combines them, and returns the mean of every feature for each
  # subject x activity pair, ordered by subject.
  # Assumes activity_labels.txt / features.txt and the train/ and test/
  # directories sit in the working directory.
  # Labels
  activityDim <- read.table("activity_labels.txt",
                            col.names = c("id", "activity"))
  featureDim <- read.table("features.txt")[, 2]
  # Data
  trainData <- ReadData("train", activityDim, featureDim)
  testData <- ReadData("test", activityDim, featureDim)
  data <- rbind(trainData, testData)
  # mean of every feature column, grouped by subject (col 1) and activity (col 2)
  data <- aggregate(data[, -1:-2], by = list(data[, 1], data[, 2]), FUN = mean)
  names(data)[1:2] = c("subject", "activity")
  data[order(data$subject), ]
}
| /run_analysis.R | no_license | Lordshinjo/getdata | R | false | false | 1,710 | r | # Removes unwanted features from the data, and name the rows
# Keeps only the mean() and std() features from the raw data and names the
# resulting columns after those features.
# Fix: added drop = FALSE so a single matching feature still yields a
# data.frame (previously it collapsed to a bare vector and the names<-
# assignment mislabelled the vector's ELEMENTS instead of the column).
FilterDataColumns <- function(data, featureDim) {
  indices <- which(grepl("mean()", featureDim, fixed = TRUE) |
                     grepl("std()", featureDim, fixed = TRUE))
  newData <- data[, indices, drop = FALSE]
  names(newData) <- featureDim[indices]
  newData
}
# Replace activity ids by their human-readable names
CleanActivity <- function(fact, dim) {
  # Join the observed ids against the lookup table, then keep only the name
  # column as a one-column data.frame. Note merge() sorts rows by the join key.
  merged <- merge(fact, dim, by.x = "activityId", by.y = "id")
  merged[, 2, drop = FALSE]
}
# Reads one data split ("train"/"test"), keeps the mean/std features, and
# attaches the subject id and activity name to every observation.
# Fix: the previous merge(..., by = 0) joins on row names as CHARACTER strings
# ("1", "10", "100", ...), which reorders rows lexicographically between the
# two merges and misaligns subjects with their measurements; CleanActivity's
# output is additionally sorted by activity id, so it was never row-aligned
# with the data to begin with. The activity names are now looked up
# order-preservingly with match(), and the row-aligned frames are cbind-ed.
ReadData <- function(dirName, activityDim, featureDim) {
  dataFile <- paste(dirName, "/X_", dirName, ".txt", sep = "")
  activityFile <- paste(dirName, "/y_", dirName, ".txt", sep = "")
  subjectFile <- paste(dirName, "/subject_", dirName, ".txt", sep = "")
  data <- FilterDataColumns(read.table(dataFile), featureDim)
  activityIds <- read.table(activityFile, col.names = "activityId")
  # map each observation's id to its label without disturbing row order
  activity <- data.frame(
    activity = activityDim$activity[match(activityIds$activityId, activityDim$id)])
  subject <- read.table(subjectFile, col.name = "subjectId")
  # all three frames are row-aligned with the raw files; cbind keeps it that way
  cbind(subject, activity, data)
}
RunAnalysis <- function() {
  # Produces the tidy data set: reads the label files and both data splits,
  # combines them, and returns the mean of every feature for each
  # subject x activity pair, ordered by subject.
  # Assumes activity_labels.txt / features.txt and the train/ and test/
  # directories sit in the working directory.
  # Labels
  activityDim <- read.table("activity_labels.txt",
                            col.names = c("id", "activity"))
  featureDim <- read.table("features.txt")[, 2]
  # Data
  trainData <- ReadData("train", activityDim, featureDim)
  testData <- ReadData("test", activityDim, featureDim)
  data <- rbind(trainData, testData)
  # mean of every feature column, grouped by subject (col 1) and activity (col 2)
  data <- aggregate(data[, -1:-2], by = list(data[, 1], data[, 2]), FUN = mean)
  names(data)[1:2] = c("subject", "activity")
  data[order(data$subject), ]
}
|
# Dependencies. Fix: unconditional install.packages() hit the network and
# reinstalled on EVERY run; install only when the package is missing.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse")
library(dplyr)
library(tidyverse)
# NOTE(review): absolute, user-specific paths; consider a project-relative
# data directory built with file.path().
data = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/2018TeamStats_Final.csv', header = TRUE, sep = ",")
sos.data = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/sos.csv')
####Compiling game data to season data####
# Drop per-game columns that are not needed for season totals. One vector +
# setdiff replaces 29 individual `$<- NULL` assignments; columns absent from
# the file are silently skipped, matching the old `$<- NULL` semantics.
drop.cols <- c("gameid", "MP", "FG.", "X2P", "X2PA", "X2P.", "X3PA", "X3P.",
               "FT.", "TRB", "AST", "STL", "BLK", "PF", "PTS", "Opp.FG.",
               "Opp.2P", "Opp.2PA", "Opp.2P.", "Opp.3PA", "Opp.3P.", "Opp.FT.",
               "Opp.TRB", "Opp.AST", "Opp.STL", "Opp.BLK", "Opp.PF", "Opp.PTS",
               "Win.")
data <- data[, setdiff(names(data), drop.cols), drop = FALSE]
# Convert the stat columns (read as text/factor) to numeric in one pass;
# as.character first so factor LEVELS are converted, not their integer codes.
num.cols <- c("FG", "FGA", "X3P", "FT", "ORB", "DRB", "TOV", "FTA",
              "Opp.FG", "Opp.FGA", "Opp.3P", "Opp.FT", "Opp.ORB", "Opp.DRB",
              "Opp.TOV", "Opp.FTA")
data[num.cols] <- lapply(data[num.cols], function(col) as.numeric(as.character(col)))
# Aggregate per-game rows into season totals per team. Naming the columns
# directly in summarise replaces 16 unnamed sum() columns plus 16 post-hoc
# renames with `sum(FG, na.rm = TRUE)`-style headers.
season.data <- data %>%
  group_by(Team) %>%
  summarise(FG = sum(FG, na.rm = TRUE), FGA = sum(FGA, na.rm = TRUE),
            X3P = sum(X3P, na.rm = TRUE), FT = sum(FT, na.rm = TRUE),
            FTA = sum(FTA, na.rm = TRUE), ORB = sum(ORB, na.rm = TRUE),
            DRB = sum(DRB, na.rm = TRUE), TOV = sum(TOV, na.rm = TRUE),
            Opp.FG = sum(Opp.FG, na.rm = TRUE), Opp.FGA = sum(Opp.FGA, na.rm = TRUE),
            Opp.3P = sum(Opp.3P, na.rm = TRUE), Opp.FT = sum(Opp.FT, na.rm = TRUE),
            Opp.FTA = sum(Opp.FTA, na.rm = TRUE), Opp.ORB = sum(Opp.ORB, na.rm = TRUE),
            Opp.DRB = sum(Opp.DRB, na.rm = TRUE), Opp.TOV = sum(Opp.TOV, na.rm = TRUE))
# Reindex (tibbles ignore row names; kept for plain data.frame input)
rownames(season.data) <- 1:nrow(season.data)
# Filter out some teams
# season.data <- filter(season.data)
# Save new data as csv file
write.csv(season.data, file = "/Users/Christian/Documents/Bracketlytics/data/season_data.csv")
####Changing data type of strength-of-schedule data and joining####
# Cleanup: the original block converted, renamed, and lower-cased sos.data
# TWICE and built a full_join whose result was immediately overwritten by the
# right_join; each operation is now performed once (final state is unchanged).
sos.data$SOS <- as.numeric(as.character(sos.data$SOS))
sos.data$Rank <- as.numeric(as.character(sos.data$Rank))
names(sos.data)[names(sos.data) == "School"] <- "Team"
write.csv(sos.data, file = '/Users/Christian/Documents/Bracketlytics/data/sos.csv')
# lower-case team names AFTER the csv write, as before, so the saved file keeps
# the original casing
sos.data$Team <- tolower(sos.data$Team)
# right_join keeps every sos row; na.omit then drops teams without season stats
full_data = right_join(season.data, sos.data, by = "Team")
full_data <- na.omit(full_data)
write.csv(full_data, file = '/Users/Christian/Documents/Bracketlytics/data/season_data.csv')
####Linear regression data: load per-season offense/defense tables####
# Fixes: (1) `file <- '...'` inside each call assigned a stray global `file`
# variable as a side effect; arguments are now passed with `=`. (2) Every
# Def_* table was read from Def_2017-18.csv (copy-paste error); each season
# now reads its own file. NOTE(review): assumes the Def files for the earlier
# seasons exist alongside the Off files -- verify before running.
Off_2017_2018 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2017-18.csv', header = TRUE, sep = ",")
Off_2016_2017 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2016-17.csv', header = TRUE, sep = ",")
Off_2015_2016 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2015-16.csv', header = TRUE, sep = ",")
Off_2014_2015 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2014-15.csv', header = TRUE, sep = ",")
Off_2013_2014 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2013-14.csv', header = TRUE, sep = ",")
Def_2017_2018 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2017-18.csv', header = TRUE, sep = ",")
Def_2016_2017 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2016-17.csv', header = TRUE, sep = ",")
Def_2015_2016 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2015-16.csv', header = TRUE, sep = ",")
Def_2014_2015 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2014-15.csv', header = TRUE, sep = ",")
Def_2013_2014 = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2013-14.csv', header = TRUE, sep = ",")
# Join offensive and defensive files together (one row per School per season)
total_2017_2018 <- full_join(Off_2017_2018, Def_2017_2018, by = "School")
total_2016_2017 <- full_join(Off_2016_2017, Def_2016_2017, by = "School")
total_2015_2016 <- full_join(Off_2015_2016, Def_2015_2016, by = "School")
total_2014_2015 <- full_join(Off_2014_2015, Def_2014_2015, by = "School")
total_2013_2014 <- full_join(Off_2013_2014, Def_2013_2014, by = "School")
# Tag each school name with its season year so rows stay distinguishable once
# the seasons are appended into a single table below
total_2017_2018 <- transform(total_2017_2018, School = sprintf('%s_2018', School))
total_2016_2017 <- transform(total_2016_2017, School = sprintf('%s_2017', School))
total_2015_2016 <- transform(total_2015_2016, School = sprintf('%s_2016', School))
total_2014_2015 <- transform(total_2014_2015, School = sprintf('%s_2015', School))
total_2013_2014 <- transform(total_2013_2014, School = sprintf('%s_2014', School))
# Add NCAA tournament wins for each team (2018 bracket only)
wins_2018 <- read_csv("/Users/Christian/Documents/Bracketlytics/data/model_data/wins.csv")
total_2017_2018 <- full_join(total_2017_2018, wins_2018, by = "School")
# Append the per-season data sets to each other.
# Fix: total_2015_2016 was joined TWICE in the original chain (copy-paste),
# which could duplicate 2015-16 rows; each season is now appended once.
full_data <- full_join(total_2017_2018, total_2016_2017, by = NULL) %>%
  full_join(total_2015_2016, by = NULL) %>%
  full_join(total_2014_2015, by = NULL) %>%
  full_join(total_2013_2014, by = NULL)
# NOTE(review): this name cleanup runs AFTER full_data is built, so the
# appended data keeps the raw names -- confirm whether it should move earlier.
total_2017_2018$School = gsub(" ID.*","",total_2017_2018$School)
####Formatting of 2017 game data####
data = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/2017TeamStats_Test.csv', header = TRUE, sep = ",")
# Drop unneeded per-game columns in one pass (same approach as the 2018 block;
# this file carries no Win. column, so it is not listed).
drop.cols.2017 <- c("gameid", "MP", "FG.", "X2P", "X2PA", "X2P.", "X3PA",
                    "X3P.", "FT.", "TRB", "AST", "STL", "BLK", "PF", "PTS",
                    "Opp.FG.", "Opp.2P", "Opp.2PA", "Opp.2P.", "Opp.3PA",
                    "Opp.3P.", "Opp.FT.", "Opp.TRB", "Opp.AST", "Opp.STL",
                    "Opp.BLK", "Opp.PF", "Opp.PTS")
data <- data[, setdiff(names(data), drop.cols.2017), drop = FALSE]
# Convert columns to numeric data type
data$FG <- as.numeric(as.character(data$FG))
data$FGA <- as.numeric(as.character(data$FGA))
data$X3P <- as.numeric(as.character(data$X3P))
data$FT <- as.numeric(as.character(data$FT))
data$ORB <- as.numeric(as.character(data$ORB))
data$DRB <- as.numeric(as.character(data$DRB))
data$TOV <- as.numeric(as.character(data$TOV))
data$FTA <- as.numeric(as.character(data$FTA))
data$Opp.FG <- as.numeric(as.character(data$Opp.FG))
data$Opp.FGA <- as.numeric(as.character(data$Opp.FGA))
data$Opp.3P <- as.numeric(as.character(data$Opp.3P))
data$Opp.FT <- as.numeric(as.character(data$Opp.FT))
data$Opp.ORB <- as.numeric(as.character(data$Opp.ORB))
data$Opp.DRB <- as.numeric(as.character(data$Opp.DRB))
data$Opp.TOV <- as.numeric(as.character(data$Opp.TOV))
data$Opp.FTA <- as.numeric(as.character(data$Opp.FTA))
# Create new four factors stats and round
data$eFG = (data$FG + 0.5 * data$X3P) / data$FGA
data$OeFG = (data$Opp.FG + 0.5 * data$Opp.3P) / data$Opp.FGA
data$eFG = round(data$eFG, digits = 3)
data$OeFG = round(data$OeFG, digits = 3)
data$TOVp = data$TOV / (data$FGA + 0.44 + data$FTA + data$TOV)
data$oTOVp = data$Opp.TOV / (data$Opp.FGA + 0.44 + data$Opp.FTA + data$Opp.TOV)
data$TOVp = round(data$TOVp, digits = 3)
data$oTOVp = round(data$oTOVp, digits = 3)
data$ORBp = data$ORB / (data$ORB + data$Opp.DRB)
data$DRBp = data$DRB / (data$DRB + data$Opp.DRB)
data$ORBp = round(data$ORBp, digits = 3)
data$DRBp = round(data$DRBp, digits = 3)
data$FTp = data$FT / data$FGA
data$oFTp = data$Opp.FT / data$Opp.FGA
data$FTp = round(data$FTp, digits = 3)
data$oFTp = round(data$FTp, digits = 3)
write.csv(data, file = "/Users/Christian/Documents/Bracketlytics/data/game_data_2017.csv")
####Changes to 2018 game data####
data = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/2018TeamStats_Final.csv', header = TRUE, sep = ",")
# Drop columns the four-factors features do not use.
drop_cols <- c("gameid", "MP", "FG.", "X2P", "X2PA", "X2P.", "X3PA", "X3P.",
               "FT.", "TRB", "AST", "STL", "BLK", "PF", "PTS",
               "Opp.FG.", "Opp.2P", "Opp.2PA", "Opp.2P.", "Opp.3PA",
               "Opp.3P.", "Opp.FT.", "Opp.TRB", "Opp.AST", "Opp.STL",
               "Opp.BLK", "Opp.PF", "Opp.PTS")
for (col in drop_cols) data[[col]] <- NULL
# Coerce the remaining stat columns (factors after read.csv) to numeric.
num_cols <- c("FG", "FGA", "X3P", "FT", "ORB", "DRB", "TOV", "FTA",
              "Opp.FG", "Opp.FGA", "Opp.3P", "Opp.FT", "Opp.ORB",
              "Opp.DRB", "Opp.TOV", "Opp.FTA")
for (col in num_cols) data[[col]] <- as.numeric(as.character(data[[col]]))
# "Four factors" stats, own and opponent versions, rounded to 3 decimals.
# Effective field-goal percentage.
data$eFG <- round((data$FG + 0.5 * data$X3P) / data$FGA, digits = 3)
data$OeFG <- round((data$Opp.FG + 0.5 * data$Opp.3P) / data$Opp.FGA, digits = 3)
# Turnover rate. NOTE(review): denominator uses "+ 0.44 +" where the standard
# formula is "+ 0.44 * FTA +"; kept as written -- confirm intent.
data$TOVp <- round(data$TOV / (data$FGA + 0.44 + data$FTA + data$TOV), digits = 3)
data$oTOVp <- round(data$Opp.TOV / (data$Opp.FGA + 0.44 + data$Opp.FTA + data$Opp.TOV), digits = 3)
# Rebound rates. NOTE(review): DRBp divides by (DRB + Opp.DRB); the usual
# defensive-rebound rate uses (DRB + Opp.ORB) -- confirm intent.
data$ORBp <- round(data$ORB / (data$ORB + data$Opp.DRB), digits = 3)
data$DRBp <- round(data$DRB / (data$DRB + data$Opp.DRB), digits = 3)
# Free-throw rates.
data$FTp <- round(data$FT / data$FGA, digits = 3)
# BUG FIX: oFTp previously re-rounded FTp, overwriting the opponent's
# free-throw rate with the team's own.
data$oFTp <- round(data$Opp.FT / data$Opp.FGA, digits = 3)
write.csv(data, file = "/Users/Christian/Documents/Bracketlytics/data/game_data.csv")
| /archive/data_cleaning.R | no_license | geerc/bracketlytics | R | false | false | 12,440 | r | install.packages("dplyr")
# Install only when missing: an unconditional install.packages() re-downloads
# the package on every run of the script.
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse")
library(dplyr)
library(tidyverse)
# Per-game box scores for the 2017-18 season and the strength-of-schedule table.
data <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/2018TeamStats_Final.csv', header = TRUE, sep = ",")
sos.data <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/sos.csv')
####Compiling game data to season data####
# Drop the box-score columns the season aggregation does not use.
unused <- c("gameid", "MP", "FG.", "X2P", "X2PA", "X2P.", "X3PA", "X3P.",
            "FT.", "TRB", "AST", "STL", "BLK", "PF", "PTS",
            "Opp.FG.", "Opp.2P", "Opp.2PA", "Opp.2P.", "Opp.3PA", "Opp.3P.",
            "Opp.FT.", "Opp.TRB", "Opp.AST", "Opp.STL", "Opp.BLK",
            "Opp.PF", "Opp.PTS", "Win.")
for (unused_col in unused) {
  data[[unused_col]] <- NULL
}
# Box-score counts are read in as factors; round-trip through character to
# recover their numeric values.
stat_cols <- c("FG", "FGA", "X3P", "FT", "ORB", "DRB", "TOV", "FTA",
               "Opp.FG", "Opp.FGA", "Opp.3P", "Opp.FT", "Opp.ORB",
               "Opp.DRB", "Opp.TOV", "Opp.FTA")
for (stat_col in stat_cols) {
  data[[stat_col]] <- as.numeric(as.character(data[[stat_col]]))
}
# Aggregate the per-game rows into one season-total row per team.
# across() keeps the original column names, replacing the 16 manual renames
# the bare sum(...) calls previously required.
sum_cols <- c("FG", "FGA", "X3P", "FT", "FTA", "ORB", "DRB", "TOV",
              "Opp.FG", "Opp.FGA", "Opp.3P", "Opp.FT", "Opp.FTA",
              "Opp.ORB", "Opp.DRB", "Opp.TOV")
season.data <- data %>%
  group_by(Team) %>%
  summarise(across(all_of(sum_cols), ~ sum(.x, na.rm = TRUE)))
# Reindex (deprecated no-op on tibbles; kept for parity with the original).
rownames(season.data) <- 1:nrow(season.data)
# Filter out some teams
# season.data <- filter(season.data)
# Save new data as csv file
write.csv(season.data, file = "/Users/Christian/Documents/Bracketlytics/data/season_data.csv")
####Changing Data type of strength of schedule data####
# SOS columns arrive as factors; make them numeric and rename the join key.
sos.data$SOS <- as.numeric(as.character(sos.data$SOS))
sos.data$Rank <- as.numeric(as.character(sos.data$Rank))
names(sos.data)[names(sos.data) == "School"] <- "Team"
write.csv(sos.data, file = '/Users/Christian/Documents/Bracketlytics/data/sos.csv')
####Joining data####
# Lower-case team names so they match season.data's spelling.
sos.data$Team <- tolower(sos.data$Team)
# CLEANUP: the original repeated the conversions/rename/tolower a second time
# (all no-ops) and computed a full_join whose result was immediately
# overwritten; only the right_join that was actually kept remains.
full_data <- right_join(season.data, sos.data, by = "Team")
full_data <- na.omit(full_data)
write.csv(full_data, file = '/Users/Christian/Documents/Bracketlytics/data/season_data.csv')
####Linear Regression Data conversion and joining####
# One offensive and one defensive file per season. The named argument
# `file =` replaces `file <- path`, which assigned a stray global `file`.
Off_2017_2018 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2017-18.csv', header = TRUE, sep = ",")
Off_2016_2017 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2016-17.csv', header = TRUE, sep = ",")
Off_2015_2016 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2015-16.csv', header = TRUE, sep = ",")
Off_2014_2015 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2014-15.csv', header = TRUE, sep = ",")
Off_2013_2014 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Off_2013-14.csv', header = TRUE, sep = ",")
# BUG FIX: all five defensive frames previously loaded Def_2017-18.csv;
# each season now reads its own defensive file.
Def_2017_2018 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2017-18.csv', header = TRUE, sep = ",")
Def_2016_2017 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2016-17.csv', header = TRUE, sep = ",")
Def_2015_2016 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2015-16.csv', header = TRUE, sep = ",")
Def_2014_2015 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2014-15.csv', header = TRUE, sep = ",")
Def_2013_2014 <- read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/model_data/Def_2013-14.csv', header = TRUE, sep = ",")
# Join each season's offensive and defensive tables on school name.
total_2017_2018 <- full_join(Off_2017_2018, Def_2017_2018, by = "School")
total_2016_2017 <- full_join(Off_2016_2017, Def_2016_2017, by = "School")
total_2015_2016 <- full_join(Off_2015_2016, Def_2015_2016, by = "School")
total_2014_2015 <- full_join(Off_2014_2015, Def_2014_2015, by = "School")
total_2013_2014 <- full_join(Off_2013_2014, Def_2013_2014, by = "School")
# Suffix school names with the season so rows stay unique after stacking.
total_2017_2018 <- transform(total_2017_2018, School = sprintf('%s_2018', School))
total_2016_2017 <- transform(total_2016_2017, School = sprintf('%s_2017', School))
total_2015_2016 <- transform(total_2015_2016, School = sprintf('%s_2016', School))
total_2014_2015 <- transform(total_2014_2015, School = sprintf('%s_2015', School))
total_2013_2014 <- transform(total_2013_2014, School = sprintf('%s_2014', School))
# Attach NCAA tournament win counts (2018 bracket only).
wins_2018 <- read_csv("/Users/Christian/Documents/Bracketlytics/data/model_data/wins.csv")
total_2017_2018 <- full_join(total_2017_2018, wins_2018, by = "School")
# Stack all seasons into one frame (by = NULL -> natural join on all columns).
# BUG FIX: total_2015_2016 was joined twice; the duplicate join is removed.
full_data <- full_join(total_2017_2018, total_2016_2017, by = NULL) %>%
  full_join(total_2015_2016, by = NULL) %>%
  full_join(total_2014_2015, by = NULL) %>%
  full_join(total_2013_2014, by = NULL)
# Strip trailing " ID..." noise from the 2018 school names.
# NOTE(review): this runs after full_data is built, so full_data keeps the
# uncleaned names -- confirm whether it should run before the stacking step.
total_2017_2018$School <- gsub(" ID.*","",total_2017_2018$School)
####Formatting of 2017 game data####
data = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/2017TeamStats_Test.csv', header = TRUE, sep = ",")
# Drop columns the four-factors features do not use.
drop_cols <- c("gameid", "MP", "FG.", "X2P", "X2PA", "X2P.", "X3PA", "X3P.",
               "FT.", "TRB", "AST", "STL", "BLK", "PF", "PTS",
               "Opp.FG.", "Opp.2P", "Opp.2PA", "Opp.2P.", "Opp.3PA",
               "Opp.3P.", "Opp.FT.", "Opp.TRB", "Opp.AST", "Opp.STL",
               "Opp.BLK", "Opp.PF", "Opp.PTS")
for (col in drop_cols) data[[col]] <- NULL
# Coerce the remaining stat columns (factors after read.csv) to numeric.
num_cols <- c("FG", "FGA", "X3P", "FT", "ORB", "DRB", "TOV", "FTA",
              "Opp.FG", "Opp.FGA", "Opp.3P", "Opp.FT", "Opp.ORB",
              "Opp.DRB", "Opp.TOV", "Opp.FTA")
for (col in num_cols) data[[col]] <- as.numeric(as.character(data[[col]]))
# "Four factors" stats, own and opponent versions, rounded to 3 decimals.
# Effective field-goal percentage.
data$eFG <- round((data$FG + 0.5 * data$X3P) / data$FGA, digits = 3)
data$OeFG <- round((data$Opp.FG + 0.5 * data$Opp.3P) / data$Opp.FGA, digits = 3)
# Turnover rate. NOTE(review): denominator uses "+ 0.44 +" where the standard
# formula is "+ 0.44 * FTA +"; kept as written -- confirm intent.
data$TOVp <- round(data$TOV / (data$FGA + 0.44 + data$FTA + data$TOV), digits = 3)
data$oTOVp <- round(data$Opp.TOV / (data$Opp.FGA + 0.44 + data$Opp.FTA + data$Opp.TOV), digits = 3)
# Rebound rates. NOTE(review): DRBp divides by (DRB + Opp.DRB); the usual
# defensive-rebound rate uses (DRB + Opp.ORB) -- confirm intent.
data$ORBp <- round(data$ORB / (data$ORB + data$Opp.DRB), digits = 3)
data$DRBp <- round(data$DRB / (data$DRB + data$Opp.DRB), digits = 3)
# Free-throw rates.
data$FTp <- round(data$FT / data$FGA, digits = 3)
# BUG FIX: oFTp previously re-rounded FTp, overwriting the opponent's
# free-throw rate with the team's own.
data$oFTp <- round(data$Opp.FT / data$Opp.FGA, digits = 3)
write.csv(data, file = "/Users/Christian/Documents/Bracketlytics/data/game_data_2017.csv")
####Changes to 2018 game data####
data = read.csv(file = '/Users/Christian/Documents/Bracketlytics/data/2018TeamStats_Final.csv', header = TRUE, sep = ",")
# Drop columns the four-factors features do not use.
drop_cols <- c("gameid", "MP", "FG.", "X2P", "X2PA", "X2P.", "X3PA", "X3P.",
               "FT.", "TRB", "AST", "STL", "BLK", "PF", "PTS",
               "Opp.FG.", "Opp.2P", "Opp.2PA", "Opp.2P.", "Opp.3PA",
               "Opp.3P.", "Opp.FT.", "Opp.TRB", "Opp.AST", "Opp.STL",
               "Opp.BLK", "Opp.PF", "Opp.PTS")
for (col in drop_cols) data[[col]] <- NULL
# Coerce the remaining stat columns (factors after read.csv) to numeric.
num_cols <- c("FG", "FGA", "X3P", "FT", "ORB", "DRB", "TOV", "FTA",
              "Opp.FG", "Opp.FGA", "Opp.3P", "Opp.FT", "Opp.ORB",
              "Opp.DRB", "Opp.TOV", "Opp.FTA")
for (col in num_cols) data[[col]] <- as.numeric(as.character(data[[col]]))
# "Four factors" stats, own and opponent versions, rounded to 3 decimals.
# Effective field-goal percentage.
data$eFG <- round((data$FG + 0.5 * data$X3P) / data$FGA, digits = 3)
data$OeFG <- round((data$Opp.FG + 0.5 * data$Opp.3P) / data$Opp.FGA, digits = 3)
# Turnover rate. NOTE(review): denominator uses "+ 0.44 +" where the standard
# formula is "+ 0.44 * FTA +"; kept as written -- confirm intent.
data$TOVp <- round(data$TOV / (data$FGA + 0.44 + data$FTA + data$TOV), digits = 3)
data$oTOVp <- round(data$Opp.TOV / (data$Opp.FGA + 0.44 + data$Opp.FTA + data$Opp.TOV), digits = 3)
# Rebound rates. NOTE(review): DRBp divides by (DRB + Opp.DRB); the usual
# defensive-rebound rate uses (DRB + Opp.ORB) -- confirm intent.
data$ORBp <- round(data$ORB / (data$ORB + data$Opp.DRB), digits = 3)
data$DRBp <- round(data$DRB / (data$DRB + data$Opp.DRB), digits = 3)
# Free-throw rates.
data$FTp <- round(data$FT / data$FGA, digits = 3)
# BUG FIX: oFTp previously re-rounded FTp, overwriting the opponent's
# free-throw rate with the team's own.
data$oFTp <- round(data$Opp.FT / data$Opp.FGA, digits = 3)
write.csv(data, file = "/Users/Christian/Documents/Bracketlytics/data/game_data.csv")
|
# Differenced benchmark regression: reconstructing the low-frequency series
# from in_sample()'s two columns must recover the original lfserie and the
# prais() regression's fitted values, in both "changes" and "levels" form.
test_that("in_sample works with include.differenciation=TRUE", {
benchmark <- annualBenchmark(hfserie = turnover,
lfserie = construction,
include.differenciation = TRUE)
# Column 1 as growth rates, applied to the lagged annual total, must rebuild
# `construction` (the first year is lost to the lag, hence the window()).
simul <- lag(aggregate(construction),-1)*(100+in_sample(benchmark,type="changes")[,1])/100
obtained <- construction
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
# Column 2 as growth rates must rebuild the Prais regression's fitted values.
simul <- lag(aggregate(construction),-1)*(na.omit(in_sample(benchmark,type="changes")[,2]))/100
obtained <- fitted(prais(benchmark))
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
# Column 1 in levels is the original series itself.
simul <- na.omit(in_sample(benchmark,type="levels")[,1])
obtained <- construction
expect_equal(simul,obtained)
# Column 2 in levels: fitted values plus the lagged series (the model is
# differenced, so levels are rebuilt by adding the previous observation).
simul <- na.omit(in_sample(benchmark,type="levels")[,2])
attr(simul, "na.action") <- NULL
obtained <- fitted(prais(benchmark))+lag(construction,-1)
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
})
# Same reconstruction checks as above, but for an undifferenced regression:
# here levels column 2 equals the fitted values directly (no lagged term).
test_that("in_sample works with include.differenciation=FALSE", {
benchmark <- annualBenchmark(hfserie = turnover,
lfserie = construction,
include.differenciation = FALSE)
# Column 1 as growth rates must rebuild `construction` past the first year.
simul <- lag(aggregate(construction),-1)*(100+in_sample(benchmark,type="changes")[,1])/100
obtained <- construction
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
# Column 2 as growth rates corresponds to fitted values net of the lag.
simul <- lag(aggregate(construction),-1)*(na.omit(in_sample(benchmark,type="changes")[,2]))/100
obtained <- fitted(prais(benchmark))-lag(aggregate(construction),-1)
expect_equal(simul,obtained)
# Column 1 in levels is the original series itself.
simul <- na.omit(in_sample(benchmark,type="levels")[,1])
obtained <- construction
expect_equal(simul,obtained)
# Column 2 in levels equals the regression's fitted values.
simul <- na.omit(in_sample(benchmark,type="levels")[,2])
attr(simul, "na.action") <- NULL
obtained <- fitted(prais(benchmark))
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
})
# Snapshot test: printing an in_sample object must match the stored
# reference output (update = FALSE keeps the snapshot frozen).
test_that("print in_sample prints",{
benchmark <- annualBenchmark(hfserie = turnover,
lfserie = construction,
include.differenciation = TRUE)
expect_known_output(print(in_sample(benchmark)),"outputs/in_sample.txt",update=FALSE)
}) | /fuzzedpackages/disaggR/tests/testthat/test-in_sample.R | no_license | akhikolla/testpackages | R | false | false | 2,385 | r | test_that("in_sample works with include.differenciation=TRUE", {
# (The opening test_that("in_sample works with include.differenciation=TRUE")
# is on the preceding line.) Build the differenced benchmark under test.
benchmark <- annualBenchmark(hfserie = turnover,
lfserie = construction,
include.differenciation = TRUE)
# Column 1 as growth rates, applied to the lagged annual total, must rebuild
# `construction` (the first year is lost to the lag, hence the window()).
simul <- lag(aggregate(construction),-1)*(100+in_sample(benchmark,type="changes")[,1])/100
obtained <- construction
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
# Column 2 as growth rates must rebuild the Prais regression's fitted values.
simul <- lag(aggregate(construction),-1)*(na.omit(in_sample(benchmark,type="changes")[,2]))/100
obtained <- fitted(prais(benchmark))
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
# Column 1 in levels is the original series itself.
simul <- na.omit(in_sample(benchmark,type="levels")[,1])
obtained <- construction
expect_equal(simul,obtained)
# Column 2 in levels: fitted values plus the lagged series (differenced model).
simul <- na.omit(in_sample(benchmark,type="levels")[,2])
attr(simul, "na.action") <- NULL
obtained <- fitted(prais(benchmark))+lag(construction,-1)
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
})
# Same reconstruction checks as above, but for an undifferenced regression:
# here levels column 2 equals the fitted values directly (no lagged term).
test_that("in_sample works with include.differenciation=FALSE", {
benchmark <- annualBenchmark(hfserie = turnover,
lfserie = construction,
include.differenciation = FALSE)
# Column 1 as growth rates must rebuild `construction` past the first year.
simul <- lag(aggregate(construction),-1)*(100+in_sample(benchmark,type="changes")[,1])/100
obtained <- construction
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
# Column 2 as growth rates corresponds to fitted values net of the lag.
simul <- lag(aggregate(construction),-1)*(na.omit(in_sample(benchmark,type="changes")[,2]))/100
obtained <- fitted(prais(benchmark))-lag(aggregate(construction),-1)
expect_equal(simul,obtained)
# Column 1 in levels is the original series itself.
simul <- na.omit(in_sample(benchmark,type="levels")[,1])
obtained <- construction
expect_equal(simul,obtained)
# Column 2 in levels equals the regression's fitted values.
simul <- na.omit(in_sample(benchmark,type="levels")[,2])
attr(simul, "na.action") <- NULL
obtained <- fitted(prais(benchmark))
obtained <- window(obtained,start=tsp(obtained)[1]+1)
expect_equal(simul,obtained)
})
# Snapshot test: printing an in_sample object must match the stored
# reference output (update = FALSE keeps the snapshot frozen).
test_that("print in_sample prints",{
benchmark <- annualBenchmark(hfserie = turnover,
lfserie = construction,
include.differenciation = TRUE)
expect_known_output(print(in_sample(benchmark)),"outputs/in_sample.txt",update=FALSE)
})
library(e1071)
library(glmnet)
# Facebook posts labelled with a stance toward nuclear power; features are
# counts, keyword/topic indicators and (optionally) attitude scores.
fb_post <- read_csv("../../../machine_learning/fb_post_train.csv")
#F1+F2+F3+F5
# Feature set without the attitude scores. It is immediately overwritten by
# the fuller set below; the two select() calls are switchable experiments.
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
stance) %>% na.omit()
#F1+F2+F3+F4+F5
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
pos_att:multi_score,
stance) %>% na.omit()
# svm() classification needs factor predictors/response.
df <- df %>% transform(
page_category = factor(page_category),
title_nu = factor(title_nu),
stance = factor(stance)
)
# NOTE(review): hard-coded positions -- assumes columns 6:70 are the binary
# keyword/topic indicators; confirm against the select() above.
df[,6:70] <- lapply(df[,6:70], factor)
# Seeded 70/30 train-test split.
set.seed(1001)
n <- nrow(df)
t_idx <- sample(seq_len(n), size = round(0.7 * n))
traindata <- df[t_idx,]
testdata <- df[ - t_idx,]
# Grid-search cost and gamma for an RBF-kernel SVM on the training set.
tune.model = tune(svm,
stance~.,
data=traindata,
kernel="radial", # RBF kernel function
range=list(cost=10^(-1:2), gamma=c(.5,1,2))# the parameter grid to search (the key tuning line)
)
tune.model$best.model
# Final SVM with the chosen parameters, 10-fold cross-validated.
model <- svm(stance~. , traindata, cost = 10, gamma = 0.1, cross = 10)
train.pred = predict(model, traindata)
test.pred = predict(model, testdata)
# Confusion matrices (rows = truth, cols = prediction). The test-set table
# overwrites the train-set one, so the metrics below are test-set metrics.
cm <- table(traindata$stance, train.pred)
cm <- table(testdata$stance, test.pred)
cm
# Per-class precision / recall / F1; per the variable names, index 1 is the
# negative class, 2 neutral, 3 positive.
(pre_pos <- cm[3,3] / sum(cm[,3]))
(rec_pos <- cm[3,3] / sum(cm[3,]))
(f1_pos <- 2*pre_pos*rec_pos/(pre_pos+rec_pos))
(pre_neg <- cm[1,1] / sum(cm[,1]))
(rec_neg <- cm[1,1] / sum(cm[1,]))
(f1_neg <- 2*pre_neg*rec_neg/(pre_neg+rec_neg))
(pre_neu <- cm[2,2] / sum(cm[,2]))
(rec_neu <- cm[2,2] / sum(cm[2,]))
(f1_neu <- 2*pre_neu*rec_neu/(pre_neu+rec_neu))
# Macro-averaged F1 over the three stance classes.
(f1_pos+f1_neg+f1_neu)/3
#F1+F2+F3+F5
# Rebuild the frame for glmnet, which needs a numeric matrix: page_category
# is one-hot encoded by hand. As above, the first select() is a switchable
# alternative that the second immediately replaces.
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
stance) %>% na.omit() %>%
mutate(
page_category.pos = ifelse(page_category=="核電類-正", 1, 0),
page_category.neg = ifelse(page_category=="核電類-負", 1, 0),
page_category.news = ifelse(page_category=="新聞類", 1, 0)
) %>% select(-page_category)
#F1+F2+F3+F4+F5
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
pos_att:multi_score,
stance) %>% na.omit() %>%
mutate(
page_category.pos = ifelse(page_category=="核電類-正", 1, 0),
page_category.neg = ifelse(page_category=="核電類-負", 1, 0),
page_category.news = ifelse(page_category=="新聞類", 1, 0)
) %>% select(-page_category)
# Everything numeric so as.matrix() below yields a numeric matrix.
df[] <- lapply(df, as.numeric)
# Same seeded 70/30 split as the SVM section.
set.seed(1001)
n <- nrow(df)
t_idx <- sample(seq_len(n), size = round(0.7 * n))
traindata <- df[t_idx,]
testdata <- df[ - t_idx,]
# Multinomial ridge regression (alpha = 0) over the full lambda path.
ridge = glmnet(x = as.matrix(traindata %>% select(-stance)),
y = as.matrix(traindata %>% select(stance)),
alpha = 0,
family = "multinomial")
# Choose lambda by cross-validation.
cv.ridge = cv.glmnet(x = as.matrix(traindata %>% select(-stance)),
y = as.matrix(traindata %>% select(stance)),
alpha = 0, # ridge
family = "multinomial")
best.ridge.lambda = cv.ridge$lambda.min
# Predict test-set classes at the CV-chosen lambda.
ridge.test = predict(ridge,
s = best.ridge.lambda,
newx = as.matrix(testdata %>% select(-stance)),
type = "class")
# table(testdata$stance, ridge.test)
cm <- table(testdata$stance, ridge.test)
cm
# Per-class precision / recall / F1 and macro-F1, as in the SVM section
# (index 1 = negative, 2 = neutral, 3 = positive per the variable names).
(pre_pos <- cm[3,3] / sum(cm[,3]))
(rec_pos <- cm[3,3] / sum(cm[3,]))
(f1_pos <- 2*pre_pos*rec_pos/(pre_pos+rec_pos))
(pre_neg <- cm[1,1] / sum(cm[,1]))
(rec_neg <- cm[1,1] / sum(cm[1,]))
(f1_neg <- 2*pre_neg*rec_neg/(pre_neg+rec_neg))
(pre_neu <- cm[2,2] / sum(cm[,2]))
(rec_neu <- cm[2,2] / sum(cm[2,]))
(f1_neu <- 2*pre_neu*rec_neu/(pre_neu+rec_neu))
(f1_pos+f1_neg+f1_neu)/3
| /code/opinion_mining/main/trainmodel_fbPost.R | no_license | ppjs/public-opinion-mining | R | false | false | 4,105 | r | library(e1071)
library(glmnet)
# Facebook posts labelled with a stance toward nuclear power.
fb_post <- read_csv("../../../machine_learning/fb_post_train.csv")
#F1+F2+F3+F5
# Feature set without attitude scores; overwritten by the fuller set below
# (the two select() calls are switchable experiments).
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
stance) %>% na.omit()
#F1+F2+F3+F4+F5
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
pos_att:multi_score,
stance) %>% na.omit()
# svm() classification needs factor predictors/response.
df <- df %>% transform(
page_category = factor(page_category),
title_nu = factor(title_nu),
stance = factor(stance)
)
# NOTE(review): hard-coded positions -- assumes columns 6:70 are the binary
# keyword/topic indicators; confirm against the select() above.
df[,6:70] <- lapply(df[,6:70], factor)
# Seeded 70/30 train-test split.
set.seed(1001)
n <- nrow(df)
t_idx <- sample(seq_len(n), size = round(0.7 * n))
traindata <- df[t_idx,]
testdata <- df[ - t_idx,]
# Grid-search cost and gamma for an RBF-kernel SVM on the training set.
tune.model = tune(svm,
stance~.,
data=traindata,
kernel="radial", # RBF kernel function
range=list(cost=10^(-1:2), gamma=c(.5,1,2))# the parameter grid to search (the key tuning line)
)
tune.model$best.model
# Final SVM with the chosen parameters, 10-fold cross-validated.
model <- svm(stance~. , traindata, cost = 10, gamma = 0.1, cross = 10)
train.pred = predict(model, traindata)
test.pred = predict(model, testdata)
# Confusion matrices (rows = truth, cols = prediction); the test-set table
# overwrites the train-set one, so the metrics below are test-set metrics.
cm <- table(traindata$stance, train.pred)
cm <- table(testdata$stance, test.pred)
cm
# Per-class precision / recall / F1; per the variable names, index 1 is the
# negative class, 2 neutral, 3 positive.
(pre_pos <- cm[3,3] / sum(cm[,3]))
(rec_pos <- cm[3,3] / sum(cm[3,]))
(f1_pos <- 2*pre_pos*rec_pos/(pre_pos+rec_pos))
(pre_neg <- cm[1,1] / sum(cm[,1]))
(rec_neg <- cm[1,1] / sum(cm[1,]))
(f1_neg <- 2*pre_neg*rec_neg/(pre_neg+rec_neg))
(pre_neu <- cm[2,2] / sum(cm[,2]))
(rec_neu <- cm[2,2] / sum(cm[2,]))
(f1_neu <- 2*pre_neu*rec_neu/(pre_neu+rec_neu))
# Macro-averaged F1 over the three stance classes.
(f1_pos+f1_neg+f1_neu)/3
#F1+F2+F3+F5
# Rebuild the frame for glmnet (numeric matrix required): page_category is
# one-hot encoded by hand. The first select() is a switchable alternative
# that the second immediately replaces.
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
stance) %>% na.omit() %>%
mutate(
page_category.pos = ifelse(page_category=="核電類-正", 1, 0),
page_category.neg = ifelse(page_category=="核電類-負", 1, 0),
page_category.news = ifelse(page_category=="新聞類", 1, 0)
) %>% select(-page_category)
#F1+F2+F3+F4+F5
df <- fb_post %>% select(comment, page_category, title_nu,
pos_word, neg_word,
核四:億元,
topic1:topic7,
pos_att:multi_score,
stance) %>% na.omit() %>%
mutate(
page_category.pos = ifelse(page_category=="核電類-正", 1, 0),
page_category.neg = ifelse(page_category=="核電類-負", 1, 0),
page_category.news = ifelse(page_category=="新聞類", 1, 0)
) %>% select(-page_category)
# Everything numeric so as.matrix() below yields a numeric matrix.
df[] <- lapply(df, as.numeric)
# Same seeded 70/30 split as the SVM section.
set.seed(1001)
n <- nrow(df)
t_idx <- sample(seq_len(n), size = round(0.7 * n))
traindata <- df[t_idx,]
testdata <- df[ - t_idx,]
# Multinomial ridge regression (alpha = 0) over the full lambda path.
ridge = glmnet(x = as.matrix(traindata %>% select(-stance)),
y = as.matrix(traindata %>% select(stance)),
alpha = 0,
family = "multinomial")
# Choose lambda by cross-validation.
cv.ridge = cv.glmnet(x = as.matrix(traindata %>% select(-stance)),
y = as.matrix(traindata %>% select(stance)),
alpha = 0, # ridge
family = "multinomial")
best.ridge.lambda = cv.ridge$lambda.min
# Predict test-set classes at the CV-chosen lambda.
ridge.test = predict(ridge,
s = best.ridge.lambda,
newx = as.matrix(testdata %>% select(-stance)),
type = "class")
# table(testdata$stance, ridge.test)
cm <- table(testdata$stance, ridge.test)
cm
# Per-class precision / recall / F1 and macro-F1, as in the SVM section
# (index 1 = negative, 2 = neutral, 3 = positive per the variable names).
(pre_pos <- cm[3,3] / sum(cm[,3]))
(rec_pos <- cm[3,3] / sum(cm[3,]))
(f1_pos <- 2*pre_pos*rec_pos/(pre_pos+rec_pos))
(pre_neg <- cm[1,1] / sum(cm[,1]))
(rec_neg <- cm[1,1] / sum(cm[1,]))
(f1_neg <- 2*pre_neg*rec_neg/(pre_neg+rec_neg))
(pre_neu <- cm[2,2] / sum(cm[,2]))
(rec_neu <- cm[2,2] / sum(cm[2,]))
(f1_neu <- 2*pre_neu*rec_neu/(pre_neu+rec_neu))
(f1_pos+f1_neg+f1_neu)/3
|
# Two-sample t-test of y between the two groups defined by x (the first
# element's value versus everything else), returning the p-value with
# sentinel values for degenerate inputs.
#
# x: group labels (any comparable type); x[1] defines group 1.
# y: numeric observations, parallel to x.
# Returns 1 when no test is meaningful (a single group, constant y, or a
# group with one observation), 0 when both groups are constant (t.test
# would stop with "data are essentially constant"), otherwise the Welch
# t-test p-value.
wttest <- function(x, y) {
  l1 <- y[which(x == x[1])]
  l2 <- y[which(x != x[1])]
  if (length(unique(x)) == 1 || length(unique(y)) == 1) {
    return(1)  # only one group, or y carries no variation at all
  } else if (length(l1) == 1 || length(l2) == 1) {
    return(1)  # a single-observation group cannot be tested
  } else if (length(unique(l1)) == 1 && length(unique(l2)) == 1) {
    # BUG FIX: was `length(unique(l2)==1)` -- the length of a logical
    # vector, which is truthy for any non-empty l2.
    return(0)
  } else {
    # BUG FIX: test y split by x. The original `t.test(x ~ y)` reversed
    # response and grouping (and errors outright for non-numeric x);
    # comparing l1 against l2 also keeps the group1-vs-rest semantics
    # when x has more than two distinct labels.
    result <- t.test(l1, l2)
    return(result$p.value)
  }
}
| /R/Wttest.R | no_license | chenhong-dkfz/wombat | R | false | false | 441 | r | wttest <- function(x,y) {
# (Function signature `wttest <- function(x,y) {` is on the preceding line.)
# Split y into group 1 (labels equal to x[1]) and the rest.
l1 <- y[which(x == x[1])]
l2 <- y[which(x != x[1])]
if (length(unique(x)) == 1 || length(unique(y)) == 1) {
  return(1)  # only one group, or y carries no variation at all
} else if (length(l1) == 1 || length(l2) == 1) {
  return(1)  # a single-observation group cannot be tested
} else if (length(unique(l1)) == 1 && length(unique(l2)) == 1) {
  # BUG FIX: was `length(unique(l2)==1)` -- the length of a logical
  # vector, which is truthy for any non-empty l2.
  return(0)
} else {
  # BUG FIX: test y split by x; the original `t.test(x ~ y)` reversed
  # response and grouping (and errors for non-numeric x).
  result <- t.test(l1, l2)
  return(result$p.value)
}
}
|
\name{radSV}
\alias{radSV}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Radiation pattern for SV waves}
\description{
calculate the radiation patterns for SV waves
}
\usage{
radSV(del, phiS, lam, ichi, phi)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{del}{degrees, angle }
\item{phiS}{degrees,angle }
\item{lam}{degrees, angle }
\item{ichi}{degrees, take off angle}
\item{phi}{degrees, take off azimuth}
}
\details{
Given a focal mechanism strike-dip-rake
and a given incident angle (take-off angle)
and azimuth, return the SV amplitude
}
\value{
Amplitude of the SV wave
}
\references{K.~Aki and P.~G. Richards.\emph{Quantitative seismology}. University Science Books, Sausalito, Calif., 2nd edition, 2002.}
\author{Jonathan M. Lees <jonathan.lees@unc.edu>}
\seealso{radP, radSH, imageSV}
\examples{
phiS=65
del=25
lam=13
x = seq(-1, 1, 0.01)
y = x
X = matrix(rep(x, length(y)), nrow= length(x))
Y = t(X)
RAD2DEG = 180/pi
p = RAD2DEG*(pi/2 -atan2(Y, X))
p[p<0] = p[p<0] + 360
R = sqrt(X^2+Y^2)
R[R>1] = NaN
dip =RAD2DEG*2*asin(R/sqrt(2))
### Calculate the radiation pattern
G = radSV(del, phiS, lam, dip, p)
### plot values
image(x,y,G, asp=1)
}
\keyword{misc}
| /man/radSV.Rd | no_license | cran/RFOC | R | false | false | 1,244 | rd | \name{radSV}
\alias{radSV}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Radiation pattern for SV waves}
\description{
calculate the radiation patterns for SV waves
}
\usage{
radSV(del, phiS, lam, ichi, phi)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{del}{degrees, angle }
\item{phiS}{degrees,angle }
\item{lam}{degrees, angle }
\item{ichi}{degrees, take off angle}
\item{phi}{degrees, take off azimuth}
}
\details{
Given a focal mechanism strike-dip-rake
and a given incident angle (take-off angle)
and azimuth, return the SV amplitude
}
\value{
Amplitude of the SV wave
}
\references{K.~Aki and P.~G. Richards.\emph{Quantitative seismology}. University Science Books, Sausalito, Calif., 2nd edition, 2002.}
\author{Jonathan M. Lees <jonathan.lees@unc.edu>}
\seealso{radP, radSH, imageSV}
\examples{
phiS=65
del=25
lam=13
x = seq(-1, 1, 0.01)
y = x
X = matrix(rep(x, length(y)), nrow= length(x))
Y = t(X)
RAD2DEG = 180/pi
p = RAD2DEG*(pi/2 -atan2(Y, X))
p[p<0] = p[p<0] + 360
R = sqrt(X^2+Y^2)
R[R>1] = NaN
dip =RAD2DEG*2*asin(R/sqrt(2))
### Calculate the radiation pattern
G = radSV(del, phiS, lam, dip, p)
### plot values
image(x,y,G, asp=1)
}
\keyword{misc}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.