content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# S3 print method for "summary.tsglm" objects (package tscount).
# Prints the model call, a formatted coefficient table (with a note on how
# standard errors were obtained, when present), and summary statistics
# (link, distribution family, number of coefficients, log-likelihood,
# AIC, BIC). Returns `x` invisibly, as is conventional for print methods.
print.summary.tsglm <- function(x, ...){
if(length(coef(x)) > 0){
# Echo the call that created the fitted model.
cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse = "\n"), "\n\n", sep="")
cat("Coefficients:\n")
# Coefficient table with 3 significant digits; NAs are printed as blanks.
print(format.data.frame(as.data.frame(coef(x)), digits=3), print.gap=2, quote=FALSE, na.print="")
# When the coefficient table carries a "Std. Error" column, report how the
# standard errors were computed (normal approximation vs. bootstrap).
if(!is.null(coef(x)$"Std. Error")){
if(x$se.type == "normapprox") cat("Standard errors obtained by normal approximation.\n")
if(x$se.type == "bootstrap") cat("Standard errors obtained by parametric bootstrap with", x$se.bootstrapsamples, "replications.\n")
}
cat(
"\nLink function:", x$link,
# For the negative binomial family, point out the extra dispersion parameter.
"\nDistribution family:", x$distr, if(x$distr=="nbinom"){"(with overdispersion coefficient 'sigmasq')"}else{NULL},
"\nNumber of coefficients:", x$number.coef,
"\nLog-likelihood:", x$logLik,
"\nAIC:", x$AIC,
"\nBIC:", x$BIC,
"\n\n")
}else{
# No fitted coefficients: fall back to the plain print method when initial
# values are available, otherwise just state that there are none.
if(length(x$init)>0){
print(x, ...)
}else{
cat("No coefficients\n")
}
}
invisible(x)
}
| /tscount/R/print.summary.tsglm.r | no_license | ingted/R-Examples | R | false | false | 1,027 | r | print.summary.tsglm <- function(x, ...){
if(length(coef(x)) > 0){
cat("\nCall:\n", paste(deparse(x$call), sep="\n", collapse = "\n"), "\n\n", sep="")
cat("Coefficients:\n")
print(format.data.frame(as.data.frame(coef(x)), digits=3), print.gap=2, quote=FALSE, na.print="")
if(!is.null(coef(x)$"Std. Error")){
if(x$se.type == "normapprox") cat("Standard errors obtained by normal approximation.\n")
if(x$se.type == "bootstrap") cat("Standard errors obtained by parametric bootstrap with", x$se.bootstrapsamples, "replications.\n")
}
cat(
"\nLink function:", x$link,
"\nDistribution family:", x$distr, if(x$distr=="nbinom"){"(with overdispersion coefficient 'sigmasq')"}else{NULL},
"\nNumber of coefficients:", x$number.coef,
"\nLog-likelihood:", x$logLik,
"\nAIC:", x$AIC,
"\nBIC:", x$BIC,
"\n\n")
}else{
if(length(x$init)>0){
print(x, ...)
}else{
cat("No coefficients\n")
}
}
invisible(x)
}
|
# Bar Chart Page
library(shiny)
# This page focuses on budget and gross revenue: users choose between the
# average, median, and total of either metric, over a selectable year range.

# Choices for the metric dropdown. The names are the labels shown to the
# user; the values are the keys the server uses to select the column.
measurement_list <- list(
  "Average Budget" = "average_budget",
  "Median Budget" = "median_budget",
  "Total Budget" = "total_budget",
  "Average Revenue" = "average_revenue",
  "Median Revenue" = "median_revenue",
  "Total Revenue" = "total_revenue"
)
# Dropdown input for the metric.
measurement_input <- selectInput(
  inputId = "measurement",
  label = "Metric",
  choices = measurement_list,
  selected = "Average Budget"
)
# Slider input for the year range (inclusive at both ends).
year_slider <- sliderInput(
  inputId = "year_slider",
  label = "Years",
  min = 1990,
  max = 2020,
  value = c(1990, 2020),
  round = TRUE,
  dragRange = TRUE,
  # sep = "" removes the thousands separator so years render as "1990",
  # not "1,990".
  sep = ""
)
# Sidebar holding both inputs.
# NOTE(review): sidebarPanel() has no `inputId` argument; this value falls
# through `...` and becomes an HTML attribute on the generated tag. Kept
# for output compatibility, but it is likely unnecessary.
bar_sidebar_panel <- sidebarPanel(
  inputId = "bar_sidebar",
  measurement_input,
  year_slider
)
# Main panel: the plotly bar chart plus a prose description of the chart.
bar_main_panel <- mainPanel(
  # Plot the chart
  suppressWarnings(plotlyOutput("bar")),
  # Write the description, use tags to include the css styles
  tags$h2("About Chart:"),
  tags$p("One of the questions we wanted to answer
with our data was how different national and global events impacted the movie
industry. To answer this, this graph looks at the numbers for the money that
is brought in and spent by the movie industry by year. To do this, the graph
looks at three main values (average, median, and total) for either budget or
revenue. The graph also has the option of specifying a date range, to look
at the impact of events of specific years on the movie industry. The
significant years and events that we looked at are shown by a dotted white
line, and depict the years: 1991 (Introduction of the
Internet), 1997 (Creation of Netflix), 2001 (9/11), 2005 (Creation of
Youtube), 2008 (Great Recession), and 2020 (Coronavirus Pandemic)")
)
# Assemble the tab panel that the app UI registers under "Finance".
bar_page <- tabPanel(
  "Finance",
  fluidPage(
    includeCSS("styling.css"),
    titlePanel(
      tags$h1("How Did Different Global Events Affect
the Finances of the Movie Industry?")),
    suppressWarnings(sidebarLayout(
      bar_sidebar_panel,
      bar_main_panel
    )),
    # add in the footer
    tags$footer(tags$p(
      "Gisele Fox, Emiri Nishizawa, Melina Perraut, Roshni Srikanth,
    Ha Nhat To: ",
    tags$a(id = "url",
           href = "https://github.com/info-201a-au20/final-project-movies",
           "Github URL")))
  )
)
| /pages/bar_page.R | permissive | info-201a-au20/final-project-movies | R | false | false | 2,653 | r | # Bar Chart Page
library(shiny)
# going to be focusing on budget and gross revenue
# users have the option of choosing between looking at
# the average, median, and total for budget or revenue
# Create the options for the input in the sidebar
measurement_list <- list(
"Average Budget" = "average_budget",
"Median Budget" = "median_budget",
"Total Budget" = "total_budget",
"Average Revenue" = "average_revenue",
"Median Revenue" = "median_revenue",
"Total Revenue" = "total_revenue"
)
# create the selectInput
measurement_input <- selectInput(
inputId = "measurement",
label = "Metric",
choices = measurement_list,
selected = "Average Budget"
)
# create the slider for the year
year_slider <- sliderInput(
inputId = "year_slider",
label = "Years",
min = 1990,
max = 2020,
value = c(1990, 2020),
round = TRUE,
dragRange = TRUE,
# Remove the comma
sep = ""
)
# Create the sidebar panel
bar_sidebar_panel <- sidebarPanel(
inputId = "bar_sidebar",
measurement_input,
year_slider
)
# Create the main panel
bar_main_panel <- mainPanel(
# Plot the chart
suppressWarnings(plotlyOutput("bar")),
# Write the description, use tags to include the css styles
tags$h2("About Chart:"),
tags$p("One of the questions we wanted to answer
with our data was how different national and global events impacted the movie
industry. To answer this, this graph looks at the numbers for the money that
is brought in and spent by the movie industry by year. To do this, the graph
looks at three main values (average, median, and total) for either budget or
revenue. The graph also has the option of specifiying a date range, to look
at the impact of events of specific years on the movie industry. The
significant years and events that we looked at are shown by a dotted white
line, and depict the years: 1991 (Introduction of the
Internet), 1997 (Creation of Netflix), 2001 (9/11), 2005 (Creation of
Youtube), 2008 (Great Recession), and 2020 (Coronavirus Pandemic)")
)
# Create the actual page to go in the UI
bar_page <- tabPanel(
"Finance",
fluidPage(
includeCSS("styling.css"),
titlePanel(
tags$h1("How Did Different Global Events Effect
the Finances of the Movie Industy?")),
suppressWarnings(sidebarLayout(
bar_sidebar_panel,
bar_main_panel
)),
# add in the footer
tags$footer(tags$p(
"Gisele Fox, Emiri Nishizawa, Melina Perraut, Roshni Srikanth,
Ha Nhat To: ",
tags$a(id = "url",
href = "https://github.com/info-201a-au20/final-project-movies",
"Github URL")))
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alignTF_SNP.R
\name{alignTF_SNP}
\alias{alignTF_SNP}
\title{Align TF around a SNP of interest}
\usage{
alignTF_SNP(list, PFMatrixList)
}
\arguments{
\item{list}{Data frame returned by getCandidateTFs().}
\item{PFMatrixList}{Output of getMotifMatrixJASPAR() using matrix="PFM", with all the TF you want to query.}
}
\description{
Using the PWMs and the genomic locations of the motifs found, this function pads the PWMs with blanks so that they are centered on the SNP of interest.
}
| /man/alignTF_SNP.Rd | no_license | mireia-bioinfo/maRge | R | false | true | 550 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alignTF_SNP.R
\name{alignTF_SNP}
\alias{alignTF_SNP}
\title{Align TF around a SNP of interest}
\usage{
alignTF_SNP(list, PFMatrixList)
}
\arguments{
\item{list}{Data.frame outputed from getCandidateTFs().}
\item{PFMatrixList}{Output of getMotifMatrixJASPAR() using matrix="PFM", with all the TF you want to query.}
}
\description{
By using the PWM and the genomic locations of the motif found, it adds blanks to the PWM to make them be centered in the SNP of interest.
}
|
library(stringr)
library(dplyr)
library(ASGS)
library(ASGS.foyer)
# BOM Data source Monthly Rainfall - ftp://ftp.bom.gov.au/anon/home/ncc/www/change/HQmonthlyR/
# Save the unzipped file into your local according to the path mentioned below
# The unzipped file contains a file of weather station list and multiple files of Precipitation data for monthly/annual/seasonal
# The name of weather station list file is "HQMR_stations.txt"
# Load the text files of monthly Precipitation data
# List the monthly precipitation files.
# NOTE(review): `pattern` is a regular expression, so the leading "*" is a
# quantifier applied to nothing and "." matches any character; the intended
# pattern was probably "\\.month\\.txt$" -- confirm before changing.
bom_precip_monthly_list <- list.files("HQ_monthly_prcp_txt/", pattern="*.month.txt$")
# Read the text file of weather station list (space delimited file)
bom_station_list <- read.delim(paste("HQ_monthly_prcp_txt", "HQMR_stations.txt", sep="/"), sep=" ", header=F, col.names=c("station_id","lat","long","elv", "name1","name2", "name3"))
# Concatenate the station name parts, spread over three columns, into one
bom_station_list <- bom_station_list %>%
mutate(station_name = paste(name1,name2,name3))
# Discard the three name-part columns; they are no longer needed
bom_station_list$name1 <- NULL
bom_station_list$name2 <- NULL
bom_station_list$name3 <- NULL
# Precipitation Data Reading
# Read every monthly precipitation file and combine them into one data frame.
# Each file's first line carries the station id as its 8th whitespace-
# separated token; the remaining lines are the monthly records.
# Per-file frames are collected in a list and bound once at the end, which
# avoids the quadratic cost of calling rbind() inside the loop.
station_frames <- lapply(bom_precip_monthly_list, function(filename) {
  ## Getting Station Id from the first line of the text file
  first_line <- readLines(paste("HQ_monthly_prcp_txt", filename, sep="/"), n=1)
  firstline_data <- str_split(first_line, pattern=" ")
  stationid <- firstline_data[[1]][8]
  ## Read the text file of Precipitation into a Dataframe
  onestation_df <- read.table(paste("HQ_monthly_prcp_txt", filename, sep="/"), skip=1)
  # Tag every row with its station id
  onestation_df$stationid <- stationid
  onestation_df
})
# Bind all stations together; fall back to an empty data frame when no files
# were found, matching the original initial value of precipitation_df.
precipitation_df <- if (length(station_frames) > 0) {
  do.call(rbind, station_frames)
} else {
  data.frame()
}
# Persist the raw combined precipitation data before any cleaning.
save(precipitation_df, file="data/HPT/org_precp.RData")
# Quick missing-value audit on the precipitation column (V3).
sum(is.na(precipitation_df$V3))
precp_missing_value_percentage = sum(is.na(precipitation_df$V3))/nrow(precipitation_df)
precp_missing_value_percentage # 0 m,v
nrow(precipitation_df) #441154 obs
# glimpse(bom_station_list)
# class(precipitation_df$stationid)
# class(bom_station_list$station_id)
# bom_station_list$station_id is factor and precipitation_df$stationid is
# character, so convert to character before joining.
bom_station_list$station_id <- as.character(bom_station_list$station_id)
# Join with station Table
precp_stn <- precipitation_df %>%
inner_join(bom_station_list, by= c("stationid" = "station_id"))
nrow(precp_stn) #441154 rows
# Rename columns and reformat the period dates (V1, V2) from yyyymmdd
# strings to Date, in sync with the unemployment data.
colnames(precp_stn) <- c("from", "to", "precp", "stationid", "lat","long", "elv","stationname")
precp_stn <- precp_stn %>%
mutate(from = as.Date(as.character(from), '%Y%m%d')) %>%
mutate(to = as.Date(as.character(to), '%Y%m%d'))
nrow(precp_stn) #441154 rows
# Keep only records from 1996 onwards. (An earlier comment said 1990, but
# the cutoff actually applied here is 1996.)
precp_stn <- precp_stn %>%
filter(from >= as.Date("1996-01-01") & to >= as.Date("1996-01-31"))
head(precp_stn) #8206
save(precp_stn, file="data/HPT/bom_precp_stn_org.RData")
# Merge with SA4 Data: map each station's lat/lon to its SA4 statistical area.
precp_stn$territory_sa4 <- ASGS::latlon2SA(precp_stn$lat, precp_stn$long, to = "SA4", yr = "2016")
# precp_stn_tmp<-precp_stn
precp_sa4 <- precp_stn
nrow(precp_sa4) #82026
# Standardise territory names to match the unemployment data.
precp_sa4$territory_sa4 <- as.character(precp_sa4$territory_sa4)
#precp_stn %>% filter(str_detect(territory_sa4, "^Hobart"))
# Parallel vectors: rainfall_sa4[i] is renamed to unemploy_sa4[i] below.
unemploy_sa4 <- c("Greater Hobart","New South Wales - Central West","Victoria - North West",
"Western Australia - Outback (North and South)","Western Australia - Outback (North and South)",
"Tasmania - South East","Tasmania - West and North West")
rainfall_sa4 <- c("Hobart","Central West","North West","Western Australia - Outback (North)",
"Western Australia - Outback (South)","South East","West and North West")
# Renaming
for(i in 1:length(rainfall_sa4)){
precp_sa4$territory_sa4[precp_sa4$territory_sa4 == rainfall_sa4[i]] <- unemploy_sa4[i]
}
head(precp_sa4) #82026
sum(is.na(precp_sa4$precp)) # zero missing
unique(precp_sa4$territory_sa4)
# NOTE(review): `unemployment` is referenced here but only load()-ed further
# below (load("data/unemployment.RData")); this works only if it is already
# present in the session. Consider moving that load() before this point.
unique(unemployment$territory_sa4)
unemployment$territory_sa4 <- str_trim(unemployment$territory_sa4, side="both")
save("precp_sa4", file="data/precp_sa4.RData")
# Count stations per territory to see which territories have more than one.
head(precp_sa4)
unique(precp_sa4$stationid) # 307 stations
terr_precp_stn_count <- precp_sa4 %>%
select(territory_sa4, stationid) %>%
distinct() %>%
group_by(territory_sa4) %>%
summarise(precp_stn_count = n()) %>%
arrange(desc(precp_stn_count))
nrow(terr_precp_stn_count) # 45 Terr have more than one station
View(terr_precp_stn_count)
# Get SA4 list
pure_sa4_list <- unemployment %>%
select(territory_sa4) %>%
distinct()
pure_sa4_list # 87
# Check the station count in each territory and identify territories with none
lookup_missing_terr_precpstn <- pure_sa4_list %>%
left_join(terr_precp_stn_count, by=c("territory_sa4"= "territory_sa4"))
View(lookup_missing_terr_precpstn)
write.csv(lookup_missing_terr_precpstn, file="data/HPT/precp_stn_count_by_terriority.csv")
# Merge with Unemployment Data
load("data/unemployment.RData")
precp_unemployment <- unemployment %>%
left_join(precp_sa4, by=c("territory_sa4" = "territory_sa4", "date" = "from"))
nrow(precp_unemployment)# 78631
head(precp_unemployment)
# Aggregate by SA4 and date for average precipitation and unemployment rate.
precp_unemployment <- precp_unemployment %>%
group_by(territory_sa4, date) %>%
summarise(precp_mean = mean(precp), unemployment_rate = mean(unemployment_rate))
nrow(precp_unemployment) #
save(precp_unemployment, file="data/unemployment_precp.RData")
sum(is.na(precp_unemployment$precp_mean)) # 1151
# Extra Checking - Optional
View(precp_unemployment)
precp_sa4 %>% filter(str_detect(territory_sa4,"^Australian Capital Territory"))
tail(unemployment)
# Check missing-data count by territory and period.
View(precp_unemployment)
missing_check_precp_unemp <- precp_unemployment %>%
filter(is.na(precp_mean)) %>%
group_by(territory_sa4) %>%
summarise(missing_count = n(), max_date= max(date), min_date=min(date))
write.csv(missing_check_precp_unemp, file="data/HPT/missing_check_precp_unemp.csv")
library(dplyr)
library(ASGS)
library(ASGS.foyer)
# BOM Data source Monthly Rainfall - ftp://ftp.bom.gov.au/anon/home/ncc/www/change/HQmonthlyR/
# Save the unzipped file into your local according to the path mentioned below
# The unzipped file contains a file of weather station list and multiple files of Precipitation data for monthly/annual/seasonal
# The name of weather station list file is "HQMR_stations.txt"
# Load the text files of monthly Precipitation data
bom_precip_monthly_list <- list.files("HQ_monthly_prcp_txt/", pattern="*.month.txt$")
# Read the text file of weather station list (space delimited file)
bom_station_list <- read.delim(paste("HQ_monthly_prcp_txt", "HQMR_stations.txt", sep="/"), sep=" ", header=F, col.names=c("station_id","lat","long","elv", "name1","name2", "name3"))
# Concantenate the station names spreaded out in three different columns into one
bom_station_list <- bom_station_list %>%
mutate(station_name = paste(name1,name2,name3))
# Discard the three columns of names, not longer need to use in future
bom_station_list$name1 <- NULL
bom_station_list$name2 <- NULL
bom_station_list$name3 <- NULL
# Precipitation Data Reading
precipitation_df = data.frame()
for (filename in bom_precip_monthly_list) {
## Getting Station Id from the first line of the text file
first_line <- readLines(paste("HQ_monthly_prcp_txt", filename, sep="/"),n=1)
firstline_data <- str_split(first_line, pattern=" ")
stationid = firstline_data[[1]][8]
## Read the text file of Precipitation into a Dataframe
onestation_df <- read.table(paste("HQ_monthly_prcp_txt", filename, sep="/"), skip=1)
# Add a column of stationId into the df
onestation_df$stationid <- stationid
# Append to the main df
precipitation_df <- rbind(precipitation_df,onestation_df)
}
save(precipitation_df, file="data/HPT/org_precp.RData")
sum(is.na(precipitation_df$V3))
precp_missing_value_percentage = sum(is.na(precipitation_df$V3))/nrow(precipitation_df)
precp_missing_value_percentage # 0 m,v
nrow(precipitation_df) #441154 obs
# glimpse(bom_station_list)
# class(precipitation_df$stationid)
# class(bom_station_list$station_id)
# bom_station_list$station_id is factor and precipitation_df$stationid is character
bom_station_list$station_id <- as.character(bom_station_list$station_id)
# Join with station Table
precp_stn <- precipitation_df %>%
inner_join(bom_station_list, by= c("stationid" = "station_id"))
nrow(precp_stn) #441154 rows
# Reformat the date (V1, V2) from string to "1999-09-01" in Sync wiht Unemploymnet Data
colnames(precp_stn) <- c("from", "to", "precp", "stationid", "lat","long", "elv","stationname")
precp_stn <- precp_stn %>%
mutate(from = as.Date(as.character(from), '%Y%m%d')) %>%
mutate(to = as.Date(as.character(to), '%Y%m%d'))
nrow(precp_stn) #441154 rows
# Filter out the records prior to 1990
precp_stn <- precp_stn %>%
filter(from >= as.Date("1996-01-01") & to >= as.Date("1996-01-31"))
head(precp_stn) #8206
save(precp_stn, file="data/HPT/bom_precp_stn_org.RData")
# Merge with SA4 Data
precp_stn$territory_sa4 <- ASGS::latlon2SA(precp_stn$lat, precp_stn$long, to = "SA4", yr = "2016")
# precp_stn_tmp<-precp_stn
precp_sa4 <- precp_stn
nrow(precp_sa4) #82026
# Standardisation of terriority names with Unemployment Data
precp_sa4$territory_sa4 <- as.character(precp_sa4$territory_sa4)
#precp_stn %>% filter(str_detect(territory_sa4, "^Hobart"))
unemploy_sa4 <- c("Greater Hobart","New South Wales - Central West","Victoria - North West",
"Western Australia - Outback (North and South)","Western Australia - Outback (North and South)",
"Tasmania - South East","Tasmania - West and North West")
rainfall_sa4 <- c("Hobart","Central West","North West","Western Australia - Outback (North)",
"Western Australia - Outback (South)","South East","West and North West")
# Renaming
for(i in 1:length(rainfall_sa4)){
precp_sa4$territory_sa4[precp_sa4$territory_sa4 == rainfall_sa4[i]] <- unemploy_sa4[i]
}
head(precp_sa4) #82026
sum(is.na(precp_sa4$precp)) # zero missing
unique(precp_sa4$territory_sa4)
unique(unemployment$territory_sa4)
unemployment$territory_sa4 <- str_trim(unemployment$territory_sa4, side="both")
save("precp_sa4", file="data/precp_sa4.RData")
# To Check are there any terriorities which has more than one station, count stn Id by terriority
head(precp_sa4)
unique(precp_sa4$stationid) # 307 stations
terr_precp_stn_count <- precp_sa4 %>%
select(territory_sa4, stationid) %>%
distinct() %>%
group_by(territory_sa4) %>%
summarise(precp_stn_count = n()) %>%
arrange(desc(precp_stn_count))
nrow(terr_precp_stn_count) # 45 Terr have more than one station
View(terr_precp_stn_count)
# Get SA4 list
pure_sa4_list <- unemployment %>%
select(territory_sa4) %>%
distinct()
pure_sa4_list # 87
# Check the count of station in each terriority and identify which terriorities have no station
lookup_missing_terr_precpstn <- pure_sa4_list %>%
left_join(terr_precp_stn_count, by=c("territory_sa4"= "territory_sa4"))
View(lookup_missing_terr_precpstn)
write.csv(lookup_missing_terr_precpstn, file="data/HPT/precp_stn_count_by_terriority.csv")
# Merge with Unemployment Data
load("data/unemployment.RData")
precp_unemployment <- unemployment %>%
left_join(precp_sa4, by=c("territory_sa4" = "territory_sa4", "date" = "from"))
nrow(precp_unemployment)# 78631
head(precp_unemployment)
# Aggreate by SA4 and date for Average Precipitation and Unemployment Rate
precp_unemployment <- precp_unemployment %>%
group_by(territory_sa4, date) %>%
summarise(precp_mean = mean(precp), unemployment_rate = mean(unemployment_rate))
nrow(precp_unemployment) #
save(precp_unemployment, file="data/unemployment_precp.RData")
sum(is.na(precp_unemployment$precp_mean)) # 1151
# Extra Checking - Optional
View(precp_unemployment)
precp_sa4 %>% filter(str_detect(territory_sa4,"^Australian Capital Territory"))
tail(unemployment)
# Check missing data count by terriority and period
View(precp_unemployment)
missing_check_precp_unemp <- precp_unemployment %>%
filter(is.na(precp_mean)) %>%
group_by(territory_sa4) %>%
summarise(missing_count = n(), max_date= max(date), min_date=min(date))
write.csv(missing_check_precp_unemp, file="data/HPT/missing_check_precp_unemp.csv")
|
\name{tundra_gbm_train_fn}
\alias{tundra_gbm_train_fn}
\title{Tundra GBM wrapper}
\usage{
tundra_gbm_train_fn(dataframe)
}
\description{
Tundra GBM wrapper
}
| /man/tundra_gbm_train_fn.Rd | no_license | tonglu/tundra | R | false | false | 159 | rd | \name{tundra_gbm_train_fn}
\alias{tundra_gbm_train_fn}
\title{Tundra GBM wrapper}
\usage{
tundra_gbm_train_fn(dataframe)
}
\description{
Tundra GBM wrapper
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{waldCI}
\alias{waldCI}
\title{Compute Wald Confidence Interval}
\usage{
waldCI(estimate, se, df = Inf, level = 0.95)
}
\arguments{
\item{estimate}{an estimated coefficient.}
\item{se}{standard error of \code{estimate}.}
\item{df}{degrees of freedom associated with \code{estimate}. \code{df = Inf} is
allowed.}
\item{level}{level of confidence interval.}
}
\value{
a matrix of lower and upper confidence interval.
}
\description{
Compute Wald Confidence Interval
}
\details{
This code is greatly inspired by code from the \pkg{lmerTest}
package.
}
\keyword{internal}
| /man/waldCI.Rd | no_license | cran/vici | R | false | true | 664 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{waldCI}
\alias{waldCI}
\title{Compute Wald Confidence Interval}
\usage{
waldCI(estimate, se, df = Inf, level = 0.95)
}
\arguments{
\item{estimate}{an estimated coefficient.}
\item{se}{standard error of \code{estimate}.}
\item{df}{degrees of freedom associate to \code{estimate}. \code{df = Inf} is
allowed.}
\item{level}{level of confidence interval.}
}
\value{
a matrix of lower and upper confidence interval.
}
\description{
Compute Wald Confidence Interval
}
\details{
This code is greatly inspired by code from the \pkg{lmerTest}
package.
}
\keyword{internal}
|
# Anomaly screening for facility DWH upload logs: flags multivariate outliers
# via Mahalanobis distance, compares TX_CURR against KHIS, and classifies
# upload freshness per facility.
# NOTE(review): setwd() in a script hurts portability; prefer project-relative
# paths or an RStudio project.
setwd("D:/OneDrive/R Work/FacilityAnomalies")
library(dplyr)
#read in raw data
raw_data<-read.csv('./DWHLogs_Feb26.csv',stringsAsFactors=FALSE)
#Inspect the structure of the raw data
str(raw_data)
#Change all columnnames to lowercase
names(raw_data)<-tolower(names(raw_data))
#Derive the facility tier from keywords in the facility name
raw_data <- raw_data %>%
mutate(FacilityType = ifelse(grepl('Hospital', facilityname), "Hospital",
ifelse(grepl("Dispensary", facilityname), "Dispensary", "Health Center")))
# Keep only the chosen dashboard refresh and the first record per facility.
df<-raw_data %>% filter(dashboardrefreshdate=='2/22/2021' & num==1)
df$dateuploaded<-as.Date(df$dateuploaded,format="%m/%d/%Y")
df$siteabstractiondate<-as.Date(df$siteabstractiondate,format="%m/%d/%Y")
# diff_days: upload lag after site abstraction; days_eom: days between
# abstraction and the end-of-month reference date (2021-01-31).
df <- df%>%
mutate(diff_days=difftime(df$dateuploaded,df$siteabstractiondate,units=c("days")),
days_eom=(difftime(as.Date("2021-01-31",origin = "1899-12-30"),df$siteabstractiondate,units=c("days")))
)
vars <- c("txcurr","khis_txcurr", "patients", "art", "visits", "pharm", "labs", "exits", "diff_days")
#Convert the current ART Columns from Character to Numeric
df[,vars]<-lapply(df[,vars],as.numeric)
# Replace NA with 0.
# NOTE(review): this zero-fills every column of df, not just `vars`;
# restrict to df[, vars] if other columns should keep their NAs.
df[is.na(df)] <- 0
#Get the Means of the columns specified
colMeans(df[,vars])
#Get the CoVariance
cov(df[,vars])
#Calculate the mahalanobis distance
md <-mahalanobis(df[,vars],colMeans(df[,vars]),cov(df[,vars]))
# Attach the (rounded) Mahalanobis distance to each row
df$md<-round(md,3)
# Flag Mahalanobis outliers (distance above an ad-hoc threshold of 10)
df$outlier_maha <-FALSE
df$outlier_maha[df$md>10]<-TRUE
# Percent variance of DWH TX_CURR relative to KHIS TX_CURR
df$txcurr_var<-(1-(df$txcurr/df$khis_txcurr))*100
df$txcurr_var=round(df$txcurr_var,0)
# Classify upload freshness and whether the variance is within +/-5%.
df <-df %>% mutate(dbStatus = ifelse((substr(dateuploaded, 1, 4)<2021),"Site Uploaded before 2021",
ifelse(diff_days<0,"Site Abstraction issue",
ifelse(diff_days>=0 & diff_days<=30,"Data uploaded is within 30 days","Stale DB"))),
var_status=ifelse(txcurr_var>=-5 & txcurr_var <=5,"Acceptable Variance","Out of bounds"))
df <-df %>% mutate(dbStatus_eom = ifelse(days_eom<=0, "Uploaded bef Jan31(<=5 days) & after Jan","Uploaded more than 5 days before Jan31"))
# Facilities with at least one empty (zero) core line-list extract.
df_inc <-df %>% select (mflcode,facilityname,county,ctpartner,dateuploaded,patients,art,visits,labs,pharm,exits)%>%
filter(patients==0 | art==0 | visits==0 | pharm==0 | labs==0| exits ==0)
write.csv(df_inc,"inc.csv")
# Summary counts and TX_CURR totals by EMR/status/variance classification.
statsbyEMR<-df %>% group_by(dbStatus_eom,dbStatus,emrstatus,emr,var_status,txcurr_var) %>%
summarise(sites=n(),
txcurr=sum(txcurr),
k_txcurr=sum(khis_txcurr)) %>%
ungroup()
# Flat export of the per-facility classification.
out<-df%>% select(mflcode,facilityname,county,ctpartner,emr,emrstatus,dateuploaded,siteabstractiondate,txcurr,khis_txcurr,txcurr_var,var_status,dbStatus,dbStatus_eom)
# Facilities uploaded within 30 days but reporting less than KHIS.
ken_ob<-df %>% select (facilityname,dateuploaded,siteabstractiondate,txcurr,khis_txcurr,days_eom,dbStatus,var_status,emr) %>%
filter(dbStatus=="Data uploaded is within 30 days" & txcurr_var>0)
library(dplyr)
#read in raw data
raw_data<-read.csv('./DWHLogs_Feb26.csv',stringsAsFactors=FALSE)
#Variables to check for outliers
str(raw_data)
#Change all columnnames to lowercase
names(raw_data)<-tolower(names(raw_data))
#Get the Tier for the Facilities
raw_data <- raw_data %>%
mutate(FacilityType = ifelse(grepl('Hospital', facilityname), "Hospital",
ifelse(grepl("Dispensary", facilityname), "Dispensary", "Health Center")))
df<-raw_data %>% filter(dashboardrefreshdate=='2/22/2021' & num==1)
df$dateuploaded<-as.Date(df$dateuploaded,format="%m/%d/%Y")
df$siteabstractiondate<-as.Date(df$siteabstractiondate,format="%m/%d/%Y")
df <- df%>%
mutate(diff_days=difftime(df$dateuploaded,df$siteabstractiondate,units=c("days")),
days_eom=(difftime(as.Date("2021-01-31",origin = "1899-12-30"),df$siteabstractiondate,units=c("days")))
)
vars <- c("txcurr","khis_txcurr", "patients", "art", "visits", "pharm", "labs", "exits", "diff_days")
#Convert the current ART Columns from Character to Numeric
df[,vars]<-lapply(df[,vars],as.numeric)
#Convert NA to 0
df[is.na(df)] <- 0
#Get the Means of the columns specified
colMeans(df[,vars])
#Get the CoVariance
cov(df[,vars])
#Calculate the mahalanobis distance
md <-mahalanobis(df[,vars],colMeans(df[,vars]),cov(df[,vars]))
#Add the Mala
df$md<-round(md,3)
#Mahalanobis Outliers
df$outlier_maha <-FALSE
df$outlier_maha[df$md>10]<-TRUE
df$txcurr_var<-(1-(df$txcurr/df$khis_txcurr))*100
df$txcurr_var=round(df$txcurr_var,0)
df <-df %>% mutate(dbStatus = ifelse((substr(dateuploaded, 1, 4)<2021),"Site Uploaded before 2021",
ifelse(diff_days<0,"Site Abstraction issue",
ifelse(diff_days>=0 & diff_days<=30,"Data uploaded is within 30 days","Stale DB"))),
var_status=ifelse(txcurr_var>=-5 & txcurr_var <=5,"Acceptable Variance","Out of bounds"))
df <-df %>% mutate(dbStatus_eom = ifelse(days_eom<=0, "Uploaded bef Jan31(<=5 days) & after Jan","Uploaded more than 5 days before Jan31"))
df_inc <-df %>% select (mflcode,facilityname,county,ctpartner,dateuploaded,patients,art,visits,labs,pharm,exits)%>%
filter(patients==0 | art==0 | visits==0 | pharm==0 | labs==0| exits ==0)
write.csv(df_inc,"inc.csv")
statsbyEMR<-df %>% group_by(dbStatus_eom,dbStatus,emrstatus,emr,var_status,txcurr_var) %>%
summarise(sites=n(),
txcurr=sum(txcurr),
k_txcurr=sum(khis_txcurr)) %>%
ungroup()
out<-df%>% select(mflcode,facilityname,county,ctpartner,emr,emrstatus,dateuploaded,siteabstractiondate,txcurr,khis_txcurr,txcurr_var,var_status,dbStatus,dbStatus_eom)
ken_ob<-df %>% select (facilityname,dateuploaded,siteabstractiondate,txcurr,khis_txcurr,days_eom,dbStatus,var_status,emr) %>%
filter(dbStatus=="Data uploaded is within 30 days" & txcurr_var>0)
|
#---------------- Close all devices and delete all variables. -------------------------------------#
rm(list=ls(all=TRUE)) # clear workspace
graphics.off() # close any open graphics
closeAllConnections() # close any open connections to files
#--------------------------------------------------------------------------------------------------#
#---------------- Load required libraries ---------------------------------------------------------#
library(PEcAn.all)
library(PEcAn.SIPNET)
library(PEcAn.LINKAGES)
library(PEcAn.visualization)
library(PEcAnAssimSequential)
library(nimble)
library(lubridate)
library(PEcAn.visualization)
#PEcAnAssimSequential::
library(rgdal) # need to put in assim.sequential
library(ncdf4) # need to put in assim.sequential
library(purrr)
library(listviewer)
library(dplyr)
library(future)
library(tictoc)
#--------------------------------------------------------------------------------------------------#
######################################## INTIAL SET UP STUFF #######################################
work_dir <- "/data/bmorrison/sda/lai"
# delete an old run
unlink(c('run','out','SDA'),recursive = T)
# grab multi-site XML file
settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_8_Sites_2009.xml")
# Collect the site id of every <run> entry in the multi-site settings.
# The original built strings like "settings$run$settings.1$site$id" and ran
# them through eval(parse(...)); direct list indexing is equivalent, safer,
# and easier to read. (settings$run elements are named "settings.<i>" in
# PEcAn MultiSettings, so positional [[i]] access matches that ordering.)
observation <- c()
for (i in seq_along(settings$run)) {
  observation <- c(observation, settings$run[[i]]$site$id)
}
#observation = "1000000048"
# what is this step for???? is this to get the site locations for the map??
if ("MultiSettings" %in% class(settings)) site.ids <- settings %>%
map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character()
# sample from parameters used for both sensitivity analysis and Ens
get.parameter.samples(settings,
ens.sample.method = settings$ensemble$samplingspace$parameters$method)
## Aside: if method were set to unscented, would take minimal changes to do UnKF
#--------------------------------------------------------------------------------------------------#
############################ EXTRACT SITE INFORMATION FROM XML TO DOWNLOAD DATA + RUN SDA ###########################
################ Not working on interactive job on MODEX
PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****")
# Connection parameters for the local BETY database.
bety <- list(user='bety', password='bety', host='localhost',
             dbname='bety', driver='PostgreSQL',write=TRUE)
con <- PEcAn.DB::db.open(bety)
bety$con <- con
# Site ids collected from the XML <run> blocks above.
site_ID <- observation
# Query the site rows, deriving lon/lat from the centroid of each site's geometry;
# {ids*} is glue_sql's collapsed-list interpolation of site_ID.
suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon,
                                            ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})",
                                            ids = site_ID, .con = con))
suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry))
suppressWarnings(qry_results <- DBI::dbFetch(qry_results))
# Reshape the query result into the list structure expected by the PEcAn
# remote-data extraction helpers below.
site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat,
                  lon=qry_results$lon, time_zone=qry_results$time_zone)
#
# ###################### EXTRACT AGB DATA + REFORMAT LONG VS. WIDE STYLE #####################################
# ### this is for LandTrendr data ###
#
# # output folder for the data
# data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data"
#
# # extract the data
# med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean",
# data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]]
#
# sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean",
# data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]]
#
#
# ### temporary fix to make agb data long vs. wide format to match modis data. ###
# ndates = colnames(med_agb_data)[-c(1:2)]
#
# med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE)
# med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)])
#
# sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE)
# sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)])
#
# agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value))
# names(agb_data) = c("Site_ID", "Date", "Median", "SD")
# agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE)
#
# # save AGB data into long style
# save(agb_data, file = '/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata')
#
#
# # ####################### Extract MODISTools LAI data ##############################
#
# library(doParallel)
# cl <- parallel::makeCluster(10, outfile="")
# doParallel::registerDoParallel(cl)
#
# start = Sys.time()
# # keep QC_filter on for this because bad LAI values crash the SDA. Progress can be turned off if it annoys you.
# data = foreach(i=1:length(site_info$site_id), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2017/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T, progress = T)
# end = Sys.time()
# difference = end-start
# stopCluster(cl)
#
# # already in long format style for dataframe
# output = as.data.frame(data)
# save(output, file = '/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_update_sites.Rdata')
#
# # change tile names to the site name
# for (i in 1:length(site_info$site_name))
# {
# name = as.character(site_info$site_id[i], stringsAsFactor = F)
# g = which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3))
# output$tile[g] = name
# }
# # remove extra data
# output = output[,c(4,2,8,10)]
# colnames(output) = names(agb_data)
#
# # compute peak lai per year
# data = output
# peak_lai = data.frame()
# years = unique(year(as.Date(data$Date, "%Y-%m-%d")))
# for (i in seq_along(years))
# {
# d = data[grep(data$Date, pattern = years[i]),]
# sites = unique(d$Site_ID)
# for (j in seq_along(sites))
# {
# index = which(d$Site_ID == site_info$site_id[j]) #which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3))
# site = d[index,]
# if (length(index) > 0)
# {
# # peak lai is the max value that is the value <95th quantile to remove potential outlier values
# max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1]
# peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD)
# peak_lai = rbind(peak_lai, peak)
#
# }
# }
# }
#
# # a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014.
# peak_lai$SD[peak_lai$SD < 0.66] = 0.66
#
# #output data
# names(peak_lai) = c("Site_ID", "Date", "Median", "SD")
# save(peak_lai, file = '/data/bmorrison/sda/lai/modis_lai_data/peak_lai_output_update_sites.Rdata')
#
#
# ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ########################
# #################
# load('/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata')
# load( '/data/bmorrison/sda/lai/modis_lai_data/peak_lai_output_update_sites.Rdata')
# # output likes to make factors ..... :/... so this unfactors them
# peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F))
# peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F)
#
# observed_vars = c("AbvGrndWood", "LAI")
#
#
# # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets
# observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T)
# names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai")
#
# # order by year
# observed_data = observed_data[order(observed_data$Date),]
#
# #sort by date
# dates = sort(unique(observed_data$Date))
#
# # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon)
# obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai)
# obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE)
#
# obs.mean = obs.mean %>%
# split(.$date)
#
# # change the dates to be middle of the year
# date.obs <- strsplit(names(obs.mean), "_") %>%
# map_chr(~.x[2]) %>% paste0(.,"/07/15")
#
# obs.mean = names(obs.mean) %>%
# map(function(namesl){
# obs.mean[[namesl]] %>%
# split(.$site_id) %>%
# map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>%
# setNames(site.ids)
# }) %>% setNames(date.obs)
#
# # remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary)
# names = date.obs
# for (name in names)
# {
# for (site in names(obs.mean[[name]]))
# {
# na_index = which(!(is.na(obs.mean[[ name]][[site]])))
# colnames = names(obs.mean[[name]][[site]])
# if (length(na_index) > 0)
# {
# obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index]
# row.names(obs.mean[[name]][[site]]) = NULL
# }
# }
# }
#
# # fillers are 0's for the covariance matrix. This will need to change for differing size matrixes when more variables are added in.
# filler_0 = as.data.frame(matrix(0, ncol = length(observed_vars), nrow = nrow(observed_data)))
# names(filler_0) = paste0("h", seq_len(length(observed_vars)))
#
# # create obs.cov dataframe -->list by date
# obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai, filler_0)
# obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F)
#
# obs.cov = obs.cov %>%
# split(.$date)
#
# #sublist by date --> site
# obs.cov = names(obs.cov) %>%
# map(function(namesl){
# obs.cov[[namesl]] %>%
# split(.$site_id) %>%
# map(~diag(.x[3:4]^2, nrow = 2, ncol = 2)) %>%
# setNames(site.ids)}) %>%
# setNames(date.obs)
#
# # remove NA/missing observations from covariance matrix and removes NA values to restructure size of covar matrix
# names = names(obs.cov)
# for (name in names)
# {
# for (site in names(obs.cov[[name]]))
# {
# na_index = which(is.na(obs.cov[[ name]][[site]]))
# if (length(na_index) > 0)
# {
# n_good_vars = length(observed_vars)-length(na_index)
# obs.cov[[name]][[site]] = matrix(obs.cov[[name]][[site]][-na_index], nrow = n_good_vars, ncol = n_good_vars)
# }
# }
# }
#
# # save these lists for future use.
# save(obs.mean, file = '/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata')
# save(obs.cov, file = '/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata')
# save(date.obs, file = '/data/bmorrison/sda/lai/date_obs_update_sites.Rdata')
################################ START THE SDA ########################################
# Pre-built observation lists (mean and covariance, keyed by date then site)
# saved earlier by the commented-out preparation code above.
load('/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata')
load('/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata')
date.obs = names(obs.mean)
new.settings <- PEcAn.settings::prepare.settings(settings)
#unlink(c('run','out','SDA'),recursive = T)
# Run the multi-site ensemble Kalman filter state data assimilation.
sda.enkf.multisite(new.settings,
                   obs.mean =obs.mean,
                   obs.cov = obs.cov,
                   keepNC = TRUE,
                   forceRun = TRUE,
                   control=list(trace=TRUE,
                                FF=FALSE,
                                interactivePlot=FALSE,
                                TimeseriesPlot=TRUE,
                                BiasPlot=FALSE,
                                plot.title=NULL,
                                facet.plots=4,
                                debug=FALSE,
                                pause=FALSE,
                                Profiling = FALSE,
                                OutlierDetection=FALSE))
### FOR PLOTTING after analysis if TimeseriesPlot == FALSE)
# sda.output.Rdata provides Viz.output, FORECAST and ANALYSIS used below.
load('/data/bmorrison/sda/lai/SDA/sda.output.Rdata')
facetg=4
readsFF=NULL
obs.mean = Viz.output[[2]]
obs.cov = Viz.output[[3]]
obs.times = names(obs.mean)
PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=facetg, readsFF=NULL)
| /modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup_2.R | permissive | PecanProject/pecan | R | false | false | 12,742 | r |
#---------------- Close all devices and delete all variables. -------------------------------------#
rm(list=ls(all=TRUE)) # clear workspace
graphics.off() # close any open graphics
closeAllConnections() # close any open connections to files
#--------------------------------------------------------------------------------------------------#
#---------------- Load required libraries ---------------------------------------------------------#
library(PEcAn.all)
library(PEcAn.SIPNET)
library(PEcAn.LINKAGES)
library(PEcAn.visualization)
library(PEcAnAssimSequential)
library(nimble)
library(lubridate)
library(PEcAn.visualization)
#PEcAnAssimSequential::
library(rgdal) # need to put in assim.sequential
library(ncdf4) # need to put in assim.sequential
library(purrr)
library(listviewer)
library(dplyr)
library(future)
library(tictoc)
#--------------------------------------------------------------------------------------------------#
######################################## INITIAL SET UP STUFF ######################################
# Working directory holding the SDA inputs/outputs for this analysis.
work_dir <- "/data/bmorrison/sda/lai"

# Delete artifacts from an old run so the SDA starts clean.
unlink(c("run", "out", "SDA"), recursive = TRUE)

# Grab the multi-site XML settings file.
settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_8_Sites_2009.xml")

# Collect the site id of every <run> entry.  Positional list access replaces
# the previous eval(parse(paste0("settings$run$settings.", i, ...))) string
# construction; the elements visited are the same.
# NOTE(review): the original comment said this "doesn't work for one site" --
# confirm the single-run XML layout before relying on it there.
observation <- unlist(lapply(settings$run, function(run) run$site$id))
#observation = "1000000048"

# Pull the site ids out of a MultiSettings object (used later to label the
# per-site observation lists).
if (inherits(settings, "MultiSettings")) site.ids <- settings %>%
  map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character()

# Sample from parameters used for both sensitivity analysis and the ensemble.
get.parameter.samples(settings,
                      ens.sample.method = settings$ensemble$samplingspace$parameters$method)
## Aside: if method were set to unscented, would take minimal changes to do UnKF
#--------------------------------------------------------------------------------------------------#
############################ EXTRACT SITE INFORMATION FROM XML TO DOWNLOAD DATA + RUN SDA ###########################
################ Not working on interactive job on MODEX
PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****")
# Connection parameters for the local BETY database.
bety <- list(user='bety', password='bety', host='localhost',
             dbname='bety', driver='PostgreSQL',write=TRUE)
con <- PEcAn.DB::db.open(bety)
bety$con <- con
# Site ids collected from the XML <run> blocks above.
site_ID <- observation
# Query the site rows, deriving lon/lat from the centroid of each site's geometry;
# {ids*} is glue_sql's collapsed-list interpolation of site_ID.
suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon,
                                            ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})",
                                            ids = site_ID, .con = con))
suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry))
suppressWarnings(qry_results <- DBI::dbFetch(qry_results))
# Reshape the query result into the list structure expected by the PEcAn
# remote-data extraction helpers below.
site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat,
                  lon=qry_results$lon, time_zone=qry_results$time_zone)
#
# ###################### EXTRACT AGB DATA + REFORMAT LONG VS. WIDE STYLE #####################################
# ### this is for LandTrendr data ###
#
# # output folder for the data
# data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data"
#
# # extract the data
# med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean",
# data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]]
#
# sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean",
# data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]]
#
#
# ### temporary fix to make agb data long vs. wide format to match modis data. ###
# ndates = colnames(med_agb_data)[-c(1:2)]
#
# med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE)
# med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)])
#
# sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE)
# sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)])
#
# agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value))
# names(agb_data) = c("Site_ID", "Date", "Median", "SD")
# agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE)
#
# # save AGB data into long style
# save(agb_data, file = '/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata')
#
#
# # ####################### Extract MODISTools LAI data ##############################
#
# library(doParallel)
# cl <- parallel::makeCluster(10, outfile="")
# doParallel::registerDoParallel(cl)
#
# start = Sys.time()
# # keep QC_filter on for this because bad LAI values crash the SDA. Progress can be turned off if it annoys you.
# data = foreach(i=1:length(site_info$site_id), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2017/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T, progress = T)
# end = Sys.time()
# difference = end-start
# stopCluster(cl)
#
# # already in long format style for dataframe
# output = as.data.frame(data)
# save(output, file = '/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_update_sites.Rdata')
#
# # change tile names to the site name
# for (i in 1:length(site_info$site_name))
# {
# name = as.character(site_info$site_id[i], stringsAsFactor = F)
# g = which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3))
# output$tile[g] = name
# }
# # remove extra data
# output = output[,c(4,2,8,10)]
# colnames(output) = names(agb_data)
#
# # compute peak lai per year
# data = output
# peak_lai = data.frame()
# years = unique(year(as.Date(data$Date, "%Y-%m-%d")))
# for (i in seq_along(years))
# {
# d = data[grep(data$Date, pattern = years[i]),]
# sites = unique(d$Site_ID)
# for (j in seq_along(sites))
# {
# index = which(d$Site_ID == site_info$site_id[j]) #which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3))
# site = d[index,]
# if (length(index) > 0)
# {
# # peak lai is the max value that is the value <95th quantile to remove potential outlier values
# max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1]
# peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD)
# peak_lai = rbind(peak_lai, peak)
#
# }
# }
# }
#
# # a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014.
# peak_lai$SD[peak_lai$SD < 0.66] = 0.66
#
# #output data
# names(peak_lai) = c("Site_ID", "Date", "Median", "SD")
# save(peak_lai, file = '/data/bmorrison/sda/lai/modis_lai_data/peak_lai_output_update_sites.Rdata')
#
#
# ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ########################
# #################
# load('/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata')
# load( '/data/bmorrison/sda/lai/modis_lai_data/peak_lai_output_update_sites.Rdata')
# # output likes to make factors ..... :/... so this unfactors them
# peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F))
# peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F)
#
# observed_vars = c("AbvGrndWood", "LAI")
#
#
# # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets
# observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T)
# names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai")
#
# # order by year
# observed_data = observed_data[order(observed_data$Date),]
#
# #sort by date
# dates = sort(unique(observed_data$Date))
#
# # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon)
# obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai)
# obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE)
#
# obs.mean = obs.mean %>%
# split(.$date)
#
# # change the dates to be middle of the year
# date.obs <- strsplit(names(obs.mean), "_") %>%
# map_chr(~.x[2]) %>% paste0(.,"/07/15")
#
# obs.mean = names(obs.mean) %>%
# map(function(namesl){
# obs.mean[[namesl]] %>%
# split(.$site_id) %>%
# map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>%
# setNames(site.ids)
# }) %>% setNames(date.obs)
#
# # remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary)
# names = date.obs
# for (name in names)
# {
# for (site in names(obs.mean[[name]]))
# {
# na_index = which(!(is.na(obs.mean[[ name]][[site]])))
# colnames = names(obs.mean[[name]][[site]])
# if (length(na_index) > 0)
# {
# obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index]
# row.names(obs.mean[[name]][[site]]) = NULL
# }
# }
# }
#
# # fillers are 0's for the covariance matrix. This will need to change for differing size matrixes when more variables are added in.
# filler_0 = as.data.frame(matrix(0, ncol = length(observed_vars), nrow = nrow(observed_data)))
# names(filler_0) = paste0("h", seq_len(length(observed_vars)))
#
# # create obs.cov dataframe -->list by date
# obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai, filler_0)
# obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F)
#
# obs.cov = obs.cov %>%
# split(.$date)
#
# #sublist by date --> site
# obs.cov = names(obs.cov) %>%
# map(function(namesl){
# obs.cov[[namesl]] %>%
# split(.$site_id) %>%
# map(~diag(.x[3:4]^2, nrow = 2, ncol = 2)) %>%
# setNames(site.ids)}) %>%
# setNames(date.obs)
#
# # remove NA/missing observations from covariance matrix and removes NA values to restructure size of covar matrix
# names = names(obs.cov)
# for (name in names)
# {
# for (site in names(obs.cov[[name]]))
# {
# na_index = which(is.na(obs.cov[[ name]][[site]]))
# if (length(na_index) > 0)
# {
# n_good_vars = length(observed_vars)-length(na_index)
# obs.cov[[name]][[site]] = matrix(obs.cov[[name]][[site]][-na_index], nrow = n_good_vars, ncol = n_good_vars)
# }
# }
# }
#
# # save these lists for future use.
# save(obs.mean, file = '/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata')
# save(obs.cov, file = '/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata')
# save(date.obs, file = '/data/bmorrison/sda/lai/date_obs_update_sites.Rdata')
################################ START THE SDA ########################################
# Pre-built observation lists (mean and covariance, keyed by date then site)
# saved earlier by the commented-out preparation code above.
load('/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata')
load('/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata')
date.obs = names(obs.mean)
new.settings <- PEcAn.settings::prepare.settings(settings)
#unlink(c('run','out','SDA'),recursive = T)
# Run the multi-site ensemble Kalman filter state data assimilation.
sda.enkf.multisite(new.settings,
                   obs.mean =obs.mean,
                   obs.cov = obs.cov,
                   keepNC = TRUE,
                   forceRun = TRUE,
                   control=list(trace=TRUE,
                                FF=FALSE,
                                interactivePlot=FALSE,
                                TimeseriesPlot=TRUE,
                                BiasPlot=FALSE,
                                plot.title=NULL,
                                facet.plots=4,
                                debug=FALSE,
                                pause=FALSE,
                                Profiling = FALSE,
                                OutlierDetection=FALSE))
### FOR PLOTTING after analysis if TimeseriesPlot == FALSE)
# sda.output.Rdata provides Viz.output, FORECAST and ANALYSIS used below.
load('/data/bmorrison/sda/lai/SDA/sda.output.Rdata')
facetg=4
readsFF=NULL
obs.mean = Viz.output[[2]]
obs.cov = Viz.output[[3]]
obs.times = names(obs.mean)
PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=facetg, readsFF=NULL)
|
library('foreach')
library('itertools')
library('Brobdingnag')
# Pincus-style weighting evaluated column-wise on a grid chunk: for each
# column of x, sum exp(lambda * x * fn(x)) / exp(lambda * fn(x)).
pincus_formula <- function(x, fn, lambda = 10) {
  numerator <- exp(lambda * x * fn(x))
  denominator <- exp(lambda * fn(x))
  colSums(numerator / denominator)
}
# ...: parameter grids
# fn: function to optimize
# lambda: constant that should be large to use the Pincus theorem
# chunk_size: Size of subset of Cartesian product of grid.
# Walks the Cartesian product of the parameter grids in chunks (so the full
# grid is never materialized at once), evaluates pincus_formula on each chunk,
# then averages the per-chunk column sums over the total number of grid points.
pincus <- function(..., fn, lambda, chunk_size = 10) {
  # Lazy iterator over chunks of the Cartesian product of the grids.
  iter_chunk <- ichunk(product(...), chunkSize = chunk_size)
  # NOTE(review): %dopar% needs a registered parallel backend (e.g. doParallel);
  # without one, foreach warns and executes sequentially.
  out <- foreach(chunk = iter_chunk) %dopar% {
    # Each chunk element is a list of coordinates; bind them row-wise into a matrix.
    grid <- do.call(rbind, lapply(chunk, function(x) unlist(x)))
    list(
      pincus = pincus_formula(grid, fn = fn, lambda = lambda),
      count = nrow(grid)
    )
  }
  # Combine per-chunk results: stack the column sums, then normalize by the
  # total number of evaluated grid points.
  col_sums <- do.call(rbind, lapply(out, function(x) x$pincus))
  count <- do.call(rbind, lapply(out, function(x) x$count))
  colSums(col_sums) / sum(count)
}
# Objective surface evaluated row-wise on a two-column matrix of (x, y)
# coordinates: sin(r)/r - r where r is the distance from the origin.
f <- function(x) {
  r <- sqrt(x[, 1]^2 + x[, 2]^2)
  sin(r) / r - r
}
# Two 100-point grids on [-5, 5]; pincus() expands their Cartesian product
# (10,000 points) in chunks of 1,000.
x <- y <- seq(-5, 5, length = 100)
pincus_out <- pincus(x, y, fn = f, lambda = 10, chunk_size = 1000)
print(pincus_out)
| /pincus.r | no_license | ramhiser/pincus | R | false | false | 1,111 | r | library('foreach')
library('itertools')
library('Brobdingnag')
# Pincus-style weighting evaluated column-wise on a grid chunk: for each
# column of x, sum exp(lambda * x * fn(x)) / exp(lambda * fn(x)).
pincus_formula <- function(x, fn, lambda = 10) {
  numerator <- exp(lambda * x * fn(x))
  denominator <- exp(lambda * fn(x))
  colSums(numerator / denominator)
}
# ...: parameter grids
# fn: function to optimize
# lambda: constant that should be large to use the Pincus theorem
# chunk_size: Size of subset of Cartesian product of grid.
# Walks the Cartesian product of the parameter grids in chunks (so the full
# grid is never materialized at once), evaluates pincus_formula on each chunk,
# then averages the per-chunk column sums over the total number of grid points.
pincus <- function(..., fn, lambda, chunk_size = 10) {
  # Lazy iterator over chunks of the Cartesian product of the grids.
  iter_chunk <- ichunk(product(...), chunkSize = chunk_size)
  # NOTE(review): %dopar% needs a registered parallel backend (e.g. doParallel);
  # without one, foreach warns and executes sequentially.
  out <- foreach(chunk = iter_chunk) %dopar% {
    # Each chunk element is a list of coordinates; bind them row-wise into a matrix.
    grid <- do.call(rbind, lapply(chunk, function(x) unlist(x)))
    list(
      pincus = pincus_formula(grid, fn = fn, lambda = lambda),
      count = nrow(grid)
    )
  }
  # Combine per-chunk results: stack the column sums, then normalize by the
  # total number of evaluated grid points.
  col_sums <- do.call(rbind, lapply(out, function(x) x$pincus))
  count <- do.call(rbind, lapply(out, function(x) x$count))
  colSums(col_sums) / sum(count)
}
# Objective surface evaluated row-wise on a two-column matrix of (x, y)
# coordinates: sin(r)/r - r where r is the distance from the origin.
f <- function(x) {
  r <- sqrt(x[, 1]^2 + x[, 2]^2)
  sin(r) / r - r
}
# Two 100-point grids on [-5, 5]; pincus() expands their Cartesian product
# (10,000 points) in chunks of 1,000.
x <- y <- seq(-5, 5, length = 100)
pincus_out <- pincus(x, y, fn = f, lambda = 10, chunk_size = 1000)
print(pincus_out)
|
library(tinytest)
library(ggiraph)
library(ggplot2)
library(xml2)
source("setup.R")
# geom_point_interactive ----
{
  # Run the shared layer test template from setup.R against this layer name.
  eval(test_geom_layer, envir = list(name = "geom_point_interactive"))
}
# hover_nearest ---
{
  # hover_nearest = TRUE should mark the rendered point with nearest="true".
  gr <- ggplot(mtcars, aes(x =mpg, y = disp, hover_nearest = TRUE)) +
    geom_point_interactive()
  doc <- dsvg_doc({
    print(gr)
  })
  expect_equal(xml_attr(xml_find_first(doc, ".//circle"), "nearest"), "true")
}
# test all shapes ----
{
  doc <- dsvg_doc({
    # Lay out all 26 point shapes (0..25) on a grid, one colour and one
    # tooltip/info value per shape.
    sxy <- seq(from = 0.2, to = 0.95, by = 0.15)
    s <- seq.int(from = 0, to = 25)
    dat <- data.frame(
      x = head(rep(sxy, 5), length(s)),
      y = head(rep(sxy, each = 6), length(s)),
      s = s
    )
    p <- ggplot(dat, aes(x =x, y = y, shape = I(s), col=as.character(s), tooltip = s, info= s)) +
      geom_point_interactive(extra_interactive_params = "info")
    print(p)
  })
  # sxy/s/dat were assigned in the calling environment by the dsvg_doc() block
  # above ({ } does not create a new scope), so s is visible here.
  for (i in s) {
    # Every shape should produce at least one SVG element tagged with its id.
    nodes <- xml_find_all(doc, paste0(".//*[@info='", i, "']"))
    expect_true(length(nodes) > 0, info = paste("Shape", i, "is drawn"))
  }
}
{
  # partialPointGrob() applied to the sample grob with pch = 2 is expected to
  # reduce to the zero grob.  NOTE(review): interactive_points_grob() is
  # presumably defined in setup.R -- confirm.
  gr <- ggiraph:::partialPointGrob(interactive_points_grob(), pch = 2)
  expect_true(ggiraph:::is.zero(gr))
}
| /inst/tinytest/test-geom_point_interactive.R | no_license | davidgohel/ggiraph | R | false | false | 1,145 | r | library(tinytest)
library(ggiraph)
library(ggplot2)
library(xml2)
source("setup.R")
# geom_point_interactive ----
{
eval(test_geom_layer, envir = list(name = "geom_point_interactive"))
}
# hover_nearest ---
{
gr <- ggplot(mtcars, aes(x =mpg, y = disp, hover_nearest = TRUE)) +
geom_point_interactive()
doc <- dsvg_doc({
print(gr)
})
expect_equal(xml_attr(xml_find_first(doc, ".//circle"), "nearest"), "true")
}
# test all shapes ----
{
doc <- dsvg_doc({
sxy <- seq(from = 0.2, to = 0.95, by = 0.15)
s <- seq.int(from = 0, to = 25)
dat <- data.frame(
x = head(rep(sxy, 5), length(s)),
y = head(rep(sxy, each = 6), length(s)),
s = s
)
p <- ggplot(dat, aes(x =x, y = y, shape = I(s), col=as.character(s), tooltip = s, info= s)) +
geom_point_interactive(extra_interactive_params = "info")
print(p)
})
for (i in s) {
nodes <- xml_find_all(doc, paste0(".//*[@info='", i, "']"))
expect_true(length(nodes) > 0, info = paste("Shape", i, "is drawn"))
}
}
{
gr <- ggiraph:::partialPointGrob(interactive_points_grob(), pch = 2)
expect_true(ggiraph:::is.zero(gr))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seird_Methods.R
\name{estimateInfectiousNode,seirdModel-method}
\alias{estimateInfectiousNode,seirdModel-method}
\title{An S4 method to estimate current state of S, E and I for an SEIRD when provided
with a total count of deaths so far}
\usage{
\S4method{estimateInfectiousNode}{seirdModel}(epiModel, deaths, nderiv, plotDeriv)
}
\arguments{
\item{epiModel}{The epidemic model of class SEIRD to have the current state of
S, E and I estimated.}
\item{deaths}{The total death count for this model, up to each of the
changeTimes so far.}
\item{nderiv}{Which derivative to assume is 0.}
\item{plotDeriv}{True/False that tells the function to plot the derivative
of D determined by nderiv.}
}
\value{
An object of class seirdModel with the values for S, E and I updated
for the current state.
}
\description{
Please note that this method does not guarantee that this state would be
generated by the model, given its initial conditions.
This method uses the methods for an SIRD model to calculate S and R
This method also makes the assumption that the nth derivative of D with
respect to time is 0. The order n is set by \code{nderiv}, with a default of 8.
This equation is then solved for I using optim, hence this method returns a
close but non-exact solution for E and I.
These values are then stored in the list in the slot "currentState".
}
\examples{
#model with time-varying Beta
model <- setSEIRD(N = 100, Beta = c(2,1/2), Lambda = 1, Gamma = 1/5, ProbOfDeath = 0.5,
I0 = 1, changeTimes = 5)
#Set the deaths, two entries are required now
deaths <- c(20,30)
time <- 10
#call this method via calculate current state
model <- calculateCurrentState(model, t=time, deaths=deaths, nderiv = 8)
#check S and I
currentState(model)
}
| /man/estimateInfectiousNode-seirdModel-method.Rd | permissive | mrc-ide/excalibur | R | false | true | 1,790 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seird_Methods.R
\name{estimateInfectiousNode,seirdModel-method}
\alias{estimateInfectiousNode,seirdModel-method}
\title{An S4 method to estimate current state of S, E and I for an SEIRD when provided
with a total count of deaths so far}
\usage{
\S4method{estimateInfectiousNode}{seirdModel}(epiModel, deaths, nderiv, plotDeriv)
}
\arguments{
\item{epiModel}{The epidemic model of class SEIRD to have the current state of
S, E and I estimated.}
\item{deaths}{The total death count for this model, up to each of the
changeTimes so far.}
\item{nderiv}{Which derivative to assume is 0.}
\item{plotDeriv}{True/False that tells the function to plot the derivative
of D determined by nderiv.}
}
\value{
An object of class seirdModel with the values for S, E and I updated
for the current state.
}
\description{
Please note that this method does not guarantee that this state would be
generated by the model, given its initial conditions.
This method uses the methods for an SIRD model to calculate S and R
This method also makes the assumption that the nth derivative of D with
respect to time is 0. The order n is set by \code{nderiv}, with a default of 8.
This equation is then solved for I using optim, hence this method returns a
close but non-exact solution for E and I.
These values are then stored in the list in the slot "currentState".
}
\examples{
#model with time-varying Beta
model <- setSEIRD(N = 100, Beta = c(2,1/2), Lambda = 1, Gamma = 1/5, ProbOfDeath = 0.5,
I0 = 1, changeTimes = 5)
#Set the deaths, two entries are required now
deaths <- c(20,30)
time <- 10
#call this method via calculate current state
model <- calculateCurrentState(model, t=time, deaths=deaths, nderiv = 8)
#check S and I
currentState(model)
}
|
context("operators")

# `%|%` substitutes its right-hand side wherever the left-hand side is NA.
test_that("%|% returns default value", {
  filled_lgl <- c(TRUE, TRUE, NA, FALSE) %|% FALSE
  expect_identical(filled_lgl, c(TRUE, TRUE, FALSE, FALSE))

  filled_int <- c(1L, 2L, NA, 4L) %|% 3L
  expect_identical(filled_int, 1:4)

  filled_dbl <- c(1, 2, NA, 4) %|% 3
  expect_identical(filled_dbl, as.double(1:4))

  filled_chr <- c("1", "2", NA, "4") %|% "3"
  expect_identical(filled_chr, as.character(1:4))

  # Complex values are compared with expect_equal (tolerance), not identical.
  filled_cpx <- c(1i, 2i, NA, 4i) %|% 3i
  expect_equal(filled_cpx, c(1i, 2i, 3i, 4i))
})
# With a full-length replacement, each NA position takes the element at the
# same index of the right-hand side.
test_that("%|% also works when y is of same length as x", {
  lgl <- c(TRUE, TRUE, NA, FALSE) %|% c(TRUE, TRUE, FALSE, TRUE)
  expect_identical(lgl, c(TRUE, TRUE, FALSE, FALSE))
  int <- c(1L, 2L, NA, 4L) %|% c(10L, 11L, 12L, 13L)
  expect_identical(int, c(1L, 2L, 12L, 4L))
  dbl <- c(1, 2, NA, 4) %|% c(10, 11, 12, 13)
  expect_identical(dbl, c(1, 2, 12, 4))
  chr <- c("1", "2", NA, "4") %|% c("10", "11", "12", "13")
  expect_identical(chr, c("1", "2", "12", "4"))
  cpx <- c(1i, 2i, NA, 4i) %|% c(10i, 11i, 12i, 13i)
  expect_equal(cpx, c(1i, 2i, 12i, 4i))
})
# The left-hand side must be an atomic vector.
test_that("%|% fails on non-atomic original values", {
  verify_errors({
    expect_error(call("fn") %|% 1)
  })
})
# The replacement must have the same type as the input.
test_that("%|% fails with wrong types", {
  verify_errors({
    expect_error(c(1L, NA) %|% 2)
    expect_error(c(1, NA) %|% "")
    expect_error(c(1, NA) %|% call("fn"))
  })
})
# The replacement must be length 1 or the same length as the input.
test_that("%|% fails with wrong length", {
  verify_errors({
    expect_error(c(1L, NA) %|% 1:3)
    expect_error(1:10 %|% 1:4)
    expect_error(1L %|% 1:4)
  })
})
# Snapshot test: the error messages produced below are compared against the
# stored reference file.
test_that("%|% fails with intelligent errors", {
  verify_output(test_path("test-operators-replace-na.txt"), {
    "# %|% fails on non-atomic original values"
    call("fn") %|% 1
    "# %|% fails with wrong types"
    c(1L, NA) %|% 2
    c(1, NA) %|% ""
    c(1, NA) %|% call("fn")
    "# %|% fails with wrong length"
    c(1L, NA) %|% 1:3
    1:10 %|% 1:4
    1L %|% 1:4
  })
})
# `x %@% name` reads attribute `name`; bare and quoted names both work, and a
# missing attribute yields NULL.
test_that("%@% returns attribute", {
  expect_identical(mtcars %@% row.names, row.names(mtcars))
  expect_identical(mtcars %@% "row.names", row.names(mtcars))
  expect_null(mtcars %@% "row")
})
# The replacement form `x %@% name <- value` sets (or, with NULL, removes) an
# attribute.
test_that("%@% has replacement version", {
  x <- structure(list(), foo = "bar")
  x %@% foo <- NULL
  x %@% baz <- "quux"
  expect_identical(x, structure(list(), baz = "quux"))
})
test_that("new_definition() returns new `:=` call", {
  # Build a formula, then poke its head to `:=` to form the expected call.
  def <- "foo" ~ "bar"
  node_poke_car(def, quote(`:=`))
  expect_identical(new_definition("foo", "bar"), def)
})
# On S4 objects `%@%` reads and writes slots.
test_that("%@% works with S4 objects (#207)", {
  .Person <- setClass("Person", slots = c(name = "character", species = "character"))
  fievel <- .Person(name = "Fievel", species = "mouse")
  expect_identical(fievel %@% name, "Fievel")
  expect_identical(fievel %@% "species", "mouse")
  fievel %@% name <- "Bernard"
  fievel %@% "species" <- "MOUSE"
  expect_identical(fievel@name, "Bernard")
  expect_identical(fievel@species, "MOUSE")
})
| /packrat/lib/x86_64-w64-mingw32/3.6.1/rlang/tests/testthat/test-operators.R | permissive | jmcascalheira/LGMIberiaCluster | R | false | false | 2,860 | r | context("operators")
# Scalar replacement: every NA is replaced by the single default value,
# for each atomic type in turn.
test_that("%|% returns default value", {
  lgl <- c(TRUE, TRUE, NA, FALSE) %|% FALSE
  expect_identical(lgl, c(TRUE, TRUE, FALSE, FALSE))
  int <- c(1L, 2L, NA, 4L) %|% 3L
  expect_identical(int, 1:4)
  dbl <- c(1, 2, NA, 4) %|% 3
  expect_identical(dbl, as.double(1:4))
  chr <- c("1", "2", NA, "4") %|% "3"
  expect_identical(chr, as.character(1:4))
  cpx <- c(1i, 2i, NA, 4i) %|% 3i
  expect_equal(cpx, c(1i, 2i, 3i, 4i))
})
# Vector replacement: NAs are filled positionally from a same-length vector.
test_that("%|% also works when y is of same length as x", {
  lgl <- c(TRUE, TRUE, NA, FALSE) %|% c(TRUE, TRUE, FALSE, TRUE)
  expect_identical(lgl, c(TRUE, TRUE, FALSE, FALSE))
  int <- c(1L, 2L, NA, 4L) %|% c(10L, 11L, 12L, 13L)
  expect_identical(int, c(1L, 2L, 12L, 4L))
  dbl <- c(1, 2, NA, 4) %|% c(10, 11, 12, 13)
  expect_identical(dbl, c(1, 2, 12, 4))
  chr <- c("1", "2", NA, "4") %|% c("10", "11", "12", "13")
  expect_identical(chr, c("1", "2", "12", "4"))
  cpx <- c(1i, 2i, NA, 4i) %|% c(10i, 11i, 12i, 13i)
  expect_equal(cpx, c(1i, 2i, 12i, 4i))
})
# The left-hand side must be an atomic vector; language objects are rejected.
test_that("%|% fails on non-atomic original values", {
  verify_errors({
    expect_error(call("fn") %|% 1)
  })
})
# The replacement must match the input's type exactly.
test_that("%|% fails with wrong types", {
  verify_errors({
    expect_error(c(1L, NA) %|% 2)
    expect_error(c(1, NA) %|% "")
    expect_error(c(1, NA) %|% call("fn"))
  })
})
# The replacement must be length 1 or exactly the length of the input.
test_that("%|% fails with wrong length", {
  verify_errors({
    expect_error(c(1L, NA) %|% 1:3)
    expect_error(1:10 %|% 1:4)
    expect_error(1L %|% 1:4)
  })
})
# Snapshot test: verify_output() records the printed error messages in the
# referenced .txt file and fails if the wording changes.
test_that("%|% fails with intelligent errors", {
  verify_output(test_path("test-operators-replace-na.txt"), {
    "# %|% fails on non-atomic original values"
    call("fn") %|% 1
    "# %|% fails with wrong types"
    c(1L, NA) %|% 2
    c(1, NA) %|% ""
    c(1, NA) %|% call("fn")
    "# %|% fails with wrong length"
    c(1L, NA) %|% 1:3
    1:10 %|% 1:4
    1L %|% 1:4
  })
})
# `%@%` reads an attribute; no partial matching ("row" != "row.names").
test_that("%@% returns attribute", {
  expect_identical(mtcars %@% row.names, row.names(mtcars))
  expect_identical(mtcars %@% "row.names", row.names(mtcars))
  expect_null(mtcars %@% "row")
})
# `%@%<-` sets an attribute; assigning NULL removes it.
test_that("%@% has replacement version", {
  x <- structure(list(), foo = "bar")
  x %@% foo <- NULL
  x %@% baz <- "quux"
  expect_identical(x, structure(list(), baz = "quux"))
})
# new_definition() should build the same object as a formula whose head
# has been swapped (in place) for `:=`.
test_that("new_definition() returns new `:=` call", {
  def <- "foo" ~ "bar"
  node_poke_car(def, quote(`:=`))
  expect_identical(new_definition("foo", "bar"), def)
})
# On S4 objects, `%@%` and `%@%<-` operate on slots rather than attributes.
test_that("%@% works with S4 objects (#207)", {
  .Person <- setClass("Person", slots = c(name = "character", species = "character"))
  fievel <- .Person(name = "Fievel", species = "mouse")
  expect_identical(fievel %@% name, "Fievel")
  expect_identical(fievel %@% "species", "mouse")
  fievel %@% name <- "Bernard"
  fievel %@% "species" <- "MOUSE"
  expect_identical(fievel@name, "Bernard")
  expect_identical(fievel@species, "MOUSE")
})
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/sunburst.R
\name{sunburst}
\alias{sunburst}
\title{htmlwidget for d3.js sequence sunburst diagrams}
\usage{
sunburst(csvdata = NULL, jsondata = NULL, legendOrder = NULL,
colors = NULL, percent = TRUE, count = FALSE, explanation = NULL,
width = NULL, height = NULL)
}
\arguments{
\item{csvdata}{data in csv source,target form}
\item{jsondata}{data in nested d3 JSON hierarchy with `{name:..., children:[];}`}
\item{legendOrder}{string vector if you would like to manually order the legend.
If legendOrder is not provided, then the legend will be in the descending
order of the top level hierarchy.}
\item{colors}{\code{vector} of strings representing colors as hexadecimal for
manual colors.}
\item{percent}{\code{logical} to include percentage of total in the explanation.}
\item{count}{\code{logical} to include count and total in the explanation.}
\item{explanation}{JavaScript function to define a custom explanation for the center
of the sunburst. Note, this will override \code{percent} and \code{count}.}
}
\description{
\href{https://gist.github.com/kerryrodden/7090426}{Sequences sunburst} diagrams provide
an interactive method of exploring sequence data, such as website navigation paths.
}
\examples{
# devtools::install_github("timelyportfolio/sunburstR")
library(sunburstR)
# read in sample visit-sequences.csv data provided in source
# https://gist.github.com/kerryrodden/7090426#file-visit-sequences-csv
sequence_data <- read.csv(
system.file("examples/visit-sequences.csv",package="sunburstR")
,header=F
,stringsAsFactors = FALSE
)
sunburst(sequence_data)
# explore some of the arguments
sunburst(
sequence_data
,count = TRUE
)
sunburst(
sequence_data
# apply sort order to the legendS
,legendOrder = unique(unlist(strsplit(sequence_data[,1],"-")))
# just provide the name in the explanation in the center
,explanation = "function(d){return d.name}"
)
# try with json data
sequence_json <- rjson::fromJSON(file="./inst/examples/visit-sequences.json")
sunburst(jsondata = sequence_json)
# try with csv data from this fork
# https://gist.github.com/mkajava/7515402
# works technically but not cosmetically
sunburst( csvdata = read.csv(
file = "https://gist.githubusercontent.com/mkajava/7515402/raw/9f80d28094dc9dfed7090f8fb3376ef1539f4fd2/comment-sequences.csv"
,header = FALSE
,stringsAsFactors = FALSE
))
# try with csv data from this fork
# https://gist.github.com/rileycrane/92a2c36eb932b4f99e51/
sunburst( csvdata = read.csv(
file = "https://gist.githubusercontent.com/rileycrane/92a2c36eb932b4f99e51/raw/a0212b4ca8043af47ec82369aa5f023530279aa3/visit-sequences.csv"
,header=FALSE
,stringsAsFactors = FALSE
))
# use sunburst to analyze ngram data from Peter Norvig
# http://norvig.com/mayzner.html
library(sunburstR)
library(pipeR)
# read the csv data downloaded from the Google Fusion Table linked in the article
ngrams2 <- read.csv(
system.file(
"examples/ngrams2.csv"
,package="sunburstR"
)
, stringsAsFactors = FALSE
)
ngrams2 \%>>\%
# let's look at ngrams at the start of a word, so columns 1 and 3
(.[,c(1,3)]) \%>>\%
# split the ngrams into a sequence by splitting each letter and adding -
(
data.frame(
sequence = strsplit(.[,1],"") \%>>\%
lapply( function(ng){ paste0(ng,collapse = "-") } ) \%>>\%
unlist
,freq = .[,2]
,stringsAsFactors = FALSE
)
) \%>>\%
sunburst
library(htmltools)
ngrams2 \%>>\%
(
lapply(
seq.int(3,ncol(.))
,function(letpos){
(.[,c(1,letpos)]) \%>>\%
# split the ngrams into a sequence by splitting each letter and adding -
(
data.frame(
sequence = strsplit(.[,1],"") \%>>\%
lapply( function(ng){ paste0(ng,collapse = "-") } ) \%>>\%
unlist
,freq = .[,2]
,stringsAsFactors = FALSE
)
) \%>>\%
( tags$div(style="float:left;",sunburst( ., height = 300, width = 300 )) )
}
)
) \%>>\%
tagList \%>>\%
browsable
}
| /man/sunburst.Rd | permissive | abresler/sunburstR | R | false | false | 4,173 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/sunburst.R
\name{sunburst}
\alias{sunburst}
\title{htmlwidget for d3.js sequence sunburst diagrams}
\usage{
sunburst(csvdata = NULL, jsondata = NULL, legendOrder = NULL,
colors = NULL, percent = TRUE, count = FALSE, explanation = NULL,
width = NULL, height = NULL)
}
\arguments{
\item{csvdata}{data in csv source,target form}
\item{jsondata}{data in nested d3 JSON hierarchy with `{name:..., children:[];}`}
\item{legendOrder}{string vector if you would like to manually order the legend.
If legendOrder is not provided, then the legend will be in the descending
order of the top level hierarchy.}
\item{colors}{\code{vector} of strings representing colors as hexadecimal for
manual colors.}
\item{percent}{\code{logical} to include percentage of total in the explanation.}
\item{count}{\code{logical} to include count and total in the explanation.}
\item{explanation}{JavaScript function to define a custom explanation for the center
of the sunburst. Note, this will override \code{percent} and \code{count}.}
}
\description{
\href{https://gist.github.com/kerryrodden/7090426}{Sequences sunburst} diagrams provide
an interactive method of exploring sequence data, such as website navigation paths.
}
\examples{
# devtools::install_github("timelyportfolio/sunburstR")
library(sunburstR)
# read in sample visit-sequences.csv data provided in source
# https://gist.github.com/kerryrodden/7090426#file-visit-sequences-csv
sequence_data <- read.csv(
system.file("examples/visit-sequences.csv",package="sunburstR")
,header=F
,stringsAsFactors = FALSE
)
sunburst(sequence_data)
# explore some of the arguments
sunburst(
sequence_data
,count = TRUE
)
sunburst(
sequence_data
# apply sort order to the legendS
,legendOrder = unique(unlist(strsplit(sequence_data[,1],"-")))
# just provide the name in the explanation in the center
,explanation = "function(d){return d.name}"
)
# try with json data
sequence_json <- rjson::fromJSON(file="./inst/examples/visit-sequences.json")
sunburst(jsondata = sequence_json)
# try with csv data from this fork
# https://gist.github.com/mkajava/7515402
# works technically but not cosmetically
sunburst( csvdata = read.csv(
file = "https://gist.githubusercontent.com/mkajava/7515402/raw/9f80d28094dc9dfed7090f8fb3376ef1539f4fd2/comment-sequences.csv"
,header = FALSE
,stringsAsFactors = FALSE
))
# try with csv data from this fork
# https://gist.github.com/rileycrane/92a2c36eb932b4f99e51/
sunburst( csvdata = read.csv(
file = "https://gist.githubusercontent.com/rileycrane/92a2c36eb932b4f99e51/raw/a0212b4ca8043af47ec82369aa5f023530279aa3/visit-sequences.csv"
,header=FALSE
,stringsAsFactors = FALSE
))
# use sunburst to analyze ngram data from Peter Norvig
# http://norvig.com/mayzner.html
library(sunburstR)
library(pipeR)
# read the csv data downloaded from the Google Fusion Table linked in the article
ngrams2 <- read.csv(
system.file(
"examples/ngrams2.csv"
,package="sunburstR"
)
, stringsAsFactors = FALSE
)
ngrams2 \%>>\%
# let's look at ngrams at the start of a word, so columns 1 and 3
(.[,c(1,3)]) \%>>\%
# split the ngrams into a sequence by splitting each letter and adding -
(
data.frame(
sequence = strsplit(.[,1],"") \%>>\%
lapply( function(ng){ paste0(ng,collapse = "-") } ) \%>>\%
unlist
,freq = .[,2]
,stringsAsFactors = FALSE
)
) \%>>\%
sunburst
library(htmltools)
ngrams2 \%>>\%
(
lapply(
seq.int(3,ncol(.))
,function(letpos){
(.[,c(1,letpos)]) \%>>\%
# split the ngrams into a sequence by splitting each letter and adding -
(
data.frame(
sequence = strsplit(.[,1],"") \%>>\%
lapply( function(ng){ paste0(ng,collapse = "-") } ) \%>>\%
unlist
,freq = .[,2]
,stringsAsFactors = FALSE
)
) \%>>\%
( tags$div(style="float:left;",sunburst( ., height = 300, width = 300 )) )
}
)
) \%>>\%
tagList \%>>\%
browsable
}
|
# Auto-generated fuzzing reproducer: feeds a fixed (fuzzed) argument list to
# the package-internal C routine and prints the result's structure, so the
# crash/valgrind finding can be replayed deterministically.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(2.31535369324468e+77, 0, 0, 3.47886064287861e-310, 0, 0, 2.48104025832395e-265, 1.84616429335722e-312, 8.19687411242632e+107, 8.19687411242632e+107, 8.19687411242632e+107, 8.19687411242632e+107, 8.19687411242632e+107, 8.19687411242632e+107, 8.19687411242632e+107, 3.23790861658519e-319, 3.48007309718799e-312, 1.62597454369523e-260), .Dim = c(2L, 9L)))
result <- do.call(distr6:::C_EmpiricalMVCdf,testlist)
str(result)
result <- do.call(distr6:::C_EmpiricalMVCdf,testlist)
str(result) |
# Dataset 239: Phytoplankton data from cruises.
# Add libraries:
library(plyr)
# sp and raster were missing although the script calls SpatialPoints(),
# CRS(), extent(), raster(), setValues(), ncell() and extract() below;
# without them the spatial section fails at the first call.
library(sp)
library(raster)
# Set read and write directories:
in_dir = 'raw_datasets'
out_dir = 'formatted_datasets'
# These datasets include spatial sample data (t1) and collection data (t3)
d239t1 = read.csv(file.path(in_dir,'/dataset_239RAW/dataset_239_table1.csv'))
d239t3 = read.csv(file.path(in_dir,'/dataset_239RAW/dataset_239_table3.csv'))
# ************************************
# ----- Spatial sampling data ----
# ************************************
# Extract the columns of interest:
t1 = d239t1[,c(1,3,7:8)]
# Remove NA's
t1 = na.omit(t1)
# Create a spatial points dataframe with lat lon
t1sp = SpatialPoints(data.frame(t1$Lon, t1$Lat), proj4string = CRS('+proj=longlat +datum=WGS84'))
# Create an extent object from the point file (stretching the boundaries by 10 degrees):
t1.extent = extent(extent(t1sp)@xmin-10,
extent(t1sp)@xmax+10,
extent(t1sp)@ymin-10,
extent(t1sp)@ymax+10)
# Create an empty raster from the extent object with a resolution of 8 degrees:
r = raster(t1.extent, resolution = 8, crs ='+proj=longlat +datum=WGS84')
# Assign a unique value to each cell, so each cell id can serve as a "site":
r = setValues(r, 1:ncell(r))
# Extract the cell assignment to the site table:
t1$site = extract(r, t1sp)
# Testing: inspect the table and one example grid cell
head(t1)
t1[t1$site == 94,]
# It seems there is paired shallow and deep samples for each lat-lon, how to deal with?
# Trade-off with resolution? Think on this ...
###------REFERENCE SCRIPT BELOW--------------
# NOTE(review): this reference section was copied from another dataset's
# cleaning script -- it reads an object `d239` that is never created above
# (only d239t1/d239t3 exist), so it will error if executed as-is.
# Goal is to change monthly sample to some decimal of year (breaking into quarters):
# Month is embedded within a date string (YYYYMM), extract month:
d = as.numeric(substr(as.character(d239$mo), 5,6))
# Change month to season (wint = Dec, Jan, Feb, spr = Mar, Apr, May, sum = Jun, Jul, Aug, etc.)
d1 = .1* ifelse(d >= 3 & d <=5, 1,
ifelse(d >= 6 & d <= 8, 2,
ifelse(d >= 9 & d <=11, 3, 4)))
# Extract year from the date column:
y = as.numeric(substr(as.character(d239$mo), 1,4))
# Add the decimal season to the year column:
d239$year =y + d1
# Create a "site" column, the sites are the 20 experimental grids
site = paste('d239_',d239$gr, sep = '')
# Make initial frame (necessary columns, not summarized):
df1 = data.frame(site, d239$sp, d239$year, d239$mo)
colnames(df1)[2:4] = c('species','year','date')
# Create a data frame of the count of individuals for a given sampling event:
df2 = ddply(df1, .(site, year, species, date),
summarise, count = length(species))
# Create a data frame of the maximum count of individuals
# for a given sampling event within a season.
df3 = ddply(df2,.(site,year,species),
summarise, count = max(count))
# Arrange the fields in the same order as other datasets:
df4 = data.frame(df3[,1],df3[,3],df3[,2],df3[,4])
names(df4) = c('site','species','year','count')
# Add a dataset ID column for matching with metadata
df4$datasetID = rep(239, length(df4[,1]))
# Rearrange the columns:
d239 = df4[,c(5,1:4)]
# Write to file:
write.csv(d239, file.path(out_dir,'dataset_239.csv'), row.names = F)
# Remove objects from the global environment
rm(list = ls())
| /scripts/R-scripts/data_cleaning_scripts/dwork_239.R | no_license | ssnell6/core-transient | R | false | false | 3,226 | r | # Dataset 239: Phytoplankton data from cruises.
# Add libraries:
library(plyr)
# NOTE(review): sp and raster are not loaded although SpatialPoints(),
# CRS(), extent(), raster(), setValues(), ncell() and extract() are used
# below -- confirm they are attached elsewhere or add library() calls.
# Set read and write directories:
in_dir = 'raw_datasets'
out_dir = 'formatted_datasets'
# These datasets include spatial sample data (t1) and collection data (t3)
d239t1 = read.csv(file.path(in_dir,'/dataset_239RAW/dataset_239_table1.csv'))
d239t3 = read.csv(file.path(in_dir,'/dataset_239RAW/dataset_239_table3.csv'))
# ************************************
# ----- Spatial sampling data ----
# ************************************
# Extract the columns of interest:
t1 = d239t1[,c(1,3,7:8)]
# Remove NA's
t1 = na.omit(t1)
# Create a spatial points dataframe with lat lon
t1sp = SpatialPoints(data.frame(t1$Lon, t1$Lat), proj4string = CRS('+proj=longlat +datum=WGS84'))
# Create an extent object from the point file (stretching the boundaries by 10 degrees):
t1.extent = extent(extent(t1sp)@xmin-10,
extent(t1sp)@xmax+10,
extent(t1sp)@ymin-10,
extent(t1sp)@ymax+10)
# Create an empty raster from the extent object with a resolution of 8 degrees:
r = raster(t1.extent, resolution = 8, crs ='+proj=longlat +datum=WGS84')
# Assign a unique value to each cell:
r = setValues(r, 1:ncell(r))
# Extract the cell assignment to the site table:
t1$site = extract(r, t1sp)
# Testing:
head(t1)
t1[t1$site == 94,]
# It seems there is paired shallow and deep samples for each lat-lon, how to deal with?
# Trade-off with resolution? Think on this ...
###------REFERENCE SCRIPT BELOW--------------
# NOTE(review): this reference section reads an object `d239` that is never
# created above (only d239t1/d239t3 exist); it will error if run as-is.
# Goal is to change monthly sample to some decimal of year (breaking into quarters):
# Month is embedded within a date string (YYYYMM), extract month:
d = as.numeric(substr(as.character(d239$mo), 5,6))
# Change month to season (wint = Dec, Jan, Feb, spr = Mar, Apr, May, sum = Jun, Jul, Aug, etc.)
d1 = .1* ifelse(d >= 3 & d <=5, 1,
ifelse(d >= 6 & d <= 8, 2,
ifelse(d >= 9 & d <=11, 3, 4)))
# Extract year from the date column:
y = as.numeric(substr(as.character(d239$mo), 1,4))
# Add the decimal season to the year column:
d239$year =y + d1
# Create a "site" column, the sites are the 20 experimental grids
site = paste('d239_',d239$gr, sep = '')
# Make initial frame (necessary columns, not summarized):
df1 = data.frame(site, d239$sp, d239$year, d239$mo)
colnames(df1)[2:4] = c('species','year','date')
# Create a data frame of the count of individuals for a given sampling event:
df2 = ddply(df1, .(site, year, species, date),
summarise, count = length(species))
# Create a data frame of the maximum count of individuals
# for a given sampling event within a season.
df3 = ddply(df2,.(site,year,species),
summarise, count = max(count))
# Arrange the fields in the same order as other datasets:
df4 = data.frame(df3[,1],df3[,3],df3[,2],df3[,4])
names(df4) = c('site','species','year','count')
# Add a dataset ID column for matching with metadata
df4$datasetID = rep(239, length(df4[,1]))
# Rearrange the columns:
d239 = df4[,c(5,1:4)]
# Write to file:
write.csv(d239, file.path(out_dir,'dataset_239.csv'), row.names = F)
# Remove objects from the global environment
rm(list = ls())
|
library(data.table)
setwd("~/Dropbox/cervical/sub/")

# Read a Kaggle submission file, clip every class probability up to a floor,
# and renormalise each row to sum to 1. The floor bounds the log-loss
# penalty of confidently-wrong predictions. First column is the image name,
# remaining columns are class probabilities.
# (Replaces four verbatim-duplicated copy/paste sections.)
clip_probs <- function(path, floor = 0.02) {
  sub <- fread(path)
  mat <- as.matrix(sub[, -1, with = FALSE])
  mat[mat < floor] <- floor
  mat <- mat / rowSums(mat)
  cols <- names(sub)[-1]
  for (i in seq_along(cols)) sub[[cols[i]]] <- mat[, i]
  sub
}

sub1 <- clip_probs("sub_dara_part_resnet_raw_5xbag_20170510.csv")
# write.csv(sub1, "sub_dara_part_resnet_raw_5xbag_20170510_clipped.csv", row.names = FALSE)
sub2 <- clip_probs("sub_dara_full_resnet_dmcrop_5xbag_20170513.csv")
sub3 <- clip_probs("sub_dara_part_resnet_raw_5xbag_20170521.csv")
sub4 <- clip_probs("sub_dara_full_remove_addl_3xbag_20170605.csv")

# Sanity check: per-class correlation between the first two submissions.
for (i in 2:4) print(cor(sub1[[i]], sub2[[i]]))
########################################
# Try bagging (typo was "baggin"): align all four submissions on image_name
# so rows correspond one-to-one before averaging.
sub4 = sub4[order(image_name)]
sub3 = sub3[order(image_name)]
sub2 = sub2[order(image_name)]
sub1 = sub1[order(image_name)]
# Correlation of the three class-probability columns (columns 2:4) after alignment.
for(i in 1:3) print(cor(sub1[[i+1]], sub2[[i+1]]))
########################################
# Blend with two teammate submissions. All five submissions get weight 0.2,
# so the blended probabilities still sum to 1 per row.
subdm1 = fread("bebhionn_submission_clipped.csv")
subdm2 = fread("bebhionn_googlenet_submission.csv")
subdm1 = subdm1[order(image_name)]
subdm2 = subdm2[order(image_name)]
sub = sub1
for(var in names(sub)[2:4]) sub[[var]] = 0.2* (sub1[[var]] + sub2[[var]] + sub4[[var]]) + 0.2* (subdm1[[var]] + subdm2[[var]])
#########################################
# Load leak: test images identified as near-duplicates of labelled training
# images. Overwrite their predictions with near-certain probabilities for
# the known class (0.98 / 0.02 / 0.02 keeps rows summing to ~1 and bounds
# log-loss in case a duplicate was mislabelled).
dupes = fread("../features/dupes_leak.csv", skip = 1)
dupes = dupes[,c(2,4), with=F]
setnames(dupes, c("image_name", "act"))
dupes
sub[image_name %in% dupes[act=="Type_1"]$image_name, `:=`(Type_1 = 0.98, Type_2 = 0.02, Type_3 = 0.02)]
sub[image_name %in% dupes[act=="Type_2"]$image_name, `:=`(Type_1 = 0.02, Type_2 = 0.98, Type_3 = 0.02)]
sub[image_name %in% dupes[act=="Type_3"]$image_name, `:=`(Type_1 = 0.02, Type_2 = 0.02, Type_3 = 0.98)]
sub[image_name %in% dupes$image_name]
dupes
View(sub)
# Fixed: the original call ended "row.names = F, )" -- the trailing empty
# argument is an error for write.csv.
write.csv(sub, "remove_addtl_leak.csv", row.names = FALSE)
| /sub/clip0506.R | no_license | darraghdog/cervical | R | false | false | 2,363 | r | library(data.table)
setwd("~/Dropbox/cervical/sub/")
# Clip each class probability to a floor of 0.02 and renormalise rows to sum
# to 1 (bounds the log-loss penalty of confidently-wrong predictions).
# The same block is repeated verbatim for four submission files.
sub <- fread("sub_dara_part_resnet_raw_5xbag_20170510.csv")
mat = as.matrix(sub[,-1,with=F])
mat[mat < 0.02] = 0.02
mat = mat/rowSums(mat)
cols= names(sub)[-1]
for(i in 1:3) sub[[cols[i]]] = mat[,i]
sub1 = sub
# write.csv(sub, "sub_dara_part_resnet_raw_5xbag_20170510_clipped.csv", row.names = F)
########################################
sub <- fread("sub_dara_full_resnet_dmcrop_5xbag_20170513.csv")
mat = as.matrix(sub[,-1,with=F])
mat[mat < 0.02] = 0.02
mat = mat/rowSums(mat)
cols= names(sub)[-1]
for(i in 1:3) sub[[cols[i]]] = mat[,i]
sub2 = sub
########################################
sub <- fread("sub_dara_part_resnet_raw_5xbag_20170521.csv")
mat = as.matrix(sub[,-1,with=F])
mat[mat < 0.02] = 0.02
mat = mat/rowSums(mat)
cols= names(sub)[-1]
for(i in 1:3) sub[[cols[i]]] = mat[,i]
sub3 = sub
########################################
sub <- fread("sub_dara_full_remove_addl_3xbag_20170605.csv")
mat = as.matrix(sub[,-1,with=F])
mat[mat < 0.02] = 0.02
mat = mat/rowSums(mat)
cols= names(sub)[-1]
for(i in 1:3) sub[[cols[i]]] = mat[,i]
sub4 = sub
# Sanity check: per-class correlation between the first two submissions.
for (i in 2:4) print(cor(sub1[[i]], sub2[[i]]))
########################################
# Try baggin
# Align all submissions on image_name so rows correspond before averaging.
sub4 = sub4[order(image_name)]
sub3 = sub3[order(image_name)]
sub2 = sub2[order(image_name)]
sub1 = sub1[order(image_name)]
for(i in 1:3) print(cor(sub1[[i+1]], sub2[[i+1]]))
########################################
# Blend with two teammate submissions; every submission gets weight 0.2 so
# blended probabilities still sum to 1 per row.
subdm1 = fread("bebhionn_submission_clipped.csv")
subdm2 = fread("bebhionn_googlenet_submission.csv")
subdm1 = subdm1[order(image_name)]
subdm2 = subdm2[order(image_name)]
sub = sub1
for(var in names(sub)[2:4]) sub[[var]] = 0.2* (sub1[[var]] + sub2[[var]] + sub4[[var]]) + 0.2* (subdm1[[var]] + subdm2[[var]])
#########################################
# Load leak
# Test images that are near-duplicates of labelled training images:
# overwrite their predictions with near-certain probabilities for the
# known class.
dupes = fread("../features/dupes_leak.csv", skip = 1)
dupes = dupes[,c(2,4), with=F]
setnames(dupes, c("image_name", "act"))
dupes
sub[image_name %in% dupes[act=="Type_1"]$image_name, `:=`(Type_1 = 0.98, Type_2 = 0.02, Type_3 = 0.02)]
sub[image_name %in% dupes[act=="Type_2"]$image_name, `:=`(Type_1 = 0.02, Type_2 = 0.98, Type_3 = 0.02)]
sub[image_name %in% dupes[act=="Type_3"]$image_name, `:=`(Type_1 = 0.02, Type_2 = 0.02, Type_3 = 0.98)]
sub[image_name %in% dupes$image_name]
dupes
View(sub)
# NOTE(review): the trailing empty argument ("row.names = F, )") is an
# error for write.csv -- remove the stray comma.
write.csv(sub, "remove_addtl_leak.csv", row.names = F, )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_grade.R
\name{grade}
\alias{grade}
\title{Grade .summ file}
\usage{
grade(path, minsnr = 3, tlagmax = 1, minl = 0, mfast = FALSE)
}
\arguments{
\item{path}{Path to .summ file to be graded}
\item{minsnr}{Minimum SNR allowed for an AB+ grade}
\item{tlagmax}{Maximum time delay allowed for an AB+ grade}
\item{minl}{Minimum lambdamax allowed for an AB+ grade}
\item{mfast}{Set to TRUE to grade a .summ file produced by the original MFAST}
}
\description{
Grades a .summ file (do_station automatically grades)
}
\examples{
# (Re)grade LHOR2.75.summ
write_sample("~/mfast/sample_data/raw_data")
do_station_simple(path="~/mfast/sample_data/raw_data")
pathto <- "~/mfast/sample_data/raw_data/LHOR2.summ_files/LHOR2.75.summ"
grade(pathto)
}
| /man/grade.Rd | no_license | shearwavesplitter/MFASTR | R | false | true | 817 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_grade.R
\name{grade}
\alias{grade}
\title{Grade .summ file}
\usage{
grade(path, minsnr = 3, tlagmax = 1, minl = 0, mfast = FALSE)
}
\arguments{
\item{path}{Path to .summ file to be graded}
\item{minsnr}{Minimum SNR allowed for an AB+ grade}
\item{tlagmax}{Maximum time delay allowed for an AB+ grade}
\item{minl}{Minimum lambdamax allowed for an AB+ grade}
\item{mfast}{Set to TRUE to grade a .summ file produced by the original MFAST}
}
\description{
Grades a .summ file (do_station automatically grades)
}
\examples{
# (Re)grade LHOR2.75.summ
write_sample("~/mfast/sample_data/raw_data")
do_station_simple(path="~/mfast/sample_data/raw_data")
pathto <- "~/mfast/sample_data/raw_data/LHOR2.summ_files/LHOR2.75.summ"
grade(pathto)
}
|
#' Filter a matrix by row/column missingness for clustering
#'
#' Drops rows whose fraction of NA entries exceeds `f_row` and columns whose
#' fraction of NA entries exceeds `f_col`, reporting dimensions before and
#' after. Missingness is counted once against the original dimensions (a
#' single pass, not iterative re-filtering).
#'
#' @param mtx matrix, possibly containing NAs.
#' @param f_row maximum allowed fraction of missing entries per row.
#' @param f_col maximum allowed fraction of missing entries per column.
#' @return The subsetted matrix. `drop = FALSE` keeps a matrix even when
#'   only one row or column survives (the original silently collapsed to a
#'   vector in that case, breaking downstream matrix code).
cleanMatrixForClusterW <- function(mtx, f_row = 0.5, f_col = 0.5) {
  cat(sprintf("Filter rows with >%1.2f missingness and columns with >%1.2f missingness.\n",
    f_row, f_col))
  cat("Before: ", nrow(mtx), "rows and ", ncol(mtx),"columns.\n")
  namtx <- is.na(mtx)
  # Thresholds are scaled by the opposite dimension so f_* are fractions.
  good_row <- rowSums(namtx) <= ncol(mtx) * f_row
  good_col <- colSums(namtx) <= nrow(mtx) * f_col
  cat("After: ", sum(good_row), "rows and ", sum(good_col),"columns.\n")
  mtx[good_row, good_col, drop = FALSE]
}
# Replace every missing entry of `mtx` with the mean of the non-missing
# values in its row; returns the completed matrix.
imputeRowMean <- function(mtx) {
  miss <- is.na(mtx)
  fill <- rowMeans(mtx, na.rm = TRUE)
  # Index the row means by the row number of each missing cell.
  mtx[miss] <- fill[row(mtx)[miss]]
  mtx
}
#' Get most variable probes
#'
#' Ranks rows of a beta-value matrix by standard deviation (NAs ignored)
#' and keeps the top `n`. `n` is now capped at `nrow(betas)`: previously,
#' requesting more probes than available produced NA row names and an
#' out-of-bounds subscript error.
#'
#' @param betas matrix with probes in rows; rownames are required.
#' @param n number of most-variable rows to keep.
#' @return `betas` restricted to the (at most) `n` most variable rows.
bSubMostVariable <- function(betas, n=2000) {
  std <- apply(betas, 1, sd, na.rm=TRUE)
  n <- min(n, nrow(betas))  # guard against n > nrow(betas)
  betas[names(sort(std, decreasing=TRUE)[seq_len(n)]),]
}
| /Rutils/2022/matrix.R | no_license | zhou-lab/labtools | R | false | false | 793 | r | cleanMatrixForClusterW <- function(mtx, f_row = 0.5, f_col = 0.5) {
cat(sprintf("Filter rows with >%1.2f missingness and columns with >%1.2f missingness.\n",
f_row, f_col))
cat("Before: ", nrow(mtx), "rows and ", ncol(mtx),"columns.\n")
namtx = is.na(mtx)
good_row = rowSums(namtx) <= ncol(mtx) * f_row
good_col = colSums(namtx) <= nrow(mtx) * f_col
cat("After: ", sum(good_row), "rows and ", sum(good_col),"columns.\n")
mtx[good_row, good_col]
}
# Replace every NA in `mtx` with the mean of the non-missing values in its row.
imputeRowMean <- function(mtx) {
  k <- which(is.na(mtx), arr.ind=TRUE)  # (row, col) positions of missing cells
  mtx[k] <- rowMeans(mtx, na.rm=TRUE)[k[,1]]  # look up each NA's row mean
  mtx
}
#' Get most variable probes
#'
#' Rank rows by standard deviation (NAs ignored) and keep the top `n`.
#' NOTE(review): assumes `betas` has rownames and that `n <= nrow(betas)`;
#' a larger `n` yields NA row names and a subscript error -- confirm callers.
bSubMostVariable <- function(betas, n=2000) {
  std <- apply(betas, 1, sd, na.rm=TRUE)
  betas[names(sort(std, decreasing=TRUE)[seq_len(n)]),]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qe.mean.sd.R
\name{qe.mean.sd}
\alias{qe.mean.sd}
\title{Quantile estimation method for estimating the sample mean and standard deviation}
\usage{
qe.mean.sd(
min.val,
q1.val,
med.val,
q3.val,
max.val,
n,
qe.fit.control = list()
)
}
\arguments{
\item{min.val}{numeric value giving the sample minimum.}
\item{q1.val}{numeric value giving the sample first quartile.}
\item{med.val}{numeric value giving the sample median.}
\item{q3.val}{numeric value giving the sample third quartile.}
\item{max.val}{numeric value giving the sample maximum.}
\item{n}{numeric value giving the sample size.}
\item{qe.fit.control}{optional list of control parameters for \code{\link{qe.fit}}.}
}
\value{
A list with the following components:
\item{est.mean}{Estimated sample mean.}
\item{est.sd}{Estimated sample standard deviation.}
\item{selected.dist}{Selected outcome distribution.}
\item{values}{Values of the objective functions evaluated at the estimated parameters of each candidate distribution.}
\item{...}{Some additional elements.}
}
\description{
This function applies the quantile estimation (QE) method to estimate the sample mean and standard deviation from a study that presents one of the following sets of summary statistics: \itemize{
\item S1: median, minimum and maximum values, and sample size
\item S2: median, first and third quartiles, and sample size
\item S3: median, minimum and maximum values, first and third quartiles, and sample size
}
}
\details{
In brief, the QE method fits candidate distribution(s) by minimizing the distance between observed and distribution quantiles. See \code{\link{qe.fit}} for further details concerning the distribution fitting step. If multiple candidate distributions are fit, the distribution with the best fit (i.e., the fitted distribution obtaining the smallest distance between observed and distribution quantiles) is selected as the underlying outcome distribution. The mean and standard deviation of the selected distribution are used to estimate the sample mean and standard deviation, respectively.
}
\examples{
## Generate S2 summary data
set.seed(1)
n <- 100
x <- stats::rlnorm(n, 2.5, 1)
quants <- stats::quantile(x, probs = c(0.25, 0.5, 0.75))
obs.mean <- mean(x)
obs.sd <- stats::sd(x)
## Estimate the sample mean and standard deviation using the QE method
qe.mean.sd(q1.val = quants[1], med.val = quants[2], q3.val = quants[3],
n = n)
}
\references{
McGrath S., Zhao X., Steele R., Thombs B.D., Benedetti A., and the DEPRESsion Screening Data (DEPRESSD) Collaboration. (2020). Estimating the sample mean and standard deviation from commonly reported quantiles in meta-analysis. \emph{Statistical Methods in Medical Research}. \strong{29}(9):2520-2537.
}
| /man/qe.mean.sd.Rd | no_license | stmcg/estmeansd | R | false | true | 2,814 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qe.mean.sd.R
\name{qe.mean.sd}
\alias{qe.mean.sd}
\title{Quantile estimation method for estimating the sample mean and standard deviation}
\usage{
qe.mean.sd(
min.val,
q1.val,
med.val,
q3.val,
max.val,
n,
qe.fit.control = list()
)
}
\arguments{
\item{min.val}{numeric value giving the sample minimum.}
\item{q1.val}{numeric value giving the sample first quartile.}
\item{med.val}{numeric value giving the sample median.}
\item{q3.val}{numeric value giving the sample third quartile.}
\item{max.val}{numeric value giving the sample maximum.}
\item{n}{numeric value giving the sample size.}
\item{qe.fit.control}{optional list of control parameters for \code{\link{qe.fit}}.}
}
\value{
A list with the following components:
\item{est.mean}{Estimated sample mean.}
\item{est.sd}{Estimated sample standard deviation.}
\item{selected.dist}{Selected outcome distribution.}
\item{values}{Values of the objective functions evaluated at the estimated parameters of each candidate distribution.}
\item{...}{Some additional elements.}
}
\description{
This function applies the quantile estimation (QE) method to estimate the sample mean and standard deviation from a study that presents one of the following sets of summary statistics: \itemize{
\item S1: median, minimum and maximum values, and sample size
\item S2: median, first and third quartiles, and sample size
\item S3: median, minimum and maximum values, first and third quartiles, and sample size
}
}
\details{
In brief, the QE method fits candidate distribution(s) by minimizing the distance between observed and distribution quantiles. See \code{\link{qe.fit}} for further details concerning the distribution fitting step. If multiple candidate distributions are fit, the distribution with the best fit (i.e., the fitted distribution obtaining the smallest distance between observed and distribution quantiles) is selected as the underlying outcome distribution. The mean and standard deviation of the selected distribution are used to estimate the sample mean and standard deviation, respectively.
}
\examples{
## Generate S2 summary data
set.seed(1)
n <- 100
x <- stats::rlnorm(n, 2.5, 1)
quants <- stats::quantile(x, probs = c(0.25, 0.5, 0.75))
obs.mean <- mean(x)
obs.sd <- stats::sd(x)
## Estimate the sample mean and standard deviation using the QE method
qe.mean.sd(q1.val = quants[1], med.val = quants[2], q3.val = quants[3],
n = n)
}
\references{
McGrath S., Zhao X., Steele R., Thombs B.D., Benedetti A., and the DEPRESsion Screening Data (DEPRESSD) Collaboration. (2020). Estimating the sample mean and standard deviation from commonly reported quantiles in meta-analysis. \emph{Statistical Methods in Medical Research}. \strong{29}(9):2520-2537.
}
|
## This is programming exercise that demonstrates how
## computationally expensive function results can be
## cached and reused
##
## makeCacheMatrix creates cache object that then gets
## used in cacheSolve function, demonstrating how
## to use the cache object
## makeCacheMatrix returns a cache object
makeCacheMatrix <- function(x = matrix()) {
  ## Matrix container that memoises its inverse. The matrix and its
  ## cached inverse live in this function's environment; the returned
  ## accessor closures all share that environment.
  inv <- NULL
  replace <- function(value) {
    x <<- value
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  fetch <- function() {
    x
  }
  store_inverse <- function(m) {
    inv <<- m
  }
  fetch_inverse <- function() {
    inv
  }
  ## Public interface: same element names as before.
  list(
    set = replace,
    get = fetch,
    setinverse = store_inverse,
    getinverse = fetch_inverse
  )
}
## cacheSolve uses cache object to either return cached result or
## calculate it and then store the result in the cache
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in `x` (an object built by
  ## makeCacheMatrix), computing it at most once. Extra arguments are
  ## forwarded to solve(). Emits "getting cached data" on a cache hit.
  memoised <- x$getinverse()
  if (is.null(memoised)) {
    memoised <- solve(x$get(), ...)
    x$setinverse(memoised)
  } else {
    message("getting cached data")
  }
  memoised
}
| /cachematrix.R | no_license | huima/ProgrammingAssignment2 | R | false | false | 1,027 | r | ## This is programming exercise that demonstrates how
## computationally expensive function results can be
## cached and reused
##
## makeCacheMatrix creates cache object that then gets
## used in cacheSolve function, demonstrating how
## to use the cache object
## makeCacheMatrix returns a cache object
makeCacheMatrix <- function(x = matrix()) {
  ## Matrix container that memoises its inverse. The matrix and its
  ## cached inverse live in this function's environment; the returned
  ## accessor closures all share that environment.
  inv <- NULL
  replace <- function(value) {
    x <<- value
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  fetch <- function() {
    x
  }
  store_inverse <- function(m) {
    inv <<- m
  }
  fetch_inverse <- function() {
    inv
  }
  ## Public interface: same element names as before.
  list(
    set = replace,
    get = fetch,
    setinverse = store_inverse,
    getinverse = fetch_inverse
  )
}
## cacheSolve uses cache object to either return cached result or
## calculate it and then store the result in the cache
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in `x` (an object built by
  ## makeCacheMatrix), computing it at most once. Extra arguments are
  ## forwarded to solve(). Emits "getting cached data" on a cache hit.
  memoised <- x$getinverse()
  if (is.null(memoised)) {
    memoised <- solve(x$get(), ...)
    x$setinverse(memoised)
  } else {
    message("getting cached data")
  }
  memoised
}
|
#!/usr/bin/env Rscript
" Generate a list of contigs to filter from the initial assembly
This should include only the most obvious contamination candidates:
1. Contigs with high GC (> 0.6)
2. Contigs with bacterial and plant/algae taxonomic assignments.
3. Contigs with low coverage (< 10)
Usage:
    find_contam_contigs.R BLOBTABLE
" -> doc
## Import only the dplyr verbs used below, plus the magrittr pipe.
import::from(dplyr, filter, select, "%>%")
## Provides read_blobtable(), used by the non-interactive entry point.
source("lib/R/blobtools_util.R")
## Print (to stdout, one per line) the names of contigs that look like
## contamination: prokaryotic/viral superkingdom, plant/algal phylum
## (name contains "ophyta"), GC content above 0.6, or summed coverage
## below 10.
main <- function(blobtable) {
  suspicious <- filter(blobtable,
                       superkingdom.t %in% c("Bacteria", "Viruses", "Archaea") |
                         grepl("ophyta", phylum.t) | GC > 0.6 | cov_sum < 10) %>%
    select(name)
  contig_names <- noquote(unlist(suspicious))
  write(paste(contig_names, collapse = "\n"), stdout())
}
## Script entry point: read the blobtable named on the command line and
## print the contaminant contig names. Skipped when sourced interactively.
if (!interactive()) {
  ## commandArgs(trailingOnly = TRUE) returns all trailing arguments;
  ## a single BLOBTABLE path is expected per the usage string above.
  blobtable_file <- commandArgs(trailingOnly = TRUE)
  blobtable <- read_blobtable(blobtable_file)
  main(blobtable)
}
| /pipe/scripts/find_contam_contigs1.R | permissive | EddieKHHo/megadaph | R | false | false | 909 | r | #!/usr/bin/env Rscript
" Generate a list of contigs to filter from the initial assembly
This should include only the most obvious contamination candidates:
1. Contigs with high GC (> 0.6)
2. Contigs with bacterial and plant/algae taxonomic assignments.
3. Contigs with low coverage (< 10)
Usage:
find_contam_contigs.R BLOBTABLE
" -> doc
import::from(dplyr, filter, select, "%>%")
source("lib/R/blobtools_util.R")
## Print (to stdout, one per line) the names of contigs that look like
## contamination: prokaryotic/viral superkingdom, plant/algal phylum
## (name contains "ophyta"), GC content above 0.6, or summed coverage
## below 10.
main <- function(blobtable) {
  suspicious <- filter(blobtable,
                       superkingdom.t %in% c("Bacteria", "Viruses", "Archaea") |
                         grepl("ophyta", phylum.t) | GC > 0.6 | cov_sum < 10) %>%
    select(name)
  contig_names <- noquote(unlist(suspicious))
  write(paste(contig_names, collapse = "\n"), stdout())
}
## Script entry point: read the blobtable named on the command line and
## print the contaminant contig names. Skipped when sourced interactively.
if (!interactive()) {
  ## commandArgs(trailingOnly = TRUE) returns all trailing arguments;
  ## a single BLOBTABLE path is expected per the usage string above.
  blobtable_file <- commandArgs(trailingOnly = TRUE)
  blobtable <- read_blobtable(blobtable_file)
  main(blobtable)
}
|
library(ggplot2)
## Scatter plot of a UMAP embedding, coloured by a metadata column.
##
## Args:
##   umap:       a umap result; only umap$layout (two-column matrix) is used.
##   method:     plot title (typically the embedding/correction method name).
##   colored_by: which MetaData column drives the colour mapping; one of
##               "batch", "level1" or "level2" (default "batch").
## Returns: a ggplot object.
## NOTE(review): relies on a global `MetaData` data frame whose rows are
## assumed to align with umap$layout -- confirm with callers.
umapPlot = function(umap, method, colored_by = c("batch", "level1", "level2")){
  ## Fix: the original compared the (length-3) default vector with `==`,
  ## which errors in R >= 4.2 when colored_by is left at its default.
  ## match.arg() collapses the default to "batch" and validates input.
  colored_by <- match.arg(colored_by)
  Label <- switch(colored_by,
                  batch  = MetaData$batch,
                  level1 = MetaData$level1,
                  level2 = MetaData$level2)
  df = data.frame(UMAP1 = umap$layout[,1],
                  UMAP2 = umap$layout[,2],
                  Label = Label)
  gp = ggplot(df, aes(UMAP1, UMAP2, color = Label)) +
    geom_point(size = -0.1, alpha = 0.1) +
    ggtitle(method) +
    theme_bw() +
    theme(plot.title = element_text(hjust = 0.5, size = 20, face = "bold"),
          legend.title = element_text(size = 15)) +
    guides(color = guide_legend(override.aes = list(alpha = 1, size = 5), title = colored_by))
  return(gp)
}
| /R/plot.R | no_license | xggzj/batch_correction | R | false | false | 751 | r | library(ggplot2)
## Scatter plot of a UMAP embedding, coloured by a metadata column.
##
## Args:
##   umap:       a umap result; only umap$layout (two-column matrix) is used.
##   method:     plot title (typically the embedding/correction method name).
##   colored_by: which MetaData column drives the colour mapping; one of
##               "batch", "level1" or "level2" (default "batch").
## Returns: a ggplot object.
## NOTE(review): relies on a global `MetaData` data frame whose rows are
## assumed to align with umap$layout -- confirm with callers.
umapPlot = function(umap, method, colored_by = c("batch", "level1", "level2")){
  ## Fix: the original compared the (length-3) default vector with `==`,
  ## which errors in R >= 4.2 when colored_by is left at its default.
  ## match.arg() collapses the default to "batch" and validates input.
  colored_by <- match.arg(colored_by)
  Label <- switch(colored_by,
                  batch  = MetaData$batch,
                  level1 = MetaData$level1,
                  level2 = MetaData$level2)
  df = data.frame(UMAP1 = umap$layout[,1],
                  UMAP2 = umap$layout[,2],
                  Label = Label)
  gp = ggplot(df, aes(UMAP1, UMAP2, color = Label)) +
    geom_point(size = -0.1, alpha = 0.1) +
    ggtitle(method) +
    theme_bw() +
    theme(plot.title = element_text(hjust = 0.5, size = 20, face = "bold"),
          legend.title = element_text(size = 15)) +
    guides(color = guide_legend(override.aes = list(alpha = 1, size = 5), title = colored_by))
  return(gp)
}
|
rankhospital <- function(state, outcome, num = "best") {
  ## Find the hospital with a given 30-day mortality rank in a state.
  ##
  ## Args:
  ##   state:   two-letter state abbreviation, e.g. "TX".
  ##   outcome: one of "heart attack", "heart failure", "pneumonia".
  ##   num:     rank to return: "best", "worst", or a positive integer.
  ##            Ranks beyond the number of hospitals yield NA by design.
  ## Returns: the hospital name (character scalar); ties broken by name.
  ## Read outcome data
  my_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Split data by state and pick out the requested state
  ## ([[ returns NULL for a name that is not present)
  split_by_state <- split(my_data, my_data$State)
  state_data <- split_by_state[[state]]
  if (is.null(state_data)) {
    ## Fix: stop(print(...)) also printed the message to stdout before
    ## signalling the error; plain stop() is the idiomatic form.
    stop("invalid state")
  }
  ## Map each valid outcome to its 30-day death-rate column index
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_cols)) {
    stop("invalid outcome")
  }
  j <- outcome_cols[[outcome]]
  ## Keep name (col 1), state (col 2) and the chosen rate; sort by rate
  ## then by hospital name, then drop rows with missing rates.
  my_data <- state_data[ ,c(1,2,j)]
  my_data[ ,3] <- as.numeric(my_data[ ,3])  # "Not Available" becomes NA (with a warning)
  my_data <- my_data[ order(my_data[ ,3], my_data[ ,2]), ]
  my_data <- my_data[complete.cases(my_data), ]
  ## Translate the requested rank into a row index
  if (num == "best") {
    hospital <- my_data[1,2]
  } else if (num == "worst") {
    hospital <- my_data[nrow(my_data),2]
  } else {
    hospital <- my_data[num,2]  # out-of-range ranks give NA
  }
  hospital
} | /rprog_data_ProgAssignment3-data/rankhospital.R | no_license | ninien/datasciencecoursera | R | false | false | 1,241 | r | rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  my_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Split data by state
  split_by_state <- split(my_data, my_data$State)
  ## Pick out state I want ([[ returns NULL for an unknown name)
  state_data <- split_by_state[[state]]
  ## Check that state is valid
  if (is.null(state_data)) {
    ## NOTE(review): stop(print(...)) also prints the message to stdout
    ## before raising the error; plain stop("invalid state") suffices.
    stop(print("invalid state"))
  }
  ## Check that outcome is valid; j is the 30-day death-rate column index
  j <- 0
  if (outcome == "heart attack") {
    j <- 11
  } else if (outcome == "heart failure") {
    j <- 17
  } else if (outcome == "pneumonia") {
    j <- 23
  } else {
    stop(print("invalid outcome"))
  }
  ## Return hospital name in that state with the given rank
  ## 30-day death rate
  ## Keep name (col 1), state (col 2) and the chosen rate column
  my_data <- state_data[ ,c(1,2,j)]
  my_data[ ,3] <- as.numeric(my_data[ ,3])  # "Not Available" becomes NA (with a warning)
  ## Sort by rate, then by hospital name to break ties; drop NA rates
  my_data <- my_data[ order(my_data[ ,3], my_data[ ,2]), ]
  my_data <- my_data[complete.cases(my_data), ]
  hospital <- vector()
  if (num == "best") {
    hospital <- my_data[1,2]
  } else if (num == "worst") {
    hospital <- my_data[nrow(my_data),2]
  } else {
    hospital <- my_data[num,2]  # out-of-range ranks give NA by design
  }
  hospital
}
setwd("~/R/ExData_Plotting1/")
arq<-read.csv("~/R/data/household_power_consumption.txt",sep=";")
arq$NewDateTime <- strptime(paste(arq$Date,arq$Time), "%d/%m/%Y %H:%M:%S")
feb2007<-arq[(arq$NewDate == "2007-02-01" | arq$NewDate == "2007-02-02"),]
par(mfcol=c(1,1))
plot(feb2007$NewDateTime,as.numeric(feb2007$Global_active_power)/1000,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.copy(png,file="plot2.png")
dev.off()
| /plot2.R | no_license | msperez/ExData_Plotting1 | R | false | false | 433 | r | setwd("~/R/ExData_Plotting1/")
arq<-read.csv("~/R/data/household_power_consumption.txt",sep=";")
## Combine the Date and Time text columns into one POSIXlt timestamp
arq$NewDateTime <- strptime(paste(arq$Date,arq$Time), "%d/%m/%Y %H:%M:%S")
## NOTE(review): `arq$NewDate` reaches NewDateTime only via $ partial
## matching, and compares a timestamp against date-only strings --
## verify this actually selects the 2007-02-01/02 rows.
feb2007<-arq[(arq$NewDate == "2007-02-01" | arq$NewDate == "2007-02-02"),]
par(mfcol=c(1,1))
## NOTE(review): confirm both the /1000 rescale (the source column may
## already be in kilowatts) and the as.numeric() coercion (on a factor
## column it would return level codes, not the measured values).
plot(feb2007$NewDateTime,as.numeric(feb2007$Global_active_power)/1000,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.copy(png,file="plot2.png")  # copy the on-screen plot to a PNG file
dev.off()
|
#Read in packages
library(dplyr)
library(stringr)
# NOTE(review): rm(list = ls()) wipes the workspace of whoever sources
# this script -- a known anti-pattern; consider removing it.
rm(list = ls())
###############READ IN DATA AND CLEAN #################
#We have 3 datasets
#1. Deprivation Indices: GISD_NDS_Kreise_2017_final.csv in raw_data --> GISD_wide.csv in clean_data
#2. Citizens data (population of each kreis) Einwohner_NDS_Kreise_2017.csv in raw_data --> citizens_wide in clean_data
#3. Suicide numbers of each kreis: TODESURS78_NDS_Kreise_2017_final.csv --> clean_data/suicides_wide.csv
#and --> processed_wide including age-standardized suicide mortality rates
####### 1. Deprivation index data: GISD #####
GISD <- readr::read_csv2("raw_data/GISD_NDS_Kreise_2017_final.csv") %>%
  as_tibble()
#mutate(kreis = str_replace(Raumeinheit, ", Stadt", "")) %>% #make new variable where Stadt is deleted
#Clean & Standardize Indices
# Drop row 1 and column 3 -- presumably CSV header/metadata artefacts; verify against the raw file
GISD <- GISD[-1,-3]
# rename indicators to X_1 .. X_8
varname <- c('X') # define a list of varying "varname"
n <- c(8) # there are 8 indicators in total
names(GISD)[3:ncol(GISD)] <- unlist(mapply(function(x,y) paste(x, seq(1,y), sep="_"), varname, n))
# X_1 = 'Arbeitslosigkeit', engl.: Unemployment
# X_2 = 'Beschäftigte am Wohnort mit akademischen Anschluss' (!= Beschäftigte am Wohnort mit Fach-(Hochschulabschluss)), engl.: Employees at place of residence with a technical (university) degree
# X_3 = 'Beschäftigtenquote', engl.: Employment rate
# X_4 = 'Bruttoverdienst' (!= Bruttolohn und -gehalt), engl.: Gross wage and salary
# X_5 = 'Haushaltseinkommen' (!= Haushaltsnettoeinkommen), engl.: Net household income
# X_6 = 'Schulabgänger ohne Abschluss', engl.: School leavers without qualifications
# X_7 = 'Schuldnerquote', engl.: Debtor rate
# X_8 = 'Einkommenssteuer' (!= Steuereinnahmen), engl.: Tax revenues
##### Z-Transformation of GISD data #####
# Z-Standardization of all indicators; transformation of the variables of a distribution for better comparability of the different value ranges
# Z-score = (X - µ)/ sqrt(var)
# GISD <- scale(GISD.select[, X_1:X_8], center = TRUE, scale = TRUE)
# Z-standardization and saving as ZX_i
# NOTE: scale() returns a one-column matrix, so each ZX_* column is
# matrix-valued inside the tibble; rowSums() below handles this, but it
# can surprise other consumers of the saved CSV.
GISD$ZX_1 <- scale(GISD$X_1)
GISD$ZX_2 <- scale(GISD$X_2)
GISD$ZX_3 <- scale(GISD$X_3)
GISD$ZX_4 <- scale(GISD$X_4)
GISD$ZX_5 <- scale(GISD$X_5)
GISD$ZX_6 <- scale(GISD$X_6)
GISD$ZX_7 <- scale(GISD$X_7)
GISD$ZX_8 <- scale(GISD$X_8)
# Strip first characters of 'Kennziffer' in order to merge later
GISD$Kennziffer<-substring(GISD$Kennziffer, 3)
GISD <- GISD %>% as_tibble(GISD) %>%
  select(Kennziffer, Kreis = Raumeinheit, ZX_1, ZX_2, ZX_3, ZX_4, ZX_5, ZX_6, ZX_7, ZX_8)
#Change signs of variables,
# because logically, a high employment rate does not contribute to more deprivation,
# but a high debtor rate does. So we put all indicators on the same interpretation scale
# employees_residence_technical_university_degree = ZX_2
# employment_rate = ZX_3
# gross_wage_and_salary = ZX_4
# net_household_income = ZX_5
# tax_revenues = ZX_8
GISD <- GISD %>%
  mutate(ZX_2 = ZX_2*(-1),
         ZX_3 = ZX_3*(-1),
         ZX_4 = ZX_4*(-1),
         ZX_5 = ZX_5*(-1),
         ZX_8 = ZX_8*(-1)) %>%
  # columns 3:10 are ZX_1 .. ZX_8 after the select() above
  mutate(DI = rowSums(.[3:10])) %>%
  arrange(desc(DI)) #Overall Deprivation Index per Kreis = DI
write.csv(GISD, "clean_data/GISD_wide.csv")
############### 2. Citizen data ###############
### 2nd: Citizen numbers on 'Kreis'-level
cdata <- readr::read_csv2("raw_data/Einwohner_NDS_Kreise_2017.csv")
# Drop trailing rows by hard-coded index -- NOTE(review): fragile if the
# raw export ever changes length.
cdata <- cdata[-c(5356:5361),]
# Remove rows where four cells are NA (presumably empty separator rows -- verify)
ind_nas <- which(rowSums(is.na(cdata)) == 4)
cdata <- cdata[-ind_nas, ]
# The raw sheet stacks 51 blocks of 103 rows (one block per district);
# transpose each block into a single wide row.
data <- data.frame()
for (j in 1:51) {
  indx <- 103 * (j-1) + 1
  transp <- t(cdata[indx:(indx+102), ])
  transp[,1] <- transp[1,1]
  transp <- transp[-1,]
  data <- rbind(data, transp)
}
# Column labels come from the first block's first column
columnnames <- t(cdata[1:(1+102), ])[1,]
columnnames[1] <- "Kreis"
colnames(data) <- columnnames
colnames(data)       # inspection aid: echoes the column names when run
data$variable = str_remove_all( rownames(data), "[0-9]+")
sapply(data, class)  # inspection aid: echoes the column classes when run
# Output: All values are saved as factors --> change them to numeric!!!
data <- as_tibble(data) %>%
  rename_all( ~ tolower(.) %>% str_remove_all(" ") %>% str_replace("-","_")) %>%
  rename_at(vars(3:103), ~ paste0("age_", .)) %>%
  mutate_all(as.character) %>%
  mutate_at(vars(3:103), ~ na_if(. , "-") %>% as.numeric()) %>%
  mutate(kreisnummer = str_extract(kreis, "[0-9]+")) %>% ##########################
  mutate(kreis = str_remove(kreis, "[0-9]+ +")) %>%
  # Collapse the single-year-of-age columns into broad age bands
  mutate(age_under_20 = select(., age_0_1:age_19_20) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_20_30 = select(., age_20_21:age_29_30) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_30_40 = select(., age_30_31:age_39_40) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_40_50 = select(., age_40_41:age_49_50) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_50_60 = select(., age_50_51:age_59_60) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_60_70 = select(., age_60_61:age_69_70) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_70_80 = select(., age_70_71:age_79_80) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_over_80 = select(., age_80_81:age_100undälter) %>% rowSums(na.rm = TRUE)) %>%
  mutate(total = select(., c(age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80)) %>% rowSums(na.rm = TRUE)) %>%
  select(kreis, kreisnummer, total, age_under_20:age_over_80, variable)
data <- filter(data, variable == "Insgesamt")
# Removing row 'Niedersachsen' and the column 'variable'
data <- data[-1, -12]
# Removing rows 'Braunschweig', 'Hannover', 'Hannover,Landeshauptstadt', 'Lüneburg' (appears two times, with the same name), 'Weser-Ems'
# Removing rows manually/by row number due to the double entry of 'Lüneburg'
data <- data[-c(1, 12, 14, 21, 33), ]
# Substitute entries 'NA' by value 0
data[is.na(data)] = 0
data$total <- as.numeric(data$total)
citizens <- data
write.csv(citizens, "clean_data/citizens_wide.csv", row.names = F)
############### 3. suicide rates per kreis ##################
#suicide rates with age groups data
#make variable kreis: line 1: 12 -> take
### 3rd: Suicide numbers on 'Kreis'-Level
#rm(list = ls())
# Read "-" cells as NA (presumably they mark zero/unreported counts -- verify)
d <- readr::read_csv2("raw_data/TODESURS78_NDS_Kreise_2017_final.csv", na = "-")
column_names <- pull( d[2:12, 1])
# Each district occupies 12 consecutive rows in the raw sheet
nkreise = nrow(d)/12
data <- data.frame()
for(i in 1:nkreise){
  ind = 12 * (i-1) + 1
  transposed = t(d[ind:(ind+11),])
  transposed[,1] = transposed[1,1]
  transposed = transposed[-1,]
  data = rbind(data, transposed)
}
colnames(data) <- c("Kreis", column_names)
data$variable = str_remove_all( rownames(data), "[0-9]+")
data <- as_tibble(data) %>%
  rename_all( ~ tolower(.) %>% str_remove_all(" ") %>% str_replace("-","_")) %>%
  mutate_at(vars(3:12), as.numeric) %>%
  rename_at(vars(4:12), ~ paste0("age_", .)) %>%
  mutate(kreisnummer = str_extract(kreis, "[0-9]+")) %>%
  mutate(kreis = str_remove(kreis, "[0-9]+ +")) %>%
  # NOTE: unlike the rowSums(na.rm = TRUE) band sums elsewhere, this uses
  # `+`, so an NA in any of the three bands makes age_under_20 NA.
  mutate(age_under_20 = u.1jahr + age_1_15 + age_15_20) %>%
  mutate(age_over_80 = age_80u.ä.) %>%
  mutate(total = select(., c(age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80)) %>% rowSums(na.rm = TRUE)) %>%
  select(kreis, kreisnummer, total, age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80, variable)
data <- filter(data, variable == "Insgesamt")
# Removing row 'Niedersachsen' and the column 'variable'
data <- data[-1, -12]
# Removing rows 'Braunschweig', 'Hannover', 'Hannover,Landeshauptstadt', 'Lüneburg' (appears two times, with the same name), 'Weser-Ems'
# Removing rows manually/by row number due to the double entry of 'Lüneburg'
data <- data[-c(1, 12, 14, 21, 33), ]
# data
suicides <- data
# suicides
write.csv(suicides, "clean_data/suicides_wide.csv", row.names = F)
##### Calculation of age-standardized suicide mortality rate (SMR) #####
## Computed per 'Kreis' so that each district's age distribution is
## taken into account.
## 1st step: age-specific suicide rate (asSR) per 100,000 inhabitants:
##   asSR = age-specific suicides / age-specific population * 100000
## Only columns 4:11 (the age bands) become rates; columns 1:3 are
## copied unchanged from `citizens` (identifiers and total population).
asSR <- citizens
for (i in 4:11) {
  asSR[, i] <- (suicides[, i] / citizens[, i]) * 100000
}
write.csv(asSR, "clean_data/asSR_wide.csv")
## 2nd step: age-specific proportion of the population:
##   asProp = age-specific population / total population (column 3)
asProp <- citizens
for (i in 4:11) {
  asProp[, i] <- (citizens[, i] / citizens[, 3])
}
## Fix: this previously overwrote clean_data/asSR_wide.csv (copy-paste
## slip); the proportions now go to their own file.
write.csv(asProp, "clean_data/asProp_wide.csv")
## 3rd step: age-standardized SMR = sum over age bands of
##   (age-specific rate x age-specific population share)
asSMR <- citizens
for (i in 4:11) {
  asSMR[, i] <- (asSR[, i] * asProp[, i])
}
asSMR <- as_tibble(asSMR) %>%
  mutate(total = select(., c(age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80)) %>%
           rowSums(na.rm = TRUE)) %>%
  select(Kennziffer = kreisnummer, SMR = total) %>%
  arrange(desc(SMR))
write.csv(asSMR, "clean_data/asSMR_wide.csv")
##### Merging processed GISD and SMR #####
Processed <-
  left_join(GISD, asSMR, by="Kennziffer") %>%
  arrange(desc(SMR))
write.csv(Processed, "clean_data/Processed_wide.csv")
| /script_pm.R | no_license | NorrisPau/BayesFriends | R | false | false | 9,308 | r | #Read in packages
library(dplyr)
library(stringr)
# NOTE(review): rm(list = ls()) wipes the workspace of whoever sources
# this script -- a known anti-pattern; consider removing it.
rm(list = ls())
###############READ IN DATA AND CLEAN #################
#We have 3 datasets
#1. Deprivation Indices: GISD_NDS_Kreise_2017_final.csv in raw_data --> GISD_wide.csv in clean_data
#2. Citizens data (population of each kreis) Einwohner_NDS_Kreise_2017.csv in raw_data --> citizens_wide in clean_data
#3. Suicide numbers of each kreis: TODESURS78_NDS_Kreise_2017_final.csv --> clean_data/suicides_wide.csv
#and --> processed_wide including age-standardized suicide mortality rates
####### 1. Deprivation index data: GISD #####
GISD <- readr::read_csv2("raw_data/GISD_NDS_Kreise_2017_final.csv") %>%
  as_tibble()
#mutate(kreis = str_replace(Raumeinheit, ", Stadt", "")) %>% #make new variable where Stadt is deleted
#Clean & Standardize Indices
# Drop row 1 and column 3 -- presumably CSV header/metadata artefacts; verify against the raw file
GISD <- GISD[-1,-3]
# rename indicators to X_1 .. X_8
varname <- c('X') # define a list of varying "varname"
n <- c(8) # there are 8 indicators in total
names(GISD)[3:ncol(GISD)] <- unlist(mapply(function(x,y) paste(x, seq(1,y), sep="_"), varname, n))
# X_1 = 'Arbeitslosigkeit', engl.: Unemployment
# X_2 = 'Beschäftigte am Wohnort mit akademischen Anschluss' (!= Beschäftigte am Wohnort mit Fach-(Hochschulabschluss)), engl.: Employees at place of residence with a technical (university) degree
# X_3 = 'Beschäftigtenquote', engl.: Employment rate
# X_4 = 'Bruttoverdienst' (!= Bruttolohn und -gehalt), engl.: Gross wage and salary
# X_5 = 'Haushaltseinkommen' (!= Haushaltsnettoeinkommen), engl.: Net household income
# X_6 = 'Schulabgänger ohne Abschluss', engl.: School leavers without qualifications
# X_7 = 'Schuldnerquote', engl.: Debtor rate
# X_8 = 'Einkommenssteuer' (!= Steuereinnahmen), engl.: Tax revenues
##### Z-Transformation of GISD data #####
# Z-Standardization of all indicators; transformation of the variables of a distribution for better comparability of the different value ranges
# Z-score = (X - µ)/ sqrt(var)
# GISD <- scale(GISD.select[, X_1:X_8], center = TRUE, scale = TRUE)
# Z-standardization and saving as ZX_i
# NOTE: scale() returns a one-column matrix, so each ZX_* column is
# matrix-valued inside the tibble; rowSums() below handles this, but it
# can surprise other consumers of the saved CSV.
GISD$ZX_1 <- scale(GISD$X_1)
GISD$ZX_2 <- scale(GISD$X_2)
GISD$ZX_3 <- scale(GISD$X_3)
GISD$ZX_4 <- scale(GISD$X_4)
GISD$ZX_5 <- scale(GISD$X_5)
GISD$ZX_6 <- scale(GISD$X_6)
GISD$ZX_7 <- scale(GISD$X_7)
GISD$ZX_8 <- scale(GISD$X_8)
# Strip first characters of 'Kennziffer' in order to merge later
GISD$Kennziffer<-substring(GISD$Kennziffer, 3)
GISD <- GISD %>% as_tibble(GISD) %>%
  select(Kennziffer, Kreis = Raumeinheit, ZX_1, ZX_2, ZX_3, ZX_4, ZX_5, ZX_6, ZX_7, ZX_8)
#Change signs of variables,
# because logically, a high employment rate does not contribute to more deprivation,
# but a high debtor rate does. So we put all indicators on the same interpretation scale
# employees_residence_technical_university_degree = ZX_2
# employment_rate = ZX_3
# gross_wage_and_salary = ZX_4
# net_household_income = ZX_5
# tax_revenues = ZX_8
GISD <- GISD %>%
  mutate(ZX_2 = ZX_2*(-1),
         ZX_3 = ZX_3*(-1),
         ZX_4 = ZX_4*(-1),
         ZX_5 = ZX_5*(-1),
         ZX_8 = ZX_8*(-1)) %>%
  # columns 3:10 are ZX_1 .. ZX_8 after the select() above
  mutate(DI = rowSums(.[3:10])) %>%
  arrange(desc(DI)) #Overall Deprivation Index per Kreis = DI
write.csv(GISD, "clean_data/GISD_wide.csv")
############### 2. Citizen data ###############
### 2nd: Citizen numbers on 'Kreis'-level
cdata <- readr::read_csv2("raw_data/Einwohner_NDS_Kreise_2017.csv")
# Drop trailing rows by hard-coded index -- NOTE(review): fragile if the
# raw export ever changes length.
cdata <- cdata[-c(5356:5361),]
# Remove rows where four cells are NA (presumably empty separator rows -- verify)
ind_nas <- which(rowSums(is.na(cdata)) == 4)
cdata <- cdata[-ind_nas, ]
# The raw sheet stacks 51 blocks of 103 rows (one block per district);
# transpose each block into a single wide row.
data <- data.frame()
for (j in 1:51) {
  indx <- 103 * (j-1) + 1
  transp <- t(cdata[indx:(indx+102), ])
  transp[,1] <- transp[1,1]
  transp <- transp[-1,]
  data <- rbind(data, transp)
}
# Column labels come from the first block's first column
columnnames <- t(cdata[1:(1+102), ])[1,]
columnnames[1] <- "Kreis"
colnames(data) <- columnnames
colnames(data)       # inspection aid: echoes the column names when run
data$variable = str_remove_all( rownames(data), "[0-9]+")
sapply(data, class)  # inspection aid: echoes the column classes when run
# Output: All values are saved as factors --> change them to numeric!!!
data <- as_tibble(data) %>%
  rename_all( ~ tolower(.) %>% str_remove_all(" ") %>% str_replace("-","_")) %>%
  rename_at(vars(3:103), ~ paste0("age_", .)) %>%
  mutate_all(as.character) %>%
  mutate_at(vars(3:103), ~ na_if(. , "-") %>% as.numeric()) %>%
  mutate(kreisnummer = str_extract(kreis, "[0-9]+")) %>% ##########################
  mutate(kreis = str_remove(kreis, "[0-9]+ +")) %>%
  # Collapse the single-year-of-age columns into broad age bands
  mutate(age_under_20 = select(., age_0_1:age_19_20) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_20_30 = select(., age_20_21:age_29_30) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_30_40 = select(., age_30_31:age_39_40) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_40_50 = select(., age_40_41:age_49_50) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_50_60 = select(., age_50_51:age_59_60) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_60_70 = select(., age_60_61:age_69_70) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_70_80 = select(., age_70_71:age_79_80) %>% rowSums(na.rm = TRUE)) %>%
  mutate(age_over_80 = select(., age_80_81:age_100undälter) %>% rowSums(na.rm = TRUE)) %>%
  mutate(total = select(., c(age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80)) %>% rowSums(na.rm = TRUE)) %>%
  select(kreis, kreisnummer, total, age_under_20:age_over_80, variable)
data <- filter(data, variable == "Insgesamt")
# Removing row 'Niedersachsen' and the column 'variable'
data <- data[-1, -12]
# Removing rows 'Braunschweig', 'Hannover', 'Hannover,Landeshauptstadt', 'Lüneburg' (appears two times, with the same name), 'Weser-Ems'
# Removing rows manually/by row number due to the double entry of 'Lüneburg'
data <- data[-c(1, 12, 14, 21, 33), ]
# Substitute entries 'NA' by value 0
data[is.na(data)] = 0
data$total <- as.numeric(data$total)
citizens <- data
write.csv(citizens, "clean_data/citizens_wide.csv", row.names = F)
############### 3. suicide rates per kreis ##################
#suicide rates with age groups data
#make variable kreis: line 1: 12 -> take
### 3rd: Suicide numbers on 'Kreis'-Level
#rm(list = ls())
# Read "-" cells as NA (presumably they mark zero/unreported counts -- verify)
d <- readr::read_csv2("raw_data/TODESURS78_NDS_Kreise_2017_final.csv", na = "-")
column_names <- pull( d[2:12, 1])
# Each district occupies 12 consecutive rows in the raw sheet
nkreise = nrow(d)/12
data <- data.frame()
for(i in 1:nkreise){
  ind = 12 * (i-1) + 1
  transposed = t(d[ind:(ind+11),])
  transposed[,1] = transposed[1,1]
  transposed = transposed[-1,]
  data = rbind(data, transposed)
}
colnames(data) <- c("Kreis", column_names)
data$variable = str_remove_all( rownames(data), "[0-9]+")
data <- as_tibble(data) %>%
  rename_all( ~ tolower(.) %>% str_remove_all(" ") %>% str_replace("-","_")) %>%
  mutate_at(vars(3:12), as.numeric) %>%
  rename_at(vars(4:12), ~ paste0("age_", .)) %>%
  mutate(kreisnummer = str_extract(kreis, "[0-9]+")) %>%
  mutate(kreis = str_remove(kreis, "[0-9]+ +")) %>%
  # NOTE: unlike the rowSums(na.rm = TRUE) band sums elsewhere, this uses
  # `+`, so an NA in any of the three bands makes age_under_20 NA.
  mutate(age_under_20 = u.1jahr + age_1_15 + age_15_20) %>%
  mutate(age_over_80 = age_80u.ä.) %>%
  mutate(total = select(., c(age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80)) %>% rowSums(na.rm = TRUE)) %>%
  select(kreis, kreisnummer, total, age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80, variable)
data <- filter(data, variable == "Insgesamt")
# Removing row 'Niedersachsen' and the column 'variable'
data <- data[-1, -12]
# Removing rows 'Braunschweig', 'Hannover', 'Hannover,Landeshauptstadt', 'Lüneburg' (appears two times, with the same name), 'Weser-Ems'
# Removing rows manually/by row number due to the double entry of 'Lüneburg'
data <- data[-c(1, 12, 14, 21, 33), ]
# data
suicides <- data
# suicides
write.csv(suicides, "clean_data/suicides_wide.csv", row.names = F)
##### Calculation of age-standardized suicide mortality rate (SMR) #####
## Computed per 'Kreis' so that each district's age distribution is
## taken into account.
## 1st step: age-specific suicide rate (asSR) per 100,000 inhabitants:
##   asSR = age-specific suicides / age-specific population * 100000
## Only columns 4:11 (the age bands) become rates; columns 1:3 are
## copied unchanged from `citizens` (identifiers and total population).
asSR <- citizens
for (i in 4:11) {
  asSR[, i] <- (suicides[, i] / citizens[, i]) * 100000
}
write.csv(asSR, "clean_data/asSR_wide.csv")
## 2nd step: age-specific proportion of the population:
##   asProp = age-specific population / total population (column 3)
asProp <- citizens
for (i in 4:11) {
  asProp[, i] <- (citizens[, i] / citizens[, 3])
}
## Fix: this previously overwrote clean_data/asSR_wide.csv (copy-paste
## slip); the proportions now go to their own file.
write.csv(asProp, "clean_data/asProp_wide.csv")
## 3rd step: age-standardized SMR = sum over age bands of
##   (age-specific rate x age-specific population share)
asSMR <- citizens
for (i in 4:11) {
  asSMR[, i] <- (asSR[, i] * asProp[, i])
}
asSMR <- as_tibble(asSMR) %>%
  mutate(total = select(., c(age_under_20, age_20_30, age_30_40, age_40_50, age_50_60, age_60_70, age_70_80, age_over_80)) %>%
           rowSums(na.rm = TRUE)) %>%
  select(Kennziffer = kreisnummer, SMR = total) %>%
  arrange(desc(SMR))
write.csv(asSMR, "clean_data/asSMR_wide.csv")
##### Merging processed GISD and SMR #####
Processed <-
  left_join(GISD, asSMR, by="Kennziffer") %>%
  arrange(desc(SMR))
write.csv(Processed, "clean_data/Processed_wide.csv")
|
.II "awk^tutorial"
.ds TL "The awk Language"
.NH "Introduction to the awk Language" 1
.PP
\fBawk\fR is a general-purpose language for processing text.
With \fBawk\fR, you can manipulate strings,
process records, and generate reports.
.PP
.II "Aho, A.V."
.II "Weinberger, P.J."
.II "Kernighan, Brian W."
.B awk
is named after its creators:
A. V. Aho, P. J. Weinberger, and Brian W. Kernighan.
Unfortunately, its name suggests that
.B awk
is awkward \(em whereas in truth the
.B awk
language is simple, elegant, and powerful.
With it, you can perform many tasks that would otherwise require
hours of drudgery.
.PP
\fBawk\fR uses a simple syntax.
Each statement in an \fBawk\fR program contains
either or both of two elements:
a \fIpattern\fR and an \fIaction\fR.
The pattern tells \fBawk\fR what lines to select from the input stream;
and the action tells \fBawk\fR what to do with the selected data.
.PP
This tutorial explains how to write
.B awk
programs.
It explains how to describe a pattern to \fBawk\fR.
It also describes the range of actions that
.B awk
can perform;
these include formatted printing of text,
assigning variables, defining arrays, and controlling the flow of data.
.SH "Example Files"
.PP
Before you begin to study
.BR awk ,
please take the time to type the following text files that
are used by the examples in this tutorial.
.PP
The first is some text from Shakespeare.
Use the command \fBcat\fR to type it into the file
.BR text1 ,
as follows.
Note that \fB<ctrl-D>\fR means that you should hold down the \fBCtrl\fR
(or \fBcontrol\fR) key and simultaneously press `D'.
Do not type it literally.
.DM
.II "Shakespeare, William"
cat > text1
When, in disgrace with fortune and men's eyes,
I all alone beweep my outcast state,
And trouble deaf heaven with my bootless cries,
And look upon myself, and curse my fate,
Wishing me like to one more rich in hope,
Featured like him, like him with friends possest,
Desiring this man's art and that man's scope,
With what I most enjoy contented least.
Yet in these thoughts myself almost despising,
Haply I think on thee - and then my state,
Like to the lark at break of day arising
From sullen earth, sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
<ctrl-D>
.DE
.II "Ruth, Babe"
The second example consists of some of Babe Ruth's batting statistics,
which we will use to demonstrate how \fBawk\fR processes tabular input.
Type it into file
.BR table1 ,
as follows:
.DM
cat > table1
1920 .376 54 158 137
1921 .378 59 177 171
1922 .315 35 94 99
1923 .393 41 151 131
1924 .378 46 143 121
1925 .290 25 61 66
1926 .372 47 139 145
1927 .356 60 158 164
1928 .323 54 163 142
1929 .345 46 121 154
<ctrl-D>
.DE
The columns give, respectively,
the season, the batting average, and the numbers of
home runs, runs scored, and runs batted in (RBIs).
.PP
The rest of this tutorial presents many examples that use these files.
Type them in and run them!
In that way, you can get a feel for
.B awk
into your fingers.
Experiment; try some variations on the examples.
Don't be afraid of making mistakes; this is one good way to learn the
limits (and the strengths) of a language.
.SH "Using awk"
.PP
\fBawk\fR reads input from the standard input
(entered from your terminal or from a file you specify),
processes each input line according to a specified \fBawk\fR program,
and writes output to the standard output.
This section explains the structure of an \fBawk\fR program
and the syntax of \fBawk\fR command lines.
.Sh "Command-line Options"
.PP
.II "awk^command-line options"
The complete form for the \fBawk\fR command line is as follows:
.DM
\fBawk\fR [\fB-y\^\fR] [\fB\-F\fIc\^\fR] [\fB\-f \fIprogfile\^\fR] [\fIprog\^\fR] [\fIfile1\^\fR] [\fIfile2\^\fR] ...
.DE
The following describes each element of the command line.
.IP \fB\-y\fR
Map
.I patterns
from lower case to both lower-case and upper-case letters.
For example, with this option the string
.B the
would match
.B the
or
.BR The .
.IP \fB\-F\fIc\fR
Set the field-separator character to the character
.IR c .
The field-separator character and its uses are described below.
.IP "\fB\-f \fIprogfile\fR"
Read the
.B awk
program from
.I progfile .
.IP \fIprog\fR
An
.B awk
program to execute.
If you do not use the
.B -f
option, you must enter
.BR awk 's
statements on its command line.
.sp \n(pDu
Note that if you include
.BR awk 's
program on its command line (instead of in a separate file),
you must enclose the program between apostrophes.
Otherwise, some of the
.B awk
statements will be modified by the shell before \fBawk\fR ever sees them,
which will make a mess of your program.
For example:
.DM
awk 'BEGIN {print "sample output file"}
{print NR, $0}'
.DE
(The following sections explain
what the stuff between the apostrophes means.)
However, if you include the statement within a file that you pass to
.B awk
via its \fB\-f\fR option, you must \fInot\fR enclose the statements within
parentheses; otherwise,
.B awk
will become very confused.
If you were to put the statements in the above program into an
.B awk
program file, they would appear as follows:
.DM
BEGIN {print "sample output file"}
{print NR, $0}
.DE
.IP "\fIfile1 file2 ...\fR"
The files whose text you wish to process.
For example, the command
.DM
awk '{print NR, $0}' text1
.DE
prints the contents of
.BR text1 ,
but precedes each line with a line number.
.sp \n(pDu
If you do not name an input file,
.B awk
processes what it reads from the standard input.
For example, the command
.DM
awk '{print NR, $0}'
.DE
reads what you type from the keyboard and echoes it preceded with a line
number.
To exit from this program, type
.BR <ctrl-D> .
.SH "Structure of an awk Program"
.PP
.II "awk^statement"
An
.B awk
program consists of one or more statements of the form:
.DS
\fIpattern \fB{ \fIaction \fB}\fR
.DE
Note that
.B awk
insists that the
action
be enclosed between braces, so that it can distinguish the action
from the pattern.
.PP
A program can contain as many statements
as you need to accomplish your purposes.
When
.B awk
reads a line of input,
it compares that line with the \fIpattern\fR in each statement.
Each time a line matches \fIpattern\fR,
\fBawk\fR performs the corresponding \fIaction\fR.
.B awk
then reads the next line of input.
.PP
A statement can specify an \fIaction\fR without a \fIpattern\fR.
In this case,
.B awk
performs the action on every line of input.
For example, the program
.DM
awk '{ print }' text1
.DE
prints every line of
.B text1
onto the standard output.
.PP
An \fBawk\fR program may also specify a pattern without an action.
In this case, when an input line matches the pattern,
\fBawk\fR prints it on the standard output.
For example, the command
.DM
awk 'NR > 0' table1
.DE
prints all of
.B table1
onto the standard output.
Note that you can use the same pattern in more than one statement.
Examples of this will be given below.
.PP
.BR awk 's
method of forming patterns uses
.I "regular expressions"
(also called
.IR patterns ),
like those used by the \*(CO commands
.BR sed ,
.BR ed ,
and
.BR egrep .
Likewise,
.BR awk 's
method of constructing actions is modelled after the C programming language.
If you are familiar with regular expressions and with C, you should have
no problem learning how to use
.BR awk .
However,
if you are not familiar with them, they will be explained in the following
sections.
.Sh "Records and Fields"
.PP
.II "awk^records"
.II "awk^fields"
\fBawk\fR divides its input into
.IR records .
It divides each record, in turn, into
.IR fields .
.II "awk^input-record separator"
.II "awk^input-field separator"
Records are separated by a character called
the
.IR "input-record separator" ;
likewise, fields are separated by the
.IR "input-field separator" .
.B awk
in effect conceives of its input as a table with an indefinite
number of columns.
.PP
The newline character is the default input-record separator,
so \fBawk\fR normally regards each input line as a separate record.
The space and the tab characters are
the default input-field separator,
so white space normally separates fields.
.PP
To address a field within a record, use the syntax \fB$\fIN\fR, where
.I N
is the number of the field within the current record.
The pattern \fB$0\fR addresses the entire record.
Examples of this will be given below.
.II "awk^output-record separator"
.II "awk^output-field separator"
In addition to input record and field separators,
\fBawk\fR provides output record and field separators,
which it prints between output records and fields.
The default output-record separator
is the newline character;
\fBawk\fR normally prints each output record as a separate line.
The space character is the default output-field separator.
.SH "Patterns"
.PP
This section describes how
.B awk
interprets the pattern section of a statement.
.Sh "Special Patterns"
.PP
.II "awk^special patterns"
.II "awk^pattern, special"
To begin,
.B awk
defines and sets a number of special \fIpattern\fRs.
You can use these patterns in your program for special purposes.
You can also redefine some of these patterns to suit your preferences.
The following describes the commonest such special
.IR patterns ,
and how they're used:
.II "awk^BEGIN"
.IP \fBBEGIN\fR
This pattern matches the beginning of the input file.
.B awk
executes all
.I actions
associated with this pattern before it begins to read input.
.II "awk^END"
.IP \fBEND\fR
This pattern matches the end of the input file.
.B awk
executes all
.I actions
associated with this pattern after it has read all of its input.
.II "awk^FILENAME"
.IP \fBFILENAME\fR
.B awk
sets this pattern to the name of the file
that it is currently reading.
Should you name more than one input file on the command line,
.B awk
resets this pattern as it reads each file in turn.
.II "awk^FS"
.IP \fBFS\fR
Input-field separator.
This pattern names the character that
.B awk
recognizes as the field separator for the records it reads.
.II "awk^NF"
.IP \fBNF\fR
This pattern gives the number of fields within the current record.
.II "awk^NR"
.IP \fBNR\fR
This pattern gives the number of the current record within the
input stream.
.II "awk^OFS"
.IP \fBOFS\fR
Output-field separator.
.B awk
sets this pattern to the character that it writes in its output to
separate one field from another.
.II "awk^ORS"
.IP \fBORS\fR
Output-record separator.
.B awk
sets this pattern to the character that it writes in its output to
separate one record from another.
.II "awk^RS"
.IP \fBRS\fR
Input-record separator.
.B awk
sets this pattern to the character by which it separates records
that it reads.
.Sh "Arithmetic Relational Expressions"
.II "awk^arithmetic operators"
.PP
An
.I operator
marks a task to be performed within an expression,
much as the `+' or `/' within an arithmetic expression indicates
that the numbers are to be, respectively, added or divided.
You can use \fBawk\fR's operators to:
.IP \(bu 0.3i
Compare a special pattern with a variable, a field, or a constant.
.IP \(bu
Assign a value to a variable or to a special pattern.
.IP \(bu
Dictate the relationship among two or more expressions.
.PP
The first type of operator to be discussed is
.IR "arithmetic relational operators" .
These compare the input text with an arithmetic value.
.B awk
recognizes the following arithmetic operators:
.DS
\fB<\fR Less than
\fB<=\fR Less than or equal to
\fB==\fR Equivalent
\fB!=\fR Not equal
\fB>=\fR Greater than or equal to
\fB>\fR Greater than
.DE
With these operators, you can compare a field with a constant;
compare one field with another;
or compare a special pattern with either a
field or a constant.
.PP
For example, the following
.B awk
program prints all of the years in which Babe Ruth hit
50 or more home runs:
.DM
awk '$3 >= 50' table1
.DE
(As you recall, column 3 in the file
.B table1
gives the number of home runs.)
The program prints the following on your screen:
.DM
1920 .376 54 158 137
1921 .378 59 177 171
1927 .356 60 158 164
1928 .323 54 163 142
.DE
The following program, however, shows the years in which Babe Ruth scored
more runs than he drove in:
.DM
awk '$4 > $5 { print $1 }' table1
.DE
Remember, field 4 in file
.B table1
gives the number of runs scored, and field 5 gives the number of
runs batted in.
You should see the following on your screen:
.DM
1920
1921
1923
1924
1928
.DE
In the above program, expression
.DM
{print $1}
.DE
defines the action to perform, as noted by the fact that expression is
enclosed between braces.
In this case, the program tells
.B awk
that if the input record matches the pattern, to print only the first
field.
However, to print both the season and the number of runs scored, use the
following program:
.DM
awk '$4 > $5 { print $1, $4 }' table1
.DE
This prints the following:
.DM
1920 158
1921 177
1923 151
1924 143
1928 163
.DE
Note that
.B $1
and
.B $4
are separated by a comma.
.II "awk^,"
The comma tells
.B awk
to print its default output-field separator between columns.
If we had left out the comma, the output would have appeared as follows:
.DM
1920158
1921177
1923151
1924143
1928163
.DE
As we noted above, the special pattern
.B OFS
gives the output-field separator.
.B awk
by default defines this special pattern to the space character.
If we wish to redefine the output-field separator,
we can use an operator, plus the special pattern
.BR BEGIN ,
as follows:
.DM
awk 'BEGIN { OFS = ":" }
$4 > $5 { print $1, $4 }' table1
.DE
This prints:
.DM
1920:158
1921:177
1923:151
1924:143
1928:163
.DE
The first statement
.DM
BEGIN { OFS = ":"}
.DE
tells
.B awk
to set the output-field separator (the special pattern \fBOFS\fR)
to `:' before it processes any input (as indicated by the special
pattern \fBBEGIN\fR).
.PP
Although we're getting a little ahead of ourselves, note that there's
no reason to print the fields in the order in which they appear in
the input record.
For example, if you wish to print the number of runs scored before the
season, use the command:
.DM
awk 'BEGIN { OFS = ":"}
$4 > $5 { print $4, $1 }' table1
.DE
This prints:
.DM
158:1920
177:1921
151:1923
143:1924
163:1928
.DE
As you recall, the special pattern
.B NR
gives the number of the current input record.
You can execute an action by comparing this pattern with a constant.
For example, the command
.DM
awk 'NR > 12' text1
.DE
prints:
.DM
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
That is, the program prints every line after line 12 in the input file.
As you recall, a statement that has a pattern but no action
prints the entire record that matches the pattern.
.PP
As we saw with the special patterns, some patterns can be defined to
be numbers and others to be text.
If you compare a number with a string,
.B awk
by default makes a string comparison.
The following example shows how \fBawk\fR
compares one field to part of the alphabet:
.DM
awk '$1 <= "C"' text1
.DE
This program prints:
.DM
And trouble deaf heaven with my bootless cries,
And look upon myself, and curse my fate,
.DE
The statement
\fB$1 <= "C"\fR selected all records that begin
with an ASCII value less than or equal to that of the letter `C' (0x43) \(em
in this case, both lines that begin with `A' (0x41).
If we ran this example against
.BR table1 ,
it would print every record in the file.
This is because each record begins with the character `1' (0x31), which
matches the pattern \fB$1 <= "C"\fR.
.PP
Finally, you can use a numeric field plus a constant in a comparison
statement.
For example, the following program prints all of the seasons in which
Babe Ruth had at least 100 more runs batted in than home runs:
.DM
awk '$3 + 100 < $5 {print $1}' table1
.DE
This prints the following:
.DM
1921
1927
1929
.DE
.Sh "Boolean Combinations of Expressions"
.PP
.II "awk^Boolean operators"
.B awk
has a number of operators, called
.I Boolean
operators, that let you hook together several small expressions
into one large, complex expression.
.B awk
recognizes the following Boolean operators:
.DS
\fB||\fR Boolean OR (one expression or the other is true)
\fB&&\fR Boolean AND (both expressions are true)
\fB!\fR Boolean NOT (invert the value of an expression)
.DE
.II "Boole, George"
(The eponym ``Boolean'' comes from the English mathematician George Boole.)
In a Boolean expression,
.B awk
evaluates each sub-expression to see if it is true or false;
the relationship of sub-expressions (as set by the Boolean operator)
then determines whether the entire expression is true or false.
.PP
For example, the following program prints all seasons in which
Babe Ruth hit between 40 and 50 home runs:
.DM
awk '$3 >= 40 && $3 <= 50 { print $1, $3 }' table1
.DE
This prints the following:
.DM
1923 41
1924 46
1926 47
1929 46
.DE
In the above program,
.B awk
printed its output only if the subexpression \fB$3 >= 40\fR
was true \fIand\fR (\fB&&\fR) the subexpression \fB$3 <= 50\fR
was true.
.PP
The next example demonstrates the Boolean OR operator.
It prints all seasons for which Babe Ruth hit fewer
than 40 home runs or more than 50 home runs:
.DM
awk '$3 < 40 || $3 > 50 { print $1, $3}' table1
.DE
This example prints the following:
.DM
1920 54
1921 59
1922 35
1925 25
1927 60
1928 54
.DE
In this example,
.B awk
printed its output if the subexpression \fB$3 < 40\fR
was true \fIor\fR (\fB||\fR) the subexpression \fB$3 > 50\fR was true.
Note that the output would also be printed if both subexpressions were true
(although in this case, this is impossible).
.PP
Finally, the Boolean operator `!' negates the truth-value of any expression.
For example, the expression \fB$1 == "And"\fR is true if the first field
in the current record equals ``And''; however, the expression
\fB$1 != "And"\fR is true if the first field does \fInot\fR equal ``And''.
For example, the program
.DM
awk '$1 != "And"' text1
.DE
prints:
.DM
When, in disgrace with fortune and men's eyes,
I all alone beweep my outcast state,
Wishing me like to one more rich in hope,
Featured like him, like him with friends possest,
Desiring this man's art and that man's scope,
With what I most enjoy contented least.
Yet in these thoughts myself almost despising,
Haply I think on thee - and then my state,
Like to the lark at break of day arising
From sullen earth, sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
These are the 12 lines from \fBtext1\fR that do not begin with ``And''.
.PP
Note that
.B awk
evaluates all operators from left to right unless sub-expressions are
grouped together with parentheses, as is described in the following section.
.Sh "Patterns"
.II "awk^patterns"
.PP
The previous examples have all matched strings or numbers against predefined
fields in each input record.
This is fine for manipulating tabular information, like our table of
Babe Ruth's batting statistics, but it is not terribly useful when
you are processing free text.
Free text is not organized into predefined columns, nor are you likely to
know which field (that is, which word) will contain the pattern you're seeking.
.PP
.II ed
.II sed
To help you manage free text,
.B awk
has a pattern-matching facility that resembles those of the editors
.B ed
and
.BR sed .
.PP
The most common way to search for a pattern is to enclose it between slashes.
For example, the program
.DM
awk '/and/' text1
.DE
prints every line in
.B text1
that contains the string ``and''.
.DM
When, in disgrace with fortune and men's eyes,
And look upon myself, and curse my fate,
Desiring this man's art and that man's scope,
Haply I think on thee - and then my state,
.DE
Note that ``and'' does not have to be a word by itself \(em it can be
a fragment within a word as well.
Note, too, that this pattern matches ``and'' but does not match ``And'' \(em
but it would if we were to use the \fB\-y\fR option on the \fBawk\fR command
line (described above).
.PP
You can use Boolean operators to search for more than one string at once.
For example, the program
.DM
awk '/and/ && /or/' text1
.DE
finds every line in \fBtext1\fR that contains both ``and'' and ``or''.
There is only one:
.DM
When, in disgrace with fortune and men's eyes,
.DE
Note that the ``or'' in this line is embedded in the word ``fortune''.
.PP
.B awk
can also scan for classes and types of characters.
To do so, enclose the characters within brackets and place the bracketed
characters between the slashes.
For example, the following program looks for every line in \fBtext1\fR
that contains a capital `A' through a capital `E':
.DM
awk '/[A-E]/' text1
.DE
This prints the following:
.DM
And trouble deaf heaven with my bootless cries,
And look upon myself, and curse my fate,
Desiring this man's art and that man's scope,
.DE
.II "awk^special characters"
In addition, you can use the following
special characters for further flexibility:
.DS
\fB[ ]\fR Class of characters
\fB( )\fR Grouping subexpressions
\fB|\fR Alternatives among expressions
\fB+\fR One or more occurrences of the expression
\fB?\fR	Zero or one occurrence of the expression
\fB*\fR Zero, one, or more occurrences of the expression
\fB.\fR Any non-newline character
.DE
When adding a special character to a pattern, enclose the special
character as well as the rest of the pattern
within slashes.
.PP
To search for a string that contains one of
the special characters, you must precede the
character with a backslash.
For example, if you are looking for the string ``today?'',
use the following pattern:
.DM
/today\e?/
.DE
When you need to find an expression in a
particular field, not just anywhere in the
record, you can use one of these operators:
.DS
\fB~\fR Contains the data in question
\fB!~\fR Does not contain the data in question
.DE
For example, if you need to find the digit `9'
in the fourth field of file \fBtable1\fR, use the following
program:
.DM
awk '$4~/9/ {print $1, $4}' table1
.DE
This prints the following:
.DM
1922 94
1926 139
.DE
As you can see, the above program found every record with a `9' in its
fourth field, regardless of whether the `9' came at the beginning of the
field or its end.
.B awk
also recognizes two operators that let you set where a pattern is within
a field:
.DS
\fB^\fR Beginning of the record or field
\fB$\fR End of the record or field
.DE
For example, to find every record in \fBtable1\fR whose fourth field
\fIbegins\fR with a `9', run the following program:
.DM
awk '$4~/^9/ {print $1, $4}' table1
.DE
This prints:
.DM
1922 94
.DE
Finally, to negate a pattern use the operator \fB!~\fR.
For example, to print every record in
.B table1
whose fourth column does
.I not
begin with a `9', use the following program:
.DM
awk '$4!~/^9/ {print $1, $4}' table1
.DE
This prints:
.DM
1920 158
1921 177
1923 151
1924 143
1925 61
1926 139
1927 158
1928 163
1929 121
.DE
.Sh "Ranges of Patterns"
.PP
.II "awk^patterns, range"
You can tell \fBawk\fR to perform an action
on all records between two patterns.
For example, to print all records between the
.I patterns
\fB1925\fR and \fB1929\fR, inclusive,
enclose the strings in slashes and separate them with a comma,
then indicate the
.B print
action, as follows:
.DM
awk '/1925/,/1929/ { print }' table1
.DE
You can also use the special pattern
.B NR
(or \fIrecord number\^\fR) to name a range of record numbers.
For example, to print records 5 through 10 of file
.BR text1 ,
use the following program:
.DM
awk 'NR == 5, NR == 10 { print }' text1
.DE
.Sh "Resetting Separators"
.PP
.II "awk^separators, reset"
As noted above,
.B awk
recognizes certain characters by default to parse its input into
records and fields, and to separate its output into records and fields:
.II "awk^FS"
.IP \fBFS\fR
Input-field separator.
By default, this is one or more white-space characters (tabs or spaces).
.II "awk^OFS"
.IP \fBOFS\fR
Output-field separator.
By default, this is exactly one space character.
.II "awk^ORS"
.IP \fBORS\fR
Output-record separator.
By default, this is the newline character.
.II "awk^RS"
.IP \fBRS\fR
Input-record separator.
By default, this is the newline character.
.PP
By resetting any of these special patterns, you can change how
.B awk
parses its input or organizes its output.
Consider, for example, the command:
.DM
awk 'BEGIN {ORS = "|"}
/1920/,/1925/ {print $1, $5}' table1
.DE
This prints the following:
.DM
1920 137|1921 171|1922 99|1923 131|1924 121|1925 66|
.DE
As you can see, this prints the season and the number of runs batted
in for the 1920 through 1925 season.
However,
.B awk
uses the pipe character `|' instead of the newline character to separate
records.
If you wish to change the output-field separator as well as the
output-record separator, use the program:
.DM
awk 'BEGIN {ORS = "|" ; OFS = ":"}
/1920/,/1925/ {print $1, $5}' table1
.DE
This produces:
.DM
1920:137|1921:171|1922:99|1923:131|1924:121|1925:66|
.DE
.II "awk^;"
As you can see,
.B awk
has used the colon `:' instead of a white-space character to separate
one field from another.
.PP
Note, too, that the semicolon `;' character separates expressions
in the action portion of the statement associated with the
.B BEGIN
pattern.
This lets you associate more than one action with a given pattern, so you
do not have to repeat that pattern.
This is discussed at greater length below.
.PP
You can also change the input-record separator from the newline
character to something else that you prefer.
For example, the following program changes the input-record separator
from the newline to the comma:
.DM
awk 'BEGIN {RS = ","}
{print $0}' text1
.DE
This yields the following:
.DM
When
in disgrace with fortune and men's eyes
.sp \n(pDu
I all alone beweep my outcast state
.sp \n(pDu
And trouble deaf heaven with my bootless cries
.sp \n(pDu
And look upon myself
and curse my fate
.sp \n(pDu
Wishing me like to one more rich in hope
.sp \n(pDu
Featured like him
like him with friends possest
.sp \n(pDu
Desiring this man's art and that man's scope
.sp \n(pDu
With what I most enjoy contented least.
Yet in these thoughts myself almost despising
.sp \n(pDu
Haply I think on thee - and then my state
.sp \n(pDu
Like to the lark at break of day arising
From sullen earth
sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
The blank lines resulted from a comma's occurring at the end of a line.
.PP
.II "awk^="
Note that by specifying the null string \fB(RS="")\fR,
you can make two consecutive newlines the record separator.
Note, too, that only one character can be the input-record separator.
If you try to reset this separator to a string,
.B awk
uses the first character in the string as the separator, and ignores the rest.
.PP
You can change the input-field separator by redefining
.BR FS .
The default
.B FS
is \fB<space>\et\fR exactly and in that order
(where \fB<space>\fR is the space character).
In this case,
.B awk
uses its ``white-space rule,'' in which
.B awk
treats any sequence of spaces and tabs as a single separator.
This is the default rule for
.BR FS .
If you set
.B FS
to anything else, including \fB\et<space>\fR, then each separator is separate.
For example, the following program changes the input-field separator
to the comma and prints the first such field it finds in each line from file
.BR text1 :
.DM
awk 'BEGIN {FS = ","}
{print $1}' text1
.DE
This produces:
.DM
When
I all alone beweep my outcast state
And trouble deaf heaven with my bootless cries
And look upon myself
Wishing me like to one more rich in hope
Featured like him
Desiring this man's art and that man's scope
With what I most enjoy contented least.
Yet in these thoughts myself almost despising
Haply I think on thee - and then my state
Like to the lark at break of day arising
From sullen earth
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
As you can see, this program prints text up to the first comma in
each line.
.B awk
throws away the comma itself, because the input-field separator is not
explicitly printed.
.PP
You can define several characters to be input-field separators
simultaneously.
When you specify several characters within
quotation marks, each character becomes a field
separator, and all separators have equal precedence.
For example, you can specify the letters `i', `j', and `k' to be
input-field separators.
The following program does this, and prints the first field so
defined from each record in file
.BR text1 :
.DM
awk 'BEGIN {FS = "ijk"}
{print $1}' text1
.DE
This prints:
.DM
When,
I all alone beweep my outcast state,
And trouble deaf heaven w
And loo
W
Featured l
Des
W
Yet
Haply I th
L
From sullen earth, s
For thy sweet love remember'd such wealth br
That then I scorn to change my state w
.DE
Note that if you set the input-record separator to a null string, you
can use the newline character as the input-field separator.
This is a handy way to concatenate clusters of lines into records that
you can then manipulate further.
.PP
One last point about the
.B FS
separator.
If the white-space rule is not invoked and
an assignment is made to a nonexistent field,
.B awk
can add the proper number of field separators.
For example if
\fBFS=":"\fR and the input line is \fBa:b\fR, then
the command \fB$5 = "e"\fR produces \fBa:b:::e\fR.
If the white-space rule were in effect,
.B awk
would add spaces as if each space were a separator,
and print a warning message.
In short, it would try to produce the sanest result from the error.
.PP
Finally, the variable
.B NR
gives the number of the current record.
The next example prints the total number of records in file
.BR text1 :
.DM
awk 'END {print NR}' text1
.DE
The output is
.DM
14
.DE
which is to be expected, since
.B text1
is a sonnet.
.SH "Actions"
.PP
.II "awk^actions"
The previous section described how to construct a
.I pattern
for
.BR awk .
For each pattern, there must be a corresponding
.IR action .
So far, the only action shown has been to print output.
However,
.B awk
can perform many varieties of actions.
In addition to printing, \fBawk\fR can:
.DS
\(bu Execute built-in functions
\(bu Redirect output
\(bu Assign variables
\(bu Use fields as variables
\(bu Define arrays
\(bu Use control statements
.DE
These actions are discussed in detail in the following sections.
.PP
As noted above, each
.B awk
statement must have an action.
If a statement does not include an action,
.B awk
assumes that the action is
.BR {print} .
.PP
Within each statement,
.B awk
distinguishes an action from its corresponding pattern by the fact
that the action is enclosed within braces.
Note that the action section of a statement may include several
individual actions;
however, each action must be separated from the others by
semicolons `;' or newlines.
.PP
.II "Free Software Foundation"
Some forms of
.BR awk ,
such as that provided by the Free Software Foundation (FSF),
allow user-defined functions.
The FSF version of
.B awk
is available from the MWC BBS as well as via COHware.
Note that your system must have at least two megabytes of RAM
to run the FSF version of
.BR awk .
.Sh "awk Functions"
.PP
.II "awk^functions"
\fBawk\fR includes the following functions with which you can
manipulate input.
You can assign a function to any variable or use it in a pattern.
The following lists
.BR awk 's
functions.
Note that an
.I argument
can be a variable, a field, a constant, or an expression:
.II "awk^abs"
.IP \fBabs(\fIargument\^\fB)\fR
Return the absolute value of \fIargument\fR.
.II "awk^exp"
.IP \fBexp(\fIargument\^\fB)\fR
.II "Euler, Leonhard"
Return Euler's number
.I e
(2.718...) to the power of \fIargument\fR.
.II "awk^index"
.IP \fBindex(\fIstring1\^\fB,\fIstring2\^\fB)\fR
Return the position of \fIstring2\fR within
\fIstring1\fR.
If \fIstring2\fR does not occur in \fIstring1\fR,
.B awk
returns zero.
This
.B awk
function resembles the \*(CO C function
.BR index() .
.II "awk^int"
.IP \fBint(\fIargument\^\fB)\fR
Return the integer portion of \fIargument\fR.
.II "awk^length"
.IP \fBlength\fR
Return the length, in bytes, of the current record.
.IP \fBlength(\fIargument\^\fB)\fR
Return the length, in bytes, of \fIargument\fR.
.II "awk^log"
.IP \fBlog(\fIargument\^\fB)\fR
Return the natural logarithm of \fIargument\fR.
.II "awk^print"
.IP "\fBprint(\fIargument1 argument2 ... argumentN\^\fB)\fR"
Concatenate and print
.I argument1
through
.IR argumentN .
.IP "\fBprint(\fIargument1\^\fB,\fIargument2\^\fB, \fI... argumentN\^\fB)\fR"
Print
.I argument1
through
.IR argumentN .
Separate each
.I argument
with the
.B OFS
character.
.II "awk^printf"
.IP "\fBprintf(\fIf\^\fB, \fIargument1\^\fB, \fI... argumentN\^\fB)\fR"
Format and print strings \fIargument1\fR through \fIargumentN\fR in the
manner set by the formatting string
.IR f ,
which can use \fBprintf()\fR-style formatting codes.
.IP "\fBsplit(\fIstr\^\fB, \fIarray\^\fB, \fIfs\^\fB)\fR"
Divide the string \fIstr\fR into fields associated with
\fIarray\fR.
The fields are separated by character \fIfs\fR or the default field separator.
.II "awk^sprintf"
.IP "\fBsprintf(\fIf\^\fB, \fIe1\^\fB, \fIe2\^\fB)\fR"
Format strings \fIe1\fR and \fIe2\fR in the
manner set by the formatting string
.IR f ,
and return the formatted string.
.I f
can use \fBprintf()\fR-style formatting codes.
.II "awk^sqrt"
.IP "\fBsqrt(\fIargument\^\fB)\fR"
Return the square root of \fIargument\fR.
.II "awk^substr"
.IP "\fBsubstr(\fIstr\^\fB, \fIbeg\^\fB, \fIlen\^\fB)\fR"
Scan string \fIstr\fR for position \fIbeg\fR;
if found, print the next \fIlen\fR characters.
If \fIlen\fR is not included, print from \fIbeg\fR
to the end of the record.
.Sh "Printing with awk"
.PP
.II "awk^printing"
Printing is the commonest task you will perform in your
.B awk
programs.
.BR awk 's
printing functions
.B printf
and
.B sprintf
resemble the C functions
.B printf()
and
.BR sprintf() ;
however, there are enough differences to make a close reading of this
section worthwhile.
.PP
.B print
is the commonest, and simplest, \fBawk\fR function.
When used without any arguments,
.B print
prints all of the current record.
The following example prints every record in file
.BR text1 :
.DM
awk '{print}' text1
.DE
You can print fields in any order you desire.
For example, the following program reverses the order of the season
and batting-average columns from file
.BR table1 :
.DM
awk '/1920/,/1925/ { print $2,$1 }' table1
.DE
The output is as follows:
.DM
.376 1920
.378 1921
.315 1922
.393 1923
.378 1924
.290 1925
.DE
Because the field names are separated by a comma,
\fBawk\fR inserts the
.B OFS
between the fields when it prints them.
If you do not separate field names with commas,
\fBawk\fR concatenates the fields when printing them.
For example, the program
.DM
awk '/1920/,/1925/ { print $2 $1 }' table1
.DE
produces:
.DM
.3761920
.3781921
.3151922
.3931923
.3781924
.2901925
.DE
When you use \fBawk\fR to process a column of text or numbers,
you may wish to specify a consistent format for the output.
The statement for formatting a column of numbers follows this
.IR pattern :
.DM
{printf "format", expression}
.DE
where
.I format
prescribes how to format the output, and
.I expression
specifies the fields for \fBawk\fR to print.
.PP
The following table names and defines
the most commonly used of \fBawk\fR's format control characters.
Each character must be preceded by a percent sign `%'
and a number in the form of
.I n
or
.IR n.m .
.DS
\fB%\fIn\fBd\fR Decimal number
\fB%\fIn\^\fB.\fIm\^\fBf\fR Floating-point number
\fB%\fIn\^\fB.\fIm\^\fBs\fR String
\fB%%\fR Literal `%' character
.DE
When you use the
.B printf()
function, you must define the output-record separator within the
format string.
The following codes are available:
.DS
\fB\en\fR Newline
\fB\et\fR Tab
\fB\ef\fR Form feed
\fB\er\fR Carriage return
\fB\e"\fR Quotation mark
.DE
For example, the following program prints Babe Ruth's RBIs unformatted:
.DM
awk '/1920/,/1925/ { print $1, $5 }' table1
.DE
The output appears as follows:
.DM
1920 137
1921 171
1922 99
1923 131
1924 121
1925 66
.DE
As you can see,
.B awk
does not align its output by default.
To right-justify the second column, use the following program:
.DM
awk '/1920/,/1925/ { printf("%d %3d\en", $1, $5) }' table1
.DE
The output is as follows:
.DM
1920 137
1921 171
1922 99
1923 131
1924 121
1925 66
.DE
Note that the `3' in the string
.B %3d
specifies the minimum number of characters to be displayed.
If the size of the number exceeds the space allotted to it,
.B awk
prints the entire number.
A different rule applies when printing strings, as will be
shown below.
.PP
To print a floating-point number, you must specify the minimum number of
digits you wish to appear on either side of the decimal point.
For example, the following program gives the average number of RBIs
Babe Ruth hit in each game between 1920 and 1925:
.DM
awk '/1920/,/1925/ { printf("%d %1.2f\en", $1, $5/154.0) }' table1
.DE
This prints the following:
.DM
1920 0.89
1921 1.11
1922 0.64
1923 0.85
1924 0.79
1925 0.43
.DE
Note the following points about the above program:
.IP \(bu 0.3i
To get the average number of runs batted in, we had to divide the
total number of RBIs in a season by the number of games in a season
(which in the 1920s was 154).
.B awk
permits you to use a constant to perform arithmetic on a field; this will
be discussed in more detail below.
.IP \(bu
To force
.B awk
to produce a floating-point number, the constant had to be in the format
of a floating-point number, i.e., ``154.0'' instead of ``154''.
Dividing an integer by another integer would not have produced what we wanted.
.PP
.B awk
rounds its output to match sensitivity you've requested \(em that is,
the number of digits to the right of the decimal point.
To see how sensitivity affects output, run the following program:
.DM
awk '/1920/,/1925/{printf("%1.2f %1.3f %1.4f\en",$5/154.0,$5/154.0,$5/154.0)}'\e
table1
.DE
This prints the following:
.DM
0.89 0.890 0.8896
1.11 1.110 1.1104
0.64 0.643 0.6429
0.85 0.851 0.8506
0.79 0.786 0.7857
0.43 0.429 0.4286
.DE
As an aside, the above example also shows that
you can break \fBawk\fR's command line across more than one
line using a backslash `\e' at the end of every line but the last.
Note, however, that you
.I cannot
break an
.B awk
statement across more than one line, or
.B awk
will complain about a syntax error.
.PP
One last example of floating-point numbers prints Babe Ruth's
ratio of runs scored to runs batted in between 1920 and 1925:
.DM
awk '/1920/,/1925/{x = ($5*1.0) ; printf("%1.3f\en", $4/x)}' table1
.DE
This produces the following:
.DM
1.153
1.035
0.949
1.153
1.182
0.924
.DE
The expression
.B "x = ($5*1.0)"
was needed to turn field 5 (the divisor) into a floating-point number,
so we could obtain the decimal fraction that we wanted.
This is discussed further below, when we discuss how to manipulate
constants.
.PP
The function
.B sprintf()
also formats expressions; however, instead of printing
its output, it returns it for assignment to a variable.
For example, you could rewrite the previous example program to replace
the multiplication operation with a call to
.BR sprintf() :
.DM
awk '/1920/,/1925/{x = sprintf("%3.1f", $5)
printf("%1.3f\en", $4/x)}' table1
.DE
The output is the same as that shown above.
.PP
The
.B %s
formatting string can be used to align text in fields.
The digit to the left of the period gives the width of the field;
that to the right of the period gives the number of characters to write
into the field.
Note that if input is larger than the number of characters allotted to it,
.B awk
truncates the input.
For example, the following program aligns on seven-character fields
some words from file
.BR text1 :
.DM
awk '{x=sprintf("%7.5s %7.5s %7.5s %7.5s", $1, $2, $3, $4)
print x}' text1
.DE
The output is as follows:
.DM
When, in disgr with
I all alone bewee
And troub deaf heave
And look upon mysel
Wishi me like to
Featu like him, like
Desir this man's art
With what I most
Yet in these thoug
Haply I think on
Like to the lark
From sulle earth sings
For thy sweet love
That then I scorn
.DE
Note that fields (words) longer than five characters are truncated;
and every word is right-justified on a seven-character field.
.Sh "Redirecting Output"
.PP
.II "awk^redirecting output"
In addition to printing to the standard output,
.B awk
can redirect the output of an action into a file, or append it onto
an existing file.
With this feature, you can extract information from
a given file and construct new documents.
The following example shows an easy way to sift Babe Ruth's statistics
into four separate files, for further processing:
.DM
awk '{ print $1, $2 > "average"
print $1, $3 > "home.runs"
print $1, $4 > "runs.scored"
print $1, $5 > "rbi"}' table1
.DE
Note that, as under the shell, the
operator `>' creates the named file if it does not exist,
or replaces its contents if it does.
To append
.BR awk 's
output onto the end of an existing file, use the operator `>>'.
.PP
.B awk
can also pipe the output of an action to another program.
As under the shell, the operator `|' pipes the output of one
process into another process.
For example, if it is vital for user
.B fred
to know Babe Ruth's batting
average for 1925, you can mail it to him with the following command:
.DM
awk '/1925/ {print $1, $2 | "mail fred"}' table1
.DE
.Sh "Assignment of Variables"
.PP
.II "awk^assigning variables"
A number of the previous examples assign values to variables.
.B awk
lets you create variables, perform arithmetic upon them,
and otherwise work with them.
.PP
An
.B awk
variable can be a string or a number,
depending upon the context.
Unlike C,
.B awk
does not require that you declare a variable.
By default, variables are set to the null
string (numeric value zero) on start-up of the \fBawk\fR program.
To set the variable
.B x
to the numeric value one, you can use the assignment operator `=':
.DM
x = 1
.DE
To set x to the string
.B ted ,
also use the assignment operator:
.DM
x = "ted"
.DE
When the context demands it, \fBawk\fR converts strings to numbers or numbers
to strings.
For example, the statement
.DM
x = "3"
.DE
initializes
.B x
to the string ``3''.
When an expression contains an
arithmetic operator such as the `-',
\fBawk\fR interprets the expression as numeric.
(Alphabetic strings evaluate to zero.)
Therefore, the expression
.DM
x = "3" - "1"
.DE
assigns the numeric value two to variable
.B x ,
not the string ``2''.
.PP
When the operator is included within the
quotation marks, \fBawk\fR treats the operator as a
character in the string.
In the following example
.DM
x = "3 - 1"
.DE
initializes
.B x
to the string ``3 - 1''.
.PP
A number of examples in the previous section showed you how to perform
arithmetic on fields.
.II "awk^arithmetic operators"
The following table gives \fBawk\fR's arithmetic operators:
.DS
\fB+\fR Addition
\fB-\fR Subtraction
\fB*\fR Multiplication
\fB/\fR Division
\fB%\fR Modulus
\fB++\fR Increment
\fB--\fR Decrement
\fB+=\fR Add and assign value
\fB-=\fR Subtract and assign value
\fB*=\fR Multiply and assign value
\fB/=\fR Divide and assign value
\fB%=\fR Divide modulo and assign value
.DE
Variables are often used with increment operators.
For example, the following program computes the average number of home
runs Babe Ruth hit each season during the 1920s:
.DM
awk ' { x += $3 }
END { y = (NR * 1.0)
printf("Average for %d years: %2.3f.\en", NR, x/y) }' table1
.DE
The output is:
.DM
Average for 10 years: 46.700.
.DE
.Sh "Field Variables"
.PP
.II "awk^field variables"
\fBawk\fR lets fields receive assignments, be used in arithmetic,
and be manipulated in string operations.
One task that has not yet been demonstrated is using
a variable to address a field.
For example, the following program prints the
.BR NR th
field (word) from the first seven lines in file
.BR text1 :
.DM
awk 'NR < 8 {print NR, $(NR)}' text1
.DE
The output is:
.DM
1 When,
2 all
3 deaf
4 myself,
5 one
6 with
7 man's
.DE
.Sh "Control Statements"
.PP
.II "awk^control statements"
\fBawk\fR has seven defined control statements.
This section explains them and gives examples of their use.
.II "awk^if"
.II "awk^else"
.IP "\fBif (\fIcondition\^\fB) \fIaction1 \fB[else\fI action2 \fB]\fR"
If \fIcondition\fR is true, then execute
.IR action1 .
If the optional
.B else
clause is present and
.I condition
is false, then
execute
.IR action2 .
.sp \n(pDu
The following program keeps running totals of Babe Ruth's RBIs, for both
the years where his runs scored exceeded his RBIs and the years where they
did not:
.DM
awk '{ if ( $4 > $5 )
gyear++
else
lyear++
}
END { printf("Scored exceed RBIs: %d years.\en", gyear)
	printf("Scored not exceed RBIs: %d years.\en", lyear)
}' table1
.DE
This produces:
.DM
Scored exceed RBIs: 5 years.
Scored not exceed RBIs: 5 years.
.DE
Note that if more than one action is associated with an
.B if
or
.B else
statement, you must enclose the statements between braces.
If you use braces with both the
.B if
and
.B else
statements, note that the closing brace of the
.B if
block and the opening brace of the
.B else
block
.I must
appear on the same line as the
.B else
statement.
For example:
.DM
if (expr) {
stuff
stuff
} else {
stuff
stuff
}
.DE
.II "awk^while"
.IS "\fBwhile (\fIcondition\^\fB) \fIaction\fR"
The
.B while
statement executes
.I action
as long as
.I condition
is true.
For example, the following program counts the number of times the word
.B the
appears in file
.BR text1 .
The
.B while
loop uses a variable to examine every word in every line:
.DM
awk ' { i = 1
while (i <= NF ) {
if ($i == "the") j++
i++
}
}
END { printf ("The word \e"the\e" occurs %d times.\en", j) }' text1
.DE
.II "Shakespeare, William"
The result, as follows, shows Shakespeare's economy of language:
.DM
The word "the" occurs 1 times.
.DE
By the way, note that if a control statement has more than one statement
in its action section,
enclose the action section between braces.
If you do not,
.B awk
will behave erratically or exit with a syntax error.
.II "awk^for"
.IP "\fBfor( \fIinitial\fB ; \fIend\fB ; \fIiteration\fB ) \fIaction\fR"
.IS "\fBfor( \fIvariable\fB in \fIarray\fB ) \fIaction\fR"
.BR awk 's
.B for
statement closely resembles the
.B for
statement in the C language.
The statement
.I initial
defines actions to be performed before the loop begins; this is usually
used to initialize variables, especially counters.
The statement
.I end
defines when the loop is to end.
The statement
.I iteration
defines one or more actions that are performed on every iteration of the
loop; usually this is used to increment counters.
Finally,
.I action
can be one or more statements that are executed on every iteration of
the loop.
.I action
need not be present, in which case only the action defined in the
.I iteration
portion of the
.B for
statement is executed.
.B for
is in fact just an elaboration of the
.B while
statement, but adjusted to make it a little easier to use.
The following example writes the previous example, but replaces the
.B while
loop with a
.B for
mechanism:
.DM
awk ' { for (i = 1 ; i <= NF ; i++)
if ($i == "the") j++
}
END { printf ("The word \e"the\e" occurs %d times.\en", j) }' text1
.DE
The output is the same as the previous example, but the syntax is neater
and easier to read.
.sp \n(pDu
The second form of the
.B for
loop examines the contents of an array.
It is described in the following section, which introduces arrays.
.IP \fBbreak\fR
.II "awk^break"
The statement
.B break
immediately interrupts a
.B while
or
.B for
loop.
For example, the following program is the same as the previous example,
but counts only the first occurrence of the word
.B the
in each line of
.BR text1 .
Thus, it counts the number of lines in
.B text1
that contain
.BR the :
.DM
awk '{ for (i = 1 ; i <= NF ; i++) {
if ($i == "the") {
j++
break
}
}
}
END {printf ("The word \e"the\e" occurs in %d lines.\en", j)}' text1
.DE
.II "awk^continue"
.IS \fBcontinue\fR
The statement
.B continue
immediately begins the next iteration of the nearest
.B while
or
.B for
loop.
For example, the following program prints all of Babe Ruth's statistics \(em
runs scored, runs batted, and home runs \(em in which he had more than 59
in one year:
.DM
awk ' { for (i = 3 ; i <= NF ; i++)
if ($i <= 59)
continue
else
printf("%d, column %d: %d\en", $1, i, $i)
} ' table1
.DE
This produces the following:
.DM
1920, column 4: 158
1920, column 5: 137
1921, column 4: 177
1921, column 5: 171
1922, column 4: 94
1922, column 5: 99
...
.DE
.II "awk^next"
.IS \fBnext\fR
The statement
.B next
forces
.B awk
to abort the processing of the current record and
skip to the next input record.
Processing of the new input record begins with the first pattern, just as
if the processing of the previous record had concluded normally.
To demonstrate this, the following program skips all records in file
.B text1
that have an odd number of fields (words):
.DM
awk ' { if (NF % 2 == 0) next }
{ print $0 } ' text1
.DE
This produces:
.DM
I all alone beweep my outcast state,
Wishing me like to one more rich in hope,
With what I most enjoy contented least.
Yet in these thoughts myself almost despising,
Like to the lark at break of day arising
.DE
.II "awk^exit"
.IS \fBexit\fR
Finally, the control statement
.B exit
forces the \fBawk\fP program to skip all remaining input and
execute the \fIactions\fR at the \fBEND\fR pattern, if any.
For example, the following program prints the year in which Babe Ruth
hit his 300th home run:
.DM
awk ' { i = $1 }
(j += $3) >= 300 { exit }
END {print "Babe Ruth hit his 300th homer in", i "."}' table1
.DE
This produces:
.DM
Babe Ruth hit his 300th homer in 1926.
.DE
.SH "Arrays"
.PP
.II "awk^arrays"
.B awk
has a powerful feature for managing arrays.
Unlike C,
.B awk
automatically manages the size of an array, so you do not have to declare
the array's size ahead of time.
Also, unlike C,
.B awk
lets you address each element within an array by a label, not just by its
offset within the array.
This lets you generate arrays ``on the fly,'' which can be very useful
in transforming many varieties of data.
.PP
To declare an array, simply name it within a statement.
.B awk
recognizes as an array every variable that is followed by brackets `[\ ]'.
To initialize a row within an array, you must define its value and name its
label.
A label can be either a number or a string.
A value, too, can be a number or a string; if the value is a number, then
you can perform arithmetic upon it, as will be shown in a following example.
.Sh "Initializing an Array"
.PP
To demonstrate how an array works, use the line editor
.B ed
to add a line of text to the beginning of file
.BR table1 .
Type the following; please note that the token
.B <tab>
means that you should type a tab character:
.DM
ed table1
1i
Year\fB<tab>\fPBA\fB<tab>\fPHRs\fB<tab>\fPScored\fB<tab>\fPRBIs
.
wq
.DE
This change writes a header into \fBtable1\fR that names each column.
Now, we can read these labels into an array and use them to
describe Babe Ruth's statistics.
For example, the following prints a summary of Babe Ruth's statistics for
the year 1926:
.DM
awk ' NR == 1 { for (i=1; i <= NF; i++) header [i] = $i }
$1 == 1926 {
for (i=1; i <= NF; i++)
print header[i] ":\et", $i
} ' table1
.DE
This produces:
.DM
.ta 0.5i 1.5i
Year: 1926
BA: .372
HRs: 47
Scored: 139
RBIs: 145
.DE
The statement
.DM
NR == 1 { for (i=1; i <= NF; i++) header [i] = $i }
.DE
reads the first line in
.BR table1 ,
which contains the column headers, and uses the headers to initialize the array
.BR header .
Each row is labeled with the contents of the variable
.BR i .
.PP
The loop
.DM
for (i=1; i <= NF; i++)
print header[i] ":\et", $i
.DE
prints the contents of
.BR header .
Because we labeled each row within
.B header
with a number, we can use a numeric loop to read its contents.
.Sh "The for() Statement With Arrays"
.PP
In the previous example, each element in the array was labeled with a number.
This permitted us to read the array with an ordinary
.B for
statement, which sets and increments a numeric variable.
However, the rows within an array can be labeled with strings, instead of
numbers.
To read the contents of such an array, you must use a special form of the
.BR for
statement, as follows:
.DS
\fBfor ( \fIoffset \fBin \fIarray \fB)\fR
.DE
.I array
names the array in question.
.I offset
is a variable that you name at the time of constructing the
.B for
statement.
You can use the value of
.I offset
in any subordinate printing actions.
.PP
The following program demonstrates this new form of
.BR for ,
and (incidentally) demonstrates the power of
.BR awk 's
array-handling feature.
It builds an array of each unique word in the file
.BR text1 ,
and notes the number of times that word occurs within the file:
.DM
awk ' { for (i = 1 ; i <= NF ; i++)
words [$i]++ }
END { for (entry in words)
print entry ":", words[entry] }' text1 | sort
.DE
This prints:
.DM
-: 1
And: 2
Desiring: 1
Featured: 1
For: 1
From: 1
Haply: 1
I: 4
Like: 1
That: 1
When,: 1
Wishing: 1
With: 1
Yet: 1
all: 1
almost: 1
...
.DE
As you can imagine, a similar program in C would require many more
lines of code.
However, a few features of this program are worth noting.
.PP
First, the expression
.DM
{ for (i = 1 ; i <= NF ; i++)
words [$i]++ }
.DE
declares the array
.B words .
Every time
.B awk
encounters a new field (word), it automatically adds another entry to the
array, and labels that entry with the word.
No work on your part is needed for this to happen.
.II "awk^++"
The `++' operator increments the value of the appropriate entry within
.BR words .
Because we did not initialize the entry, it implicitly contains a number.
.PP
The expression
.DM
{ for (entry in words)
print entry ":", words[entry] }
.DE
walks through the array
.BR words .
.B awk
initializes the variable
.B entry
to the label for each row in
.BR words ;
the
.B print
statement then prints
.B entry
and the contents of that row in the array \(em in this case, the number of
times the row appears in our input file.
.PP
Finally, we piped the output of this program to the command
.B sort
to print the words in alphabetical order.
.SH "For More Information"
.PP
This tutorial just gives a brief introduction to the power of
.BR awk .
To explore the language in depth, see \fIsed & awk\fR by Dale Dougherty
(Sebastopol, Calif, O'Reilly & Associates, Inc., 1985).
This book, however, describes a more complex version of
.B awk
than that provided with \*(CO.
.PP
The Lexicon's article on \fBawk\fR gives a quick summary of
its features and options.
| /doc/mwc/doc/coherent/text/awk.r | permissive | gspu/Coherent | R | false | false | 55,238 | r | .II "awk^tutorial"
.ds TL "The awk Language"
.NH "Introduction to the awk Language" 1
.PP
\fBawk\fR is a general-purpose language for processing text.
With \fBawk\fR, you can manipulate strings,
process records, and generate reports.
.PP
.II "Aho, A.V."
.II "Weinberger, P.J."
.II "Kernighan, Brian W."
.B awk
is named after its creators:
A. V. Aho, P. J. Weinberger, and Brian W. Kernighan.
Unfortunately, its name suggests that
.B awk
is awkward \(em whereas in truth the
.B awk
language is simple, elegant, and powerful.
With it, you can perform many tasks that would otherwise require
hours of drudgery.
.PP
\fBawk\fR uses a simple syntax.
Each statement in an \fBawk\fR program contains
either or both of two elements:
a \fIpattern\fR and an \fIaction\fR.
The pattern tells \fBawk\fR what lines to select from the input stream;
and the action tells \fBawk\fR what to do with the selected data.
.PP
This tutorial explains how to write
.B awk
programs.
It explains how to describe a pattern to \fBawk\fR.
It also describes the range of actions that
.B awk
can perform;
these include formatted printing of text,
assigning variables, defining arrays, and controlling the flow of data.
.SH "Example Files"
.PP
Before you begin to study
.BR awk ,
please take the time to type the following text files that
are used by the examples in this tutorial.
.PP
The first is some text from Shakespeare.
Use the command \fBcat\fR to type it into the file
.BR text1 ,
as follows.
Note that \fB<ctrl-D>\fR means that you should hold down the \fBCtrl\fR
(or \fBcontrol\fR) key and simultaneously press `D'.
Do not type it literally.
.DM
.II "Shakespeare, William"
cat > text1
When, in disgrace with fortune and men's eyes,
I all alone beweep my outcast state,
And trouble deaf heaven with my bootless cries,
And look upon myself, and curse my fate,
Wishing me like to one more rich in hope,
Featured like him, like him with friends possest,
Desiring this man's art and that man's scope,
With what I most enjoy contented least.
Yet in these thoughts myself almost despising,
Haply I think on thee - and then my state,
Like to the lark at break of day arising
From sullen earth, sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
<ctrl-D>
.DE
.II "Ruth, Babe"
The second example consists of some of Babe Ruth's batting statistics,
which we will use to demonstrate how \fBawk\fR processes tabular input.
Type it into file
.BR table1 ,
as follows:
.DM
cat > table1
1920 .376 54 158 137
1921 .378 59 177 171
1922 .315 35 94 99
1923 .393 41 151 131
1924 .378 46 143 121
1925 .290 25 61 66
1926 .372 47 139 145
1927 .356 60 158 164
1928 .323 54 163 142
1929 .345 46 121 154
<ctrl-D>
.DE
The columns give, respectively,
the season, the batting average, and the numbers of
home runs, runs scored, and runs batted in (RBIs).
.PP
The rest of this tutorial presents many examples that use these files.
Type them in and run them!
In that way, you can get a feel for
.B awk
into your fingers.
Experiment; try some variations on the examples.
Don't be afraid of making mistakes; this is one good way to learn the
limits (and the strengths) of a language.
.SH "Using awk"
.PP
\fBawk\fR reads input from the standard input
(entered from your terminal or from a file you specify),
processes each input line according to a specified \fBawk\fR program,
and writes output to the standard output.
This section explains the structure of an \fBawk\fR program
and the syntax of \fBawk\fR command lines.
.Sh "Command-line Options"
.PP
.II "awk^command-line options"
The complete form for the \fBawk\fR command line is as follows:
.DM
\fBawk\fR [\fB-y\^\fR] [\fB\-F\fIc\^\fR] [\fB\-f \fIprogfile\^\fR] [\fIprog\^\fR] [\fIfile1\^\fR] [\fIfile2\^\fR] ...
.DE
The following describes each element of the command line.
.IP \fB\-y\fR
Map
.I patterns
from lower case to both lower-case and upper-case letters.
For example, with this option the string
.B the
would match
.B the
or
.BR The .
.IP \fB\-F\fIc\fR
Set the field-separator character to the character
.IR c .
The field-separator character and its uses are described below.
.IP "\fB\-f \fIprogfile\fR"
Read the
.B awk
program from
.I progfile .
.IP \fIprog\fR
An
.B awk
program to execute.
If you do not use the
.B -f
option, you must enter
.BR awk 's
statements on its command line.
.sp \n(pDu
Note that if you include
.BR awk 's
program on its command line (instead of in a separate file),
you must enclose the program between apostrophes.
Otherwise, some of the
.B awk
statements will be modified by the shell before \fBawk\fR ever sees them,
which will make a mess of your program.
For example:
.DM
awk 'BEGIN {print "sample output file"}
{print NR, $0}'
.DE
(The following sections explain
what the stuff between the apostrophes means.)
However, if you include the statement within a file that you pass to
.B awk
via its \fB\-f\fR option, you must \fInot\fR enclose the statements within
parentheses; otherwise,
.B awk
will become very confused.
If you were to put the statements in the above program into an
.B awk
program file, they would appear as follows:
.DM
BEGIN {print "sample output file"}
{print NR, $0}
.DE
.IP "\fIfile1 file2 ...\fR"
The files whose text you wish to process.
For example, the command
.DM
awk '{print NR, $0}' text1
.DE
prints the contents of
.BR text1 ,
but precedes each line with a line number.
.sp \n(pDu
If you do not name an input file,
.B awk
processes what it reads from the standard input.
For example, the command
.DM
awk '{print NR, $0}'
.DE
reads what you type from the keyboard and echoes it preceded with a line
number.
To exit from this program, type
.BR <ctrl-D> .
.SH "Structure of an awk Program"
.PP
.II "awk^statement"
An
.B awk
program consists of one or more statements of the form:
.DS
\fIpattern \fB{ \fIaction \fB}\fR
.DE
Note that
.B awk
insists that the
action
be enclosed between braces, so that it can distinguish the action
from the pattern.
.PP
A program can contain as many statements
as you need to accomplish your purposes.
When
.B awk
reads a line of input,
it compares that line with the \fIpattern\fR in each statement.
Each time a line matches \fIpattern\fR,
\fBawk\fR performs the corresponding \fIaction\fR.
.B awk
then reads the next line of input.
.PP
A statement can specify an \fIaction\fR without a \fIpattern\fR.
In this case,
.B awk
performs the action on every line of input.
For example, the program
.DM
awk '{ print }' text1
.DE
prints every line of
.B text1
onto the standard output.
.PP
An \fBawk\fR program may also specify a pattern without an action.
In this case, when an input line matches the pattern,
\fBawk\fR prints it on the standard output.
For example, the command
.DM
awk 'NR > 0' table1
.DE
prints all of
.B table1
onto the standard output.
Note that you can use the same pattern in more than one statement.
Examples of this will be given below.
.PP
.BR awk 's
method of forming patterns uses
.I "regular expressions"
(also called
.IR patterns ),
like those used by the \*(CO commands
.BR sed ,
.BR ed ,
and
.BR egrep .
Likewise,
.BR awk 's
method of constructing actions is modelled after the C programming language.
If you are familiar with regular expressions and with C, you should have
no problem learning how to use
.BR awk .
However,
if you are not familiar with them, they will be explained in the following
sections.
.Sh "Records and Fields"
.PP
.II "awk^records"
.II "awk^fields"
\fBawk\fR divides its input into
.IR records .
It divides each record, in turn, into
.IR fields .
.II "awk^input-record separator"
.II "awk^input-field separator"
Records are separated by a character called
the
.IR "input-record separator" ;
likewise, fields are separated by the
.IR "input-field separator" .
.B awk
in effect conceives of its input as a table with an indefinite
number of columns.
.PP
The newline character is the default input-field separator,
so \fBawk\fR normally regards each input line as a separate record.
The space and the tab characters are
the default input-field separator,
so white space normally separates fields.
.PP
To address a field within a record, use the syntax \fB$\fIN\fR, where
.I N
is the number of the field within the current record.
The pattern \fB$0\fR addresses the entire record.
Examples of this will be given below.
.II "awk^output-record separator"
.II "awk^output-field separator"
In addition to input record and field separators,
\fBawk\fR provides output record and field separators,
which it prints between output records and fields.
The default output-field separator
is the newline character;
\fBawk\fR normally prints each output record as a separate line.
The space character is the default output-field separator.
.SH "Patterns"
.PP
This section describes how
.B awk
interprets the pattern section of a statement.
.Sh "Special Patterns"
.PP
.II "awk^special patterns"
.II "awk^pattern, special"
To begin,
.B awk
defines and sets a number of special \fIpattern\fRs.
You can use these patterns in your program for special purposes.
You can also redefine some of these patterns to suit your preferences.
The following describes the commonest such special
.IR patterns ,
and how they're used:
.II "awk^BEGIN"
.IP \fBBEGIN\fR
This pattern matches the beginning of the input file.
.B awk
executes all
.I actions
associated with this pattern before it begins to read input.
.II "awk^END"
.IP \fBEND\fR
This pattern matches the end of the input file.
.B awk
executes all
.I actions
associated with this pattern after it had read all of its input.
.II "awk^FILENAME"
.IP \fBFILENAME\fR
.B awk
sets this pattern to the name of the file
that it is currently reading.
Should you name more than one input file on the command line,
.B awk
resets this pattern as it reads each file in turn.
.II "awk^FS"
.IP \fBFS\fR
Input-field separator.
This pattern names the character that
.B awk
recognizes as the field separator for the records it reads.
.II "awk^NF"
.IP \fBNF\fR
This pattern gives the number of fields within the current record.
.II "awk^NR"
.IP \fBNR\fR
This pattern gives the number of the current record within the
input stream.
.II "awk^OFS"
.IP \fBOFS\fR
Output-field separator.
.B awk
sets this pattern to the character that it writes in its output to
separate one field from another.
.II "awk^ORS"
.IP \fBORS\fR
Output-record separator.
.B awk
sets this pattern to the character that it writes in its output to
separate one field from another.
.II "awk^RS"
.IP \fBRS\fR
Input-record separator.
.B awk
sets this pattern to the character by which it separates records
that it reads.
.Sh "Arithmetic Relational Expressions"
.II "awk^arithmetic operators"
.PP
An
.I operator
marks a task to be within an expression,
much as the `+' or `/' within an arithmetic expression indicates
that the numbers are to be, respectively, added or divided.
You can use \fBawk\fR's operators to:
.IP \(bu 0.3i
Compare a special pattern with a variable, a field, or a constant.
.IS \(bu
Assign a value to a variable or to a special pattern.
.IS \(bu
Dictate the relationship among two or more expressions.
.PP
The first type of operator to be discussed are
.IR "arithmetic relational operators" .
These compare the input text with an arithmetic value.
.B awk
recognizes the following arithmetic operators:
.DS
\fB<\fR Less than
\fB<=\fR Less than or equal to
\fB==\fR Equivalent
\fB!=\fR Not equal
\fB>=\fR Greater than or equal to
\fB>\fR Greater than
.DE
With these operators, you can compare a field with a constant;
compare one field with another;
or compare a special pattern with either a
field or a constant.
.PP
For example, the following
.B awk
program prints all of the years in which Babe Ruth hit more than
50 home runs:
.DM
awk '$3 >= 50' table1
.DE
(As you recall, column 3 in the file
.B table1
gives the number of home runs.)
The program prints the following on your screen:
.DM
1920 .376 54 158 137
1921 .378 59 177 171
1927 .356 60 158 164
1928 .323 54 163 142
.DE
The following program, however, shows the years in which Babe Ruth scored
more runs than he drove in:
.DM
awk '$4 > $5 { print $1 }' table1
.DE
Remember, field 4 in file
.B table1
gives the number of runs scored, and field 5 gives the number of
runs batted in.
You should see the following on your screen:
.DM
1920
1921
1923
1924
1928
.DE
In the above program, expression
.DM
{print $1}
.DE
defines the action to perform, as noted by the fact that expression is
enclosed between braces.
In this case, the program tells
.B awk
that if the input record matches the pattern, to print only the first
field.
However, to print both the season and the number of runs scored, use the
following program:
.DM
awk '$4 > $5 { print $1, $4 }' table1
.DE
This prints the following:
.DM
1920 158
1921 177
1923 151
1924 143
1928 163
.DE
Note that
.B $1
and
.B $4
are separated by a comma.
.II "awk^,"
The comma tells
.B awk
to print its default output-field separator between columns.
If we had left out the comma, the output would have appeared as follows:
.DM
1920158
1921177
1923151
1924143
1928163
.DE
As we noted above, the special pattern
.B OFS
gives the output-field separator.
.B awk
by default defines this special pattern to the space character.
If we wish to redefine the output-field separator,
we can use an operator, plus the special pattern
.BR BEGIN ,
as follows:
.DM
awk 'BEGIN { OFS = ":" }
$4 > $5 { print $1, $4 }' table1
.DE
This prints:
.DM
1920:158
1921:177
1923:151
1924:143
1928:163
.DE
The first statement
.DM
BEGIN { OFS = ":"}
.DE
tells
.B awk
to set the output-field separator (the special pattern \fBOFS\fR)
to `:' before it processes any input (as indicated by the special
pattern \fBBEGIN\fR).
.PP
Although we're getting a little ahead of ourselves, note that there's
no reason to print the fields in the order in which they appear in
the input record.
For example, if you wish to print the number of runs scored before the
season, use the command:
.DM
awk 'BEGIN { OFS = ":"}
$4 > $5 { print $4, $1 }' table1
.DE
This prints:
.DM
158:1920
177:1921
151:1923
143:1924
163:1928
.DE
As you recall, the special pattern
.B NR
gives the number of the current input record.
You can execute an action by comparing this pattern with a constant.
For example, the command
.DM
awk 'NR > 12' text1
.DE
prints:
.DM
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
That is, the program prints every line after line 12 in the input file.
As you recall, a statement that has a pattern but no action
prints the entire record that matches the pattern.
.PP
As we saw with the special patterns, some patterns can be defined to
be numbers and others to be text.
If you compare a number with a string,
.B awk
by default makes a string comparison.
The following example shows how \fBawk\fR
compares one field to part of the alphabet:
.DM
awk '$1 <= "C"' text1
.DE
This program prints:
.DM
And trouble deaf heaven with my bootless cries,
And look upon myself, and curse my fate,
.DE
The statement
\fB$1 <= "C"\fR selected all records that begin
with an ASCII value less than or equal to that of the letter `C' (0x43) \(em
in this case, both lines that begin with `A' (0x41).
If we ran this example against
.BR table1 ,
it would print every record in the file.
This is because each record begins with the character `1' (0x31), which
matches the pattern \fB$1 <= "C"\fR.
.PP
Finally, you can use a numeric field plus a constant in a comparison
statement.
For example, the following program prints all of the seasons in which
Babe Ruth had at least 100 more runs batted in than home runs:
.DM
awk '$3 + 100 < $5 {print $1}' table1
.DE
This prints the following:
.DM
1921
1927
1929
.DE
.Sh "Boolean Combinations of Expressions"
.PP
.II "awk^Boolean operators"
.B awk
has a number of operators, called
.I Boolean
operators, that let you hook together several small expressions
into one large, complex expression.
.B awk
recognizes the following Boolean operators:
.DS
\fB||\fR Boolean OR (one expression or the other is true)
\fB&&\fR Boolean AND (both expressions are true)
\fB!\fR Boolean NOT (invert the value of an expression)
.DE
.II "Boole, George"
(The eponym ``Boolean'' comes from the English mathematician George Boole.)
In a Boolean expression,
.B awk
evaluates each sub-expression to see if it is true or false;
the relationship of sub-expressions (as set by the Boolean operator)
then determines whether the entire expression is true or false.
.PP
For example, the following program prints all seasons in which
Babe Ruth hit between 40 and 50 home runs:
.DM
awk '$3 >= 40 && $3 <= 50 { print $1, $3 }' table1
.DE
This prints the following:
.DM
1923 41
1924 46
1926 47
1929 46
.DE
In the above program,
.B awk
printed its output only if the subexpression \fB$3 >= 40\fR
was true \fIand\fR (\fB&&\fR) the subexpression \fB$3 <= 50\fR
was true.
.PP
The next example demonstrates the Boolean OR operator.
It prints all seasons for which Babe Ruth hit fewer
than 40 home runs or more than 50 home runs:
.DM
awk '$3 < 40 || $3 > 50 { print $1, $3}' table1
.DE
This example prints the following:
.DM
1920 54
1921 59
1922 35
1925 25
1927 60
1928 54
.DE
In this example,
.B awk
printed its output if the subexpression \fB$3 < 40\fR
was true \fIor\fR (\fB||\fR) the subexpression \fB$3 > 50\fR was true.
Note that the output would also be printed if both subexpressions were true
(although in this case, this is impossible).
.PP
Finally, the Boolean operator `!' negates the truth-value of any expression.
For example, the expression \fB$1 == "And"\fR is true if the first field
in the current record equals ``And''; however, the expression
\fB$1 != "And"\fR is true if the first field does \fInot\fR equal ``And''.
For example, the program
.DM
awk '$1 != "And"' text1
.DE
prints:
.DM
When, in disgrace with fortune and men's eyes,
I all alone beweep my outcast state,
Wishing me like to one more rich in hope,
Featured like him, like him with friends possest,
Desiring this man's art and that man's scope,
With what I most enjoy contented least.
Yet in these thoughts myself almost despising,
Haply I think on thee - and then my state,
Like to the lark at break of day arising
From sullen earth, sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
These are the 12 lines from \fBtext1\fR that do not begin with ``And''.
.PP
Note that
.B awk
evaluates all operators from left to right unless sub-expressions are
grouped together with parentheses, as is described in the following section.
.Sh "Patterns"
.II "awk^patterns"
.PP
The previous examples have all matched strings or numbers against predefined
fields in each input record.
This is fine for manipulating tabular information, like our table of
Babe Ruth's batting statistics, but it is not terribly useful when
you are processing free text.
Free text is not organized into predefined columns, nor are you likely to
know which field (that is, which word) will contain the pattern you're seeking.
.PP
.II ed
.II sed
To help you manage free text,
.B awk
has a pattern-matching facility that resembles those of the editors
.B ed
and
.BR sed .
.PP
The most common way to search for a pattern is to enclose it between slashes.
For example, the program
.DM
awk '/and/' text1
.DE
prints every line in
.B text1
that contains the string ``and''.
.DM
When, in disgrace with fortune and men's eyes,
And look upon myself, and curse my fate,
Desiring this man's art and that man's scope,
Haply I think on thee - and then my state,
.DE
Note that ``and'' does not have to be a word by itself \(em it can be
a fragment within a word as well.
Note, too, that this pattern matches ``and'' but does not match ``And'' \(em
but it would if we were to use the \fB\-y\fR option on the \fBawk\fR command
line (described above).
.PP
You can use Boolean operators to search for more than one string at once.
For example, the program
.DM
awk '/and/ && /or/' text1
.DE
finds every line in \fBtext1\fR that contains both ``and'' and ``or''.
There is only one:
.DM
When, in disgrace with fortune and men's eyes,
.DE
Note that the ``or'' in this line is embedded in the word ``fortune''.
.PP
.B awk
can also scan for classes and types of characters.
To do so, enclose the characters within brackets and place the bracketed
characters between the slashes.
For example, the following program looks for every line in \fBtext1\fR
that contains a capital `A' through a capital `E':
.DM
awk '/[A-E]/' text1
.DE
This prints the following:
.DM
And trouble deaf heaven with my bootless cries,
And look upon myself, and curse my fate,
Desiring this man's art and that man's scope,
.DE
.II "awk^special characters"
In addition, you can use the following
special characters for further flexibility:
.DS
\fB[ ]\fR Class of characters
\fB( )\fR Grouping subexpressions
\fB|\fR Alternatives among expressions
\fB+\fR One or more occurrences of the expression
\fB?\fR	Zero or one occurrence of the expression
\fB*\fR Zero, one, or more occurrences of the expression
\fB.\fR Any non-newline character
.DE
When adding a special character to a pattern, enclose the special
character as well as the rest of the pattern
within slashes.
.PP
To search for a string that contains one of
the special characters, you must precede the
character with a backslash.
For example, if you are looking for the string ``today?'',
use the following pattern:
.DM
/today\e?/
.DE
When you need to find an expression in a
particular field, not just anywhere in the
record, you can use one of these operators:
.DS
\fB~\fR Contains the data in question
\fB!~\fR Does not contain the data in question
.DE
For example, if you need to find the digit `9'
in the fourth field of file \fBtable1\fR, use the following
program:
.DM
awk '$4~/9/ {print $1, $4}' table1
.DE
This prints the following:
.DM
1922 94
1926 139
.DE
As you can see, the above program found every record with a `9' in its
fourth field, regardless of whether the `9' came at the beginning of the
field or its end.
.B awk
also recognizes two operators that let you set where a pattern is within
a field:
.DS
\fB^\fR Beginning of the record or field
\fB$\fR End of the record or field
.DE
For example, to find every record in \fBtable1\fR whose fourth field
\fIbegins\fR with a `9', run the following program:
.DM
awk '$4~/^9/ {print $1, $4}' table1
.DE
This prints:
.DM
1922 94
.DE
Finally, to negate a pattern use the operator \fB!~\fR.
For example, to print every record in
.B table1
whose fourth column does
.I not
begin with a `9', use the following program:
.DM
awk '$4!~/^9/ {print $1, $4}' table1
.DE
This prints:
.DM
1920 158
1921 177
1923 151
1924 143
1925 61
1926 139
1927 158
1928 163
1929 121
.DE
.Sh "Ranges of Patterns"
.PP
.II "awk^patterns, range"
You can tell \fBawk\fR to perform an action
on all records between two patterns.
For example, to print all records between the
.I patterns
\fB1925\fR and \fB1929\fR, inclusive,
enclose the strings in slashes and separate them with a comma,
then indicate the
.B print
action, as follows:
.DM
awk '/1925/,/1929/ { print }' table1
.DE
You can also use the special pattern
.B NR
(or \fIrecord number\^\fR) to name a range of record numbers.
For example, to print records 5 through 10 of file
.BR text1 ,
use the following program:
.DM
awk 'NR == 5, NR == 10 { print }' text1
.DE
.Sh "Resetting Separators"
.PP
.II "awk^separators, reset"
As noted above,
.B awk
recognizes certain characters by default to parse its input into
records and fields, and to separate its output into records and fields:
.II "awk^FS"
.IP \fBFS\fR
Input-field separator.
By default, this is one or more white-space characters (tabs or spaces).
.II "awk^OFS"
.IP \fBOFS\fR
Output-field separator.
By default, this is exactly one space character.
.II "awk^ORS"
.IP \fBORS\fR
Output-record separator.
By default, this is the newline character.
.II "awk^RS"
.IP \fBRS\fR
Input-record separator.
By default, this is the newline character.
.PP
By resetting any of these special patterns, you can change how
.B awk
parses its input or organizes its output.
Consider, for example, the command:
.DM
awk 'BEGIN {ORS = "|"}
/1920/,/1925/ {print $1, $5}' table1
.DE
This prints the following:
.DM
1920 137|1921 171|1922 99|1923 131|1924 121|1925 66|
.DE
As you can see, this prints the season and the number of runs batted
in for the 1920 through 1925 season.
However,
.B awk
uses the pipe character `|' instead of the newline character to separate
records.
If you wish to change the output-field separator as well as the
output-record separator, use the program:
.DM
awk 'BEGIN {ORS = "|" ; OFS = ":"}
/1920/,/1925/ {print $1, $5}' table1
.DE
This produces:
.DM
1920:137|1921:171|1922:99|1923:131|1924:121|1925:66|
.DE
.II "awk^;"
As you can see,
.B awk
has used the colon `:' instead of a white-space character to separate
one field from another.
.PP
Note, too, that the semicolon `;' character separates expressions
in the action portion of the statement associated with the
.B BEGIN
pattern.
This lets you associate more than one action with a given pattern, so you
do not have to repeat that pattern.
This is discussed at greater length below.
.PP
You can also change the input-record separator from the newline
character to something else that you prefer.
For example, the following program changes the input-record separator
from the newline to the comma:
.DM
awk 'BEGIN {RS = ","}
{print $0}' text1
.DE
This yields the following:
.DM
When
in disgrace with fortune and men's eyes
.sp \n(pDu
I all alone beweep my outcast state
.sp \n(pDu
And trouble deaf heaven with my bootless cries
.sp \n(pDu
And look upon myself
and curse my fate
.sp \n(pDu
Wishing me like to one more rich in hope
.sp \n(pDu
Featured like him
like him with friends possest
.sp \n(pDu
Desiring this man's art and that man's scope
.sp \n(pDu
With what I most enjoy contented least.
Yet in these thoughts myself almost despising
.sp \n(pDu
Haply I think on thee - and then my state
.sp \n(pDu
Like to the lark at break of day arising
From sullen earth
sings hymns at heaven's gate;
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
The blank lines resulted from a comma's occurring at the end of a line.
.PP
.II "awk^="
Note that by specifying the null string \fB(RS="")\fR,
you can make two consecutive newlines the record separator.
Note, too, that only one character can be the input-record separator.
If you try to reset this separator to a string,
.B awk
uses the first character in the string as the separator, and ignores the rest.
.PP
You can change the input-field separator by redefining
.BR FS .
The default
.B FS
is \fB<space>\et\fR exactly and in that order
(where \fB<space>\fR is the space character).
In this case,
.B awk
uses its ``white-space rule,'' in which
.B awk
treats any sequence of spaces and tabs as a single separator.
This is the default rule for
.BR FS .
If you set
.B FS
to anything else, including \fB\et<space>\fR, then each separator is separate.
For example, the following program changes the input-field separator
to the comma and prints the first such field it finds in each line from file
.BR text1 :
.DM
awk 'BEGIN {FS = ","}
{print $1}' text1
.DE
This produces:
.DM
When
I all alone beweep my outcast state
And trouble deaf heaven with my bootless cries
And look upon myself
Wishing me like to one more rich in hope
Featured like him
Desiring this man's art and that man's scope
With what I most enjoy contented least.
Yet in these thoughts myself almost despising
Haply I think on thee - and then my state
Like to the lark at break of day arising
From sullen earth
For thy sweet love remember'd such wealth brings
That then I scorn to change my state with kings.
.DE
As you can see, this program prints text up to the first comma in
each line.
.B awk
throws away the comma itself, because the input-field separator is not
explicitly printed.
.PP
You can define several characters to be input-field separators
simultaneously.
When you specify several characters within
quotation marks, each character becomes a field
separator, and all separators have equal precedence.
For example, you can specify the letters `i', `j', and `k' to be
input-field separators.
The following program does this, and prints the first field so
defined from each record in file
.BR text1 :
.DM
awk 'BEGIN {FS = "ijk"}
{print $1}' text1
.DE
This prints:
.DM
When,
I all alone beweep my outcast state,
And trouble deaf heaven w
And loo
W
Featured l
Des
W
Yet
Haply I th
L
From sullen earth, s
For thy sweet love remember'd such wealth br
That then I scorn to change my state w
.DE
Note that if you set the input-record separator to a null string, you
can use the newline character as the input-field separator.
This is a handy way to concatenate clusters of lines into records that
you can then manipulate further.
.PP
One last point about the
.B FS
separator.
If the white-space rule is not invoked and
an assignment is made to a nonexistent field,
.B awk
can add the proper number of field separators.
For example if
\fBFS=":"\fR and the input line is \fBa:b\fR, then
the command \fB$5 = "e"\fR produces \fBa:b:::e\fR.
If the white-space rule were in effect,
.B awk
would add spaces as if each space were a separator,
and print a warning message.
In short, it would try to produce the sanest result from the error.
.PP
Finally, the variable
.B NR
gives the number of the current record.
The next example prints the total number of records in file
.BR text1 :
.DM
awk 'END {print NR}' text1
.DE
The output is
.DM
14
.DE
which is to be expected, since
.B text1
is a sonnet.
.SH "Actions"
.PP
.II "awk^actions"
The previous section described how to construct a
.I pattern
for
.BR awk .
For each pattern, there must be a corresponding
.IR action .
So far, the only action shown has been to print output.
However,
.B awk
can perform many varieties of actions.
In addition to printing, \fBawk\fR can:
.DS
\(bu Execute built-in functions
\(bu Redirect output
\(bu Assign variables
\(bu Use fields as variables
\(bu Define arrays
\(bu Use control statements
.DE
These actions are discussed in detail in the following sections.
.PP
As noted above, each
.B awk
statement must have an action.
If a statement does not include an action,
.B awk
assumes that the action is
.BR {print} .
.PP
Within each statement,
.B awk
distinguishes an action from its corresponding pattern by the fact
that the action is enclosed within braces.
Note that the action section of a statement may include several
individual actions;
however, each action must be separated from the others by
semicolons `;' or newlines.
.PP
.II "Free Software Foundation"
Some forms of
.BR awk ,
such as that provided by the Free Software Foundation (FSF),
allow user-defined functions.
The FSF version of
.B awk
is available from the MWC BBS as well as via COHware.
Note that your system must have at least two megabytes of RAM
to run the FSF version of
.BR awk .
.Sh "awk Functions"
.PP
.II "awk^functions"
\fBawk\fR includes the following functions with which you can
manipulate input.
You can assign a function to any variable or use it in a pattern.
The following lists
.BR awk 's
functions.
Note that an
.I argument
can be a variable, a field, a constant, or an expression:
.II "awk^abs"
.IP \fBabs(\fIargument\^\fB)\fR
Return the absolute value of \fIargument\fR.
.II "awk^exp"
.IP \fBexp(\fIargument\^\fB)\fR
.II "Euler, Leonhard"
Return Euler's number
.I e
(2.718...) to the power of \fIargument\fR.
.II "awk^index"
.IP \fBindex(\fIstring1\^\fB,\fIstring2\^\fB)\fR
Return the position of \fIstring2\fR within
\fIstring1\fR.
If \fIstring2\fR does not occur in \fIstring1\fR,
.B awk
returns zero.
This
.B awk
function resembles the \*(CO C function
.BR index() .
.II "awk^int"
.IP \fBint(\fIargument\^\fB)\fR
Return the integer portion of \fIargument\fR.
.II "awk^length"
.IP \fBlength\fR
Return the length, in bytes, of the current record.
.IP \fBlength(\fIargument\^\fB)\fR
Return the length, in bytes, of \fIargument\fR.
.II "awk^log"
.IP \fBlog(\fIargument\^\fB)\fR
Return the natural logarithm of \fIargument\fR.
.II "awk^print"
.IP "\fBprint(\fIargument1 argument2 ... argumentN\^\fB)\fR"
Concatenate and print
.I argument1
through
.IR argumentN .
.IP "\fBprint(\fIargument1\^\fB,\fIargument2\^\fB, \fI... argumentN\^\fB)\fR"
Print
.I argument1
through
.IR argumentN .
Separate each
.I argument
with the
.B OFS
character.
.II "awk^printf"
.IP "\fBprintf(\fIf\^\fB, \fIargument1\^\fB, \fI... argumentN\^\fB)\fR"
Format and print strings \fIargument1\fR through \fIargumentN\fR in the
manner set by the formatting string
.IR f ,
which can use \fBprintf()\fR-style formatting codes.
.IP "\fBsplit(\fIstr\^\fB, \fIarray\^\fB, \fIfs\^\fB)\fR"
Divide the string \fIstr\fR into fields associated with
\fIarray\fR.
The fields are separated by character \fIfs\fR or the default field separator.
.II "awk^sprintf"
.IP "\fBsprintf(\fIf\^\fB, \fIe1\^\fB, \fIe2\^\fB)\fR"
Format strings \fIe1\fR and \fIe2\fR in the
manner set by the formatting string
.IR f ,
and return the formatted string.
.I f
can use \fBprintf()\fR-style formatting codes.
.II "awk^sqrt"
.IP "\fBsqrt(\fIargument\^\fB)\fR"
Return the square root of \fIargument\fR.
.II "awk^substr"
.IP "\fBsubstr(\fIstr\^\fB, \fIbeg\^\fB, \fIlen\^\fB)\fR"
Scan string \fIstr\fR for position \fIbeg\fR;
if found, print the next \fIlen\fR characters.
If \fIlen\fR is not included, print from \fIbeg\fR
to the end of the record.
.Sh "Printing with awk"
.PP
.II "awk^printing"
Printing is the commonest task you will perform in your
.B awk
programs.
.BR awk 's
printing functions
.B printf
and
.B sprintf
resemble the C functions
.B printf()
and
.BR sprintf() ;
however, there are enough differences to make a close reading of this
section worthwhile.
.PP
.B print
is the commonest, and simplest, \fBawk\fR function.
When used without any arguments,
.B print
prints all of the current record.
The following example prints every record in file
.BR text1 :
.DM
awk '{print}' text1
.DE
You can print fields in any order you desire.
For example, the following program reverses the order of the season
and batting-average columns from file
.BR table1 :
.DM
awk '/1920/,/1925/ { print $2,$1 }' table1
.DE
The output is as follows:
.DM
.376 1920
.378 1921
.315 1922
.393 1923
.378 1924
.290 1925
.DE
Because the field names are separated by a comma,
\fBawk\fR inserts the
.B OFS
between the fields when it prints them.
If you do not separate field names with commas,
\fBawk\fR concatenates the fields when it prints them.
For example, the program
.DM
awk '/1920/,/1925/ { print $2 $1 }' table1
.DE
produces:
.DM
.3761920
.3781921
.3151922
.3931923
.3781924
.2901925
.DE
When you use \fBawk\fR to process a column of text or numbers,
you may wish to specify a consistent format for the output.
The statement for formatting a column of numbers follows this
.IR pattern :
.DM
{printf "format", expression}
.DE
where
.I format
prescribes how to format the output, and
.I expression
specifies the fields for \fBawk\fR to print.
.PP
The following table names and defines
the most commonly used of \fBawk\fR's format control characters.
Each character must be preceded by a percent sign `%'
and a number in the form of
.I n
or
.IR n.m .
.DS
\fB%\fIn\fBd\fR Decimal number
\fB%\fIn\^\fB.\fIm\^\fBf\fR Floating-point number
\fB%\fIn\^\fB.\fIm\^\fBs\fR String
\fB%%\fR Literal `%' character
.DE
When you use the
.B printf()
function, you must define the output-record separator within the
format string.
The following codes are available:
.DS
\fB\en\fR Newline
\fB\et\fR Tab
\fB\ef\fR Form feed
\fB\er\fR Carriage return
\fB\e"\fR Quotation mark
.DE
For example, the following program prints Babe Ruth's RBIs unformatted:
.DM
awk '/1920/,/1925/ { print $1, $5 }' table1
.DE
The output appears as follows:
.DM
1920 137
1921 171
1922 99
1923 131
1924 121
1925 66
.DE
As you can see,
.B awk
does not pad or align the columns by default.
To right-justify the second column on a three-character field, use the following program:
.DM
awk '/1920/,/1925/ { printf("%d %3d\en", $1, $5) }' table1
.DE
The output is as follows:
.DM
1920 137
1921 171
1922 99
1923 131
1924 121
1925 66
.DE
Note that the `3' in the string
.B %3d
specifies the minimum number of characters to be displayed.
If the size of the number exceeds the space allotted to it,
.B awk
prints the entire number.
A different rule applies when printing strings, as will be
shown below.
.PP
To print a floating-point number, you must specify the minimum number of
digits you wish to appear on either side of the decimal point.
For example, the following program gives the average number of RBIs
Babe Ruth hit in each game between 1920 and 1925:
.DM
awk '/1920/,/1925/ { printf("%d %1.2f\en", $1, $5/154.0) }' table1
.DE
This prints the following:
.DM
1920 0.89
1921 1.11
1922 0.64
1923 0.85
1924 0.79
1925 0.43
.DE
Note the following points about the above program:
.IP \(bu 0.3i
To get the average number of runs batted in, we had to divide the
total number of RBIs in a season by the number of games in a season
(which in the 1920s was 154).
.B awk
permits you to use a constant to perform arithmetic on a field; this will
be discussed in more detail below.
.IP \(bu
To force
.B awk
to produce a floating-point number, the constant had to be in the format
of a floating-point number, i.e., ``154.0'' instead of ``154''.
Dividing an integer by another integer would not have produced what we wanted.
.PP
.B awk
rounds its output to match the sensitivity you've requested \(em that is,
the number of digits to the right of the decimal point.
To see how sensitivity affects output, run the following program:
.DM
awk '/1920/,/1925/{printf("%1.2f %1.3f %1.4f\en",$5/154.0,$5/154.0,$5/154.0)}'\e
table1
.DE
This prints the following:
.DM
0.89 0.890 0.8896
1.11 1.110 1.1104
0.64 0.643 0.6429
0.85 0.851 0.8506
0.79 0.786 0.7857
0.43 0.429 0.4286
.DE
As an aside, the above example also shows that
you can break \fBawk\fR's command line across more than one
line using a backslash `\e' at the end of every line but the last.
Note, however, that you
.I cannot
break an
.B awk
statement across more than one line, or
.B awk
will complain about a syntax error.
.PP
One last example of floating-point numbers prints Babe Ruth's
ratio of runs scored to runs batted in between 1920 and 1925:
.DM
awk '/1920/,/1925/{x = ($5*1.0) ; printf("%1.3f\en", $4/x)}' table1
.DE
This produces the following:
.DM
1.153
1.035
0.949
1.153
1.182
0.924
.DE
The expression
.B "x = ($5*1.0)"
was needed to turn field 5 (the divisor) into a floating-point number,
so we could obtain the decimal fraction that we wanted.
This is discussed further below, when we discuss how to manipulate
constants.
.PP
The function
.B sprintf()
also formats expressions; however, instead of printing
its output, it returns it for assignment to a variable.
For example, you could rewrite the previous example program to replace
the multiplication operation with a call to
.BR sprintf() :
.DM
awk '/1920/,/1925/{x = sprintf("%3.1f", $5)
printf("%1.3f\en", $4/x)}' table1
.DE
The output is the same as that shown above.
.PP
The
.B %s
formatting string can be used to align text in fields.
The digit to the left of the period gives the width of the field;
that to the right of the period gives the number of characters to write
into the field.
Note that if input is larger than the number of characters allotted to it,
.B awk
truncates the input.
For example, the following program aligns on seven-character fields
some words from file
.BR text1 :
.DM
awk '{x=sprintf("%7.5s %7.5s %7.5s %7.5s", $1, $2, $3, $4)
print x}' text1
.DE
The output is as follows:
.DM
When, in disgr with
I all alone bewee
And troub deaf heave
And look upon mysel
Wishi me like to
Featu like him, like
Desir this man's art
With what I most
Yet in these thoug
Haply I think on
Like to the lark
From sulle earth sings
For thy sweet love
That then I scorn
.DE
Note that fields (words) longer than five characters are truncated;
and every word is right-justified on a seven-character field.
.Sh "Redirecting Output"
.PP
.II "awk^redirecting output"
In addition to printing to the standard output,
.B awk
can redirect the output of an action into a file, or append it onto
an existing file.
With this feature, you can extract information from
a given file and construct new documents.
The following example shows an easy way to sift Babe Ruth's statistics
into four separate files, for further processing:
.DM
awk '{ print $1, $2 > "average"
print $1, $3 > "home.runs"
print $1, $4 > "runs.scored"
print $1, $5 > "rbi"}' table1
.DE
Note that, as under the shell, the
operator `>' creates the named file if it does not exist,
or replaces its contents if it does.
To append
.BR awk 's
output onto the end of an existing file, use the operator `>>'.
.PP
.B awk
can also pipe the output of an action to another program.
As under the shell, the operator `|' pipes the output of one
process into another process.
For example, if it is vital for user
.B fred
to know Babe Ruth's batting
average for 1925, you can mail it to him with the following command:
.DM
awk '/1925/ {print $1, $2 | "mail fred"}' table1
.DE
.Sh "Assignment of Variables"
.PP
.II "awk^assigning variables"
A number of the previous examples assign values to variables.
.B awk
lets you create variables, perform arithmetic upon them,
and otherwise work with them.
.PP
An
.B awk
variable can be a string or a number,
depending upon the context.
Unlike C,
.B awk
does not require that you declare a variable.
By default, variables are set to the null
string (numeric value zero) on start-up of the \fBawk\fR program.
To set the variable
.B x
to the numeric value one, you can use the assignment operator `=':
.DM
x = 1
.DE
To set
.B x
to the string
.BR ted ,
also use the assignment operator:
.DM
x = "ted"
.DE
When the context demands it, \fBawk\fR converts strings to numbers or numbers
to strings.
For example, the statement
.DM
x = "3"
.DE
initializes
.B x
to the string ``3''.
When an expression contains an
arithmetic operator such as the `-',
\fBawk\fR interprets the expression as numeric.
(Alphabetic strings evaluate to zero.)
Therefore, the expression
.DM
x = "3" - "1"
.DE
assigns the numeric value two to variable
.B x ,
not the string ``2''.
.PP
When the operator is included within the
quotation marks, \fBawk\fR treats the operator as a
character in the string.
In the following example
.DM
x = "3 - 1"
.DE
initializes
.B x
to the string ``3 - 1''.
.PP
A number of examples in the previous section showed you how to perform
arithmetic on fields.
.II "awk^arithmetic operators"
The following table gives \fBawk\fR's arithmetic operators:
.DS
\fB+\fR Addition
\fB-\fR Subtraction
\fB*\fR Multiplication
\fB/\fR Division
\fB%\fR Modulus
\fB++\fR Increment
\fB--\fR Decrement
\fB+=\fR Add and assign value
\fB-=\fR Subtract and assign value
\fB*=\fR Multiply and assign value
\fB/=\fR Divide and assign value
\fB%=\fR Divide modulo and assign value
.DE
Variables are often used with increment operators.
For example, the following program computes the average number of home
runs Babe Ruth hit each season during the 1920s:
.DM
awk ' { x += $3 }
END { y = (NR * 1.0)
printf("Average for %d years: %2.3f.\en", NR, x/y) }' table1
.DE
The output is:
.DM
Average for 10 years: 46.700.
.DE
.Sh "Field Variables"
.PP
.II "awk^field variables"
\fBawk\fR lets fields receive assignments, be used in arithmetic,
and be manipulated in string operations.
One task that has not yet been demonstrated is using
a variable to address a field.
For example, the following program prints the
.BR NR th
field (word) from the first seven lines in file
.BR text1 :
.DM
awk 'NR < 8 {print NR, $(NR)}' text1
.DE
The output is:
.DM
1 When,
2 all
3 deaf
4 myself,
5 one
6 with
7 man's
.DE
.Sh "Control Statements"
.PP
.II "awk^control statements"
\fBawk\fR has seven defined control statements.
This section explains them and gives examples of their use.
.II "awk^if"
.II "awk^else"
.IP "\fBif (\fIcondition\^\fB) \fIaction1 \fB[else\fI action2 \fB]\fR"
If \fIcondition\fR is true, then execute
.IR action1 .
If the optional
.B else
clause is present and
.I condition
is false, then
execute
.IR action2 .
.sp \n(pDu
The following program keeps running totals of Babe Ruth's RBIs, for both
the years where his runs scored exceeded his RBIs and the years where they
did not:
.DM
awk '{ if ( $4 > $5 )
gyear++
else
lyear++
}
END { printf("Scored exceed RBIs: %d years.\n", gyear)
printf("Scored not exceed RBIs: %d years.\n", lyear)
}' table1
.DE
This produces:
.DM
Scored exceed RBIs: 5 years.
Scored not exceed RBIs: 5 years.
.DE
Note that if more than one action is associated with an
.B if
or
.B else
statement, you must enclose the statements between braces.
If you use braces with both the
.B if
and
.B else
statements, note that the beginning and closing braces
.I must
appear on the same line as the
.B else
statement.
For example:
.DM
if (expr) {
stuff
stuff
} else {
stuff
stuff
}
.DE
.II "awk^while"
.IS "\fBwhile (\fIcondition\^\fB) \fIaction\fR"
The
.B while
statement executes
.I action
as long as
.I condition
is true.
For example, the following program counts the number of times the word
.B the
appears in file
.BR text1 .
The
.B while
loop uses a variable to examine every word in every line:
.DM
awk ' { i = 1
while (i <= NF ) {
if ($i == "the") j++
i++
}
}
END { printf ("The word \e"the\e" occurs %d times.\en", j) }' text1
.DE
.II "Shakespeare, William"
The result, as follows, shows Shakespeare's economy of language:
.DM
The word "the" occurs 1 times.
.DE
By the way, note that if a control statement has more than one statement
in its action section,
enclose the action section between braces.
If you do not,
.B awk
will behave erratically or exit with a syntax error.
.II "awk^for"
.IP "\fBfor( \fIinitial\fB ; \fIend\fB ; \fIiteration\fB ) \fIaction\fR"
.IS "\fBfor( \fIvariable\fB in \fIarray\fB ) \fIaction\fR"
.BR awk 's
.B for
statement closely resembles the
.B for
statement in the C language.
The statement
.I initial
defines actions to be performed before the loop begins; this is usually
used to initialize variables, especially counters.
The statement
.I end
defines when the loop is to end.
The statement
.I iteration
defines one or more actions that are performed on every iteration of the
loop; usually this is used to increment counters.
Finally,
.I action
can be one or more statements that are executed on every iteration of
the loop.
.I action
need not be present, in which case only the action defined in the
.I iteration
portion of the
.B for
statement is executed.
.B for
is in fact just an elaboration of the
.B while
statement, but adjusted to make it a little easier to use.
The following example writes the previous example, but replaces the
.B while
loop with a
.B for
mechanism:
.DM
awk ' { for (i = 1 ; i <= NF ; i++)
if ($i == "the") j++
}
END { printf ("The word \e"the\e" occurs %d times.\en", j) }' text1
.DE
The output is the same as the previous example, but the syntax is neater
and easier to read.
.sp \n(pDu
The second form of the
.B for
loop examines the contents of an array.
It is described in the following section, which introduces arrays.
.IP \fBbreak\fR
.II "awk^break"
The statement
.B break
immediately interrupts a
.B while
or
.B for
loop.
For example, the following program is the same as the previous example,
but counts only the first occurrence of the word
.B the
in each line of
.BR text1 .
Thus, it counts the number of lines in
.B text1
that contain
.BR the :
.DM
awk '{ for (i = 1 ; i <= NF ; i++) {
if ($i == "the") {
j++
break
}
}
}
END {printf ("The word \e"the\e" occurs in %d lines.\en", j)}' text1
.DE
.II "awk^continue"
.IS \fBcontinue\fR
The statement
.B continue
immediately begins the next iteration of the nearest
.B while
or
.B for
loop.
For example, the following program prints all of Babe Ruth's statistics \(em
runs scored, runs batted, and home runs \(em in which he had more than 59
in one year:
.DM
awk ' { for (i = 3 ; i <= NF ; i++)
if ($i <= 59)
continue
else
printf("%d, column %d: %d\en", $1, i, $i)
} ' table1
.DE
This produces the following:
.DM
1920, column 4: 158
1920, column 5: 137
1921, column 4: 177
1921, column 5: 171
1922, column 4: 94
1922, column 5: 99
...
.DE
.II "awk^next"
.IS \fBnext\fR
The statement
.B next
forces
.B awk
to abort the processing of the current record and
skip to the next input record.
Processing of the new input record begins with the first pattern, just as
if the processing of the previous record had concluded normally.
To demonstrate this, the following program skips all records in file
.B text1
that have an odd number of fields (words):
.DM
awk ' { if (NF % 2 == 0) next }
{ print $0 } ' text1
.DE
This produces:
.DM
I all alone beweep my outcast state,
Wishing me like to one more rich in hope,
With what I most enjoy contented least.
Yet in these thoughts myself almost despising,
Like to the lark at break of day arising
.DE
.II "awk^exit"
.IS \fBexit\fR
Finally, the control statement
.B exit
forces the \fBawk\fP program to skip all remaining input and
execute the \fIactions\fR at the \fBEND\fR pattern, if any.
For example, the following program prints the year in which Babe Ruth
hit his 300th home run:
.DM
awk ' { i = $1 }
(j += $3) >= 300 { exit }
END {print "Babe Ruth hit his 300th homer in", i "."}' table1
.DE
This produces:
.DM
Babe Ruth hit his 300th homer in 1926.
.DE
.SH "Arrays"
.PP
.II "awk^arrays"
.B awk
has a powerful feature for managing arrays.
Unlike C,
.B awk
automatically manages the size of an array, so you do not have to declare
the array's size ahead of time.
Also, unlike C,
.B awk
lets you address each element within an array by a label, not just by its
offset within the array.
This lets you generate arrays ``on the fly,'' which can be very useful
in transforming many varieties of data.
.PP
To declare an array, simply name it within a statement.
.B awk
recognizes as an array every variable that is followed by brackets `[\ ]'.
To initialize a row within an array, you must define its value and name its
label.
A label can be either a number or a string.
A value, too, can be a number or a string; if the value is a number, then
you can perform arithmetic upon it, as will be shown in a following example.
.Sh "Initializing an Array"
.PP
To demonstrate how an array works, use the line editor
.B ed
to add a line of text to the beginning of file
.BR table1 .
Type the following; please note that the token
.B <tab>
means that you should type a tab character:
.DM
ed table1
1i
Year\fB<tab>\fPBA\fB<tab>\fPHRs\fB<tab>\fPScored\fB<tab>\fPRBIs
.
wq
.DE
This change writes a header into \fBtable1\fR that names each column.
Now, we can read these labels into an array and use them to
describe Babe Ruth's statistics.
For example, the following prints a summary of Babe Ruth's statistics for
the year 1926:
.DM
awk ' NR == 1 { for (i=1; i <= NF; i++) header [i] = $i }
$1 == 1926 {
for (i=1; i <= NF; i++)
print header[i] ":\et", $i
} ' table1
.DE
This produces:
.DM
.ta 0.5i 1.5i
Year: 1926
BA: .372
HRs: 47
Scored: 139
RBIs: 145
.DE
The statement
.DM
NR == 1 { for (i=1; i <= NF; i++) header [i] = $i }
.DE
reads the first line in
.BR table1 ,
which contains the column headers, and uses the headers to initialize the array
.BR header .
Each row is labeled with the contents of the variable
.BR i .
.PP
The loop
.DM
for (i=1; i <= NF; i++)
print header[i] ":\et", $i
.DE
prints the contents of
.BR header .
Because we labeled each row within
.B header
with a number, we can use a numeric loop to read its contents.
.Sh "The for() Statement With Arrays"
.PP
In the previous example, each element in the array was labeled with a number.
This permitted us to read the array with an ordinary
.B for
statement, which sets and increments a numeric variable.
However, the rows within an array can be labeled with strings, instead of
numbers.
To read the contents of such an array, you must use a special form of the
.BR for
statement, as follows:
.DS
\fBfor ( \fIoffset \fBin \fIarray \fB)\fR
.DE
.I array
names the array in question.
.I offset
is a variable that you name at the time of constructing the
.B for
statement.
You can use the value of
.I offset
in any subordinate printing actions.
.PP
The following program demonstrates this new form of
.BR for ,
and (incidentally) the power of
.BR awk 's
array-handling feature.
It builds an array of each unique word in the file
.BR text1 ,
and notes the number of times that word occurs within the file:
.DM
awk ' { for (i = 1 ; i <= NF ; i++)
words [$i]++ }
END { for (entry in words)
print entry ":", words[entry] }' text1 | sort
.DE
This prints:
.DM
-: 1
And: 2
Desiring: 1
Featured: 1
For: 1
From: 1
Haply: 1
I: 4
Like: 1
That: 1
When,: 1
Wishing: 1
With: 1
Yet: 1
all: 1
almost: 1
...
.DE
As you can imagine, a similar program in C would require many more
lines of code.
However, a few features of this program are worth noting.
.PP
First, the expression
.DM
{ for (i = 1 ; i <= NF ; i++)
words [$i]++ }
.DE
declares the array
.B words .
Every time
.B awk
encounters a new field (word), it automatically adds another entry to the
array, and labels that entry with the word.
No work on your part is needed for this to happen.
.II "awk^++"
The `++' operator increments the value of the appropriate entry within
.BR words .
Because we did not initialize the entry, it implicitly contains a number.
.PP
The expression
.DM
{ for (entry in words)
print entry ":", words[entry] }
.DE
walks through the array
.BR words .
.B awk
initializes the variable
.B entry
to the label for each row in
.BR words ;
the
.B print
statement then prints
.B entry
and the contents of that row in the array \(em in this case, the number of
times the row appears in our input file.
.PP
Finally, we piped the output of this program to the command
.B sort
to print the words in alphabetical order.
.SH "For More Information"
.PP
This tutorial just gives a brief introduction to the power of
.BR awk .
To explore the language in depth, see \fIsed & awk\fR by Dale Dougherty
(Sebastopol, Calif, O'Reilly & Associates, Inc., 1985).
This book, however, describes a more complex version of
.B awk
than that provided with \*(CO.
.PP
The Lexicon's article on \fBawk\fR gives a quick summary of
its features and options.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaa.R
\name{as_cli_string}
\alias{as_cli_string}
\alias{as_cli_string.numeric}
\alias{as_cli_string.character}
\alias{as_cli_string.logical}
\title{Convert an object to a character string for the command line}
\usage{
as_cli_string(x, ...)
\method{as_cli_string}{numeric}(x, ...)
\method{as_cli_string}{character}(x, ...)
\method{as_cli_string}{logical}(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{other arguments ignored}
}
\value{
a representation of the object for the command line.
}
\description{
Convert an object to a character string for the command line
}
| /man/as_cli_string.Rd | permissive | coolbutuseless/btnvips | R | false | true | 653 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaa.R
\name{as_cli_string}
\alias{as_cli_string}
\alias{as_cli_string.numeric}
\alias{as_cli_string.character}
\alias{as_cli_string.logical}
\title{Convert an object to a character string for the command line}
\usage{
as_cli_string(x, ...)
\method{as_cli_string}{numeric}(x, ...)
\method{as_cli_string}{character}(x, ...)
\method{as_cli_string}{logical}(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{other arguments ignored}
}
\value{
a representation of the object for the command line.
}
\description{
Convert an object to a character string for the command line
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NetworkFeatureCalculation.R
\name{BetFeature}
\alias{BetFeature}
\title{Compute betweenness centrality}
\usage{
BetFeature(Net, edge_data)
}
\arguments{
\item{Net}{a graph object representing the input network}
\item{edge_data}{a dataframe specifying the edge information}
}
\value{
a vector of betweenness centrality values
}
\description{
This function computes the betweenness centrality for a list of candidate SNPs.
}
| /man/BetFeature.Rd | no_license | tanlabcode/arvin | R | false | true | 501 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NetworkFeatureCalculation.R
\name{BetFeature}
\alias{BetFeature}
\title{Compute betweenness centrality}
\usage{
BetFeature(Net, edge_data)
}
\arguments{
\item{Net}{a graph object representing the input network}
\item{edge_data}{a dataframe specifying the edge information}
}
\value{
a vector of betweenness centrality values
}
\description{
This function computes the betweenness centrality for a list of candidate snps
}
|
# Graphical parameters: wide right margin (mar[4] = 9, to make room for the
# second y axis), inward tick marks (tck > 0), horizontal axis labels
# (las = 1). See ?par for details.
par(mar=c(5, 4, 4, 9) + 0.1,tck=0.025, las=1)
# Base line plot of mean biomass increment vs. year; x and y limits are set
# explicitly, and axes=T draws the default axes (later overlays suppress
# theirs). For posters/presentations use cex >= 2 so labels stay readable.
plot(vuf.mean.diff.area*.35*1000 ~ Year, data = vuf.allometric.full, type="l", lwd=2, xlim= c(2000,2015), ylim= c(0,900), main = "VUF Incremental Biomass vs. VCM GPP", ylab="Mean biomass increment (gC*m^-2)", axes=T, cex=2)
# Overlay the next plot on the same device.
par(new=T)
# Same data drawn as points; axes=F because the shared axes are added
# manually below, and xlim/ylim match the first plot so both layers share
# one coordinate system.
plot(vuf.mean.diff.area*.35*1000 ~ Year, data = vuf.allometric.full, xlim= c(2000,2015), ylim= c(0,900), pch=19, axes=F, ylab="", xlab="", cex=2)
# Explicit tick positions. NOTE(review): axis() expects a single side, so
# axis(1:2, ...) is suspect — confirm it draws the intended axes.
axis(1:2, at = c(1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2020))
# Draw the top axis without labels.
axis(3, labels=F)
# Horizontal dashed reference line at the series mean.
abline(h=mean(vuf.allometric.full$vuf.mean.diff.area*0.35*1000, na.rm=T), col = "red", lty="dashed", lwd=2)
# Overlay a third layer.
par(new=T)
# Sample-depth series over the same x range; its y scale is independent, so
# its axis is drawn separately on the right below.
plot(vuf.all.count ~ Year, data = vuf.allometric.full, type="l", xlim= c(2000,2015), axes=F, col= "Forest Green", xlab="", ylab="")
# Right-hand axis for sample depth, offset outward from the plot region.
axis(4, line =5)
# Label for the right axis (side 4), rotated vertically (las = 3).
mtext("Sample Depth (Tree)",4, line=6.5, col="Forest Green", las=3)
| /Ross' plot tips.R | no_license | vlscaven/Snow | R | false | false | 1,879 | r | #parameter for the graph to be made. Here I specify the margins, where I want the tics, and how I would like the axis labels arranged. For more info type ?par into the console
par(mar=c(5, 4, 4, 9) + 0.1,tck=0.025, las=1)
#just setting up the line plot. I've definined my w adn y limits, as well as given it a main title. You can also designate the ylabel (ylab). For multiple plots you want the first axis to be drawn so axes=T. cex defines the font size of the graph. For posters and presentations make things at least cex=2 so that people can see them
plot(vuf.mean.diff.area*.35*1000 ~ Year, data = vuf.allometric.full, type="l", lwd=2, xlim= c(2000,2015), ylim= c(0,900), main = "VUF Incremental Biomass vs. VCM GPP", ylab="Mean biomass increment (gC*m^-2)", axes=T, cex=2)
#making a new plot
par(new=T)
#plotting new plot, but now drawing the axes (axes=F), as I will add them to the right side of hte graph later. Making the xlim and ylim align so that they can be read on the same scale
plot(vuf.mean.diff.area*.35*1000 ~ Year, data = vuf.allometric.full, xlim= c(2000,2015), ylim= c(0,900), pch=19, axes=F, ylab="", xlab="", cex=2)
#here you can define what you would like the axes to read
axis(1:2, at = c(1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2020))
#turning off axes on the top
axis(3, labels=F)
#adding a horizontal dashed line to the graph
abline(h=mean(vuf.allometric.full$vuf.mean.diff.area*0.35*1000, na.rm=T), col = "red", lty="dashed", lwd=2)
#new graph
par(new=T)
#adding another plot
plot(vuf.all.count ~ Year, data = vuf.allometric.full, type="l", xlim= c(2000,2015), axes=F, col= "Forest Green", xlab="", ylab="")
#adding an axis on the right side of the graph
axis(4, line =5)
#mtext will let you add a label on the right side, 4 = right side of grapgh
mtext("Sample Depth (Tree)",4, line=6.5, col="Forest Green", las=3)
|
\name{printFormulae.asreml}
\alias{printFormulae.asreml}
\alias{printFormulae}
\title{Prints the formulae from an asreml object.}
\description{Prints the \code{\link{formula}}e nominated in the \code{which} argument from the \code{call} stored in an \code{asreml} object.}
\usage{
\method{printFormulae}{asreml}(asreml.obj, which = c("fixed", "random", "residual"),
expanded = FALSE, envir = parent.frame(), ...)
}
\arguments{
\item{asreml.obj}{An \code{asreml} object resulting from the fitting of
a model using REML.}
\item{which}{A \code{character} listing the \code{\link{formula}}(e) to be printed from the
\code{call} stored in \code{asreml.obj}. It should be some combination
of \code{fixed}, \code{random}, \code{residual}, \code{sparse} and
\code{all}. If \code{all} is included then all \code{\link{formula}}(e)
will be printed.}
\item{expanded}{A \code{logical} indicating whether terms are to be expanded to the
sum of a set of individual terms.}
\item{envir}{The environment in which the \code{\link{formula}}(e) are to be evaluated.
May also be \code{NULL}, a \code{list}, a \code{data.frame}, a \code{pairlist}
or an integer as specified to \code{sys.call}.}
\item{\dots}{Arguments passed on to \code{getFormulae.asreml} and ultimately to
\code{update.formula} and \code{terms.formula}.}
}
\value{Invisibly returns a \code{character}, each element of which contains one of the extracted
\code{\link{formula}}e.}
\author{Chris Brien}
\seealso{\code{\link{printFormulae.asreml}}}
\examples{\dontrun{
data(Wheat.dat)
current.asr <- asreml(yield ~ Rep + WithinColPairs + Variety,
random = ~ Row + Column + units,
residual = ~ ar1(Row):ar1(Column),
data=Wheat.dat)
printFormulae(current.asr)
}}
\keyword{manip}
\keyword{asreml}
| /man/printFormulae.asreml.Rd | no_license | cran/asremlPlus | R | false | false | 2,032 | rd | \name{printFormulae.asreml}
\alias{printFormulae.asreml}
\alias{printFormulae}
\title{Prints the formulae from an asreml object.}
\description{Prints the \code{\link{formula}}e nominated in the \code{which} argument from the \code{call} stored in an \code{asreml} object.}
\usage{
\method{printFormulae}{asreml}(asreml.obj, which = c("fixed", "random", "residual"),
expanded = FALSE, envir = parent.frame(), ...)
}
\arguments{
\item{asreml.obj}{An \code{asreml} object resulting from the fitting of
a model using REML.}
\item{which}{A \code{character} listing the \code{\link{formula}}(e) to be printed from the
\code{call} stored in \code{asreml.obj}. It should be some combination
of \code{fixed}, \code{random}, \code{residual}, \code{sparse} and
\code{all}. If \code{all} is included then all \code{\link{formula}}(e)
will be printed.}
\item{expanded}{A \code{logical} indicating whether terms are to be expanded to the
sum of a set of individual terms.}
\item{envir}{The environment in which the \code{\link{formula}}(e) are to be evaluated.
May also be \code{NULL}, a \code{list}, a \code{data.frame}, a \code{pairlist}
or an integer as specified to \code{sys.call}.}
\item{\dots}{Arguments passed on to \code{getFormulae.asreml} and ultimately to
\code{update.formula} and \code{terms.formula}.}
}
\value{Invisibly returns a \code{character}, each element of which contains one of the extracted
\code{\link{formula}}e.}
\author{Chris Brien}
\seealso{\code{\link{printFormulae.asreml}}}
\examples{\dontrun{
data(Wheat.dat)
current.asr <- asreml(yield ~ Rep + WithinColPairs + Variety,
random = ~ Row + Column + units,
residual = ~ ar1(Row):ar1(Column),
data=Wheat.dat)
printFormulae(current.asr)
}}
\keyword{manip}
\keyword{asreml}
|
# Build a "full score" time series from a MIDI file.
#
# Reads the MIDI file at `path`, converts each note's duration into an
# absolute end time, collects every instant at which any note starts or
# ends, and for each such instant sums column 5 of all notes sounding at
# that moment.
#
# path: path to a MIDI file readable by tuneR::readMidi().
# Returns a `ts` object with one value per note-boundary instant.
#
# NOTE(review): column 5 of the getMidiNotes() data frame is what gets
# summed — confirm against tuneR's documentation which quantity (note
# number vs. velocity) that column holds.
componiSpartitoCompleto<-function(path){
  library("tuneR")
  #library("questionr")
  x <- readMidi(path)
  x <- getMidiNotes(x)
  # Rewrite column 2 from note duration to absolute end time (start + length).
  for (i in c(1:nrow(x))){
    x[i,2] <- x[i,1] + x[i,2]
  }
  start <- unlist(x[1],use.names=FALSE)
  end <- unlist(x[2],use.names=FALSE)
  # All instants at which some note starts or ends, in ascending order.
  startEndUnito <- union(start,end)
  startEndUnito <- sort(startEndUnito)
  # Column 1: boundary instant; column 2: accumulator, initialised to 0.
  risultatoFinale <- data.frame(startEndUnito,0)
  for (i in c(1:nrow(risultatoFinale))){
    valore <- risultatoFinale[i,1]
    # Notes active at instant `valore`: started at or before it and still
    # sounding (the `length` column now holds the end time).
    tutteLeRighe <- x[which((x$length > valore) & (x$time <= valore)),]
    if (nrow(tutteLeRighe) >= 1){
      for (m in (c(1:nrow(tutteLeRighe)))){
        risultatoFinale[i,2] <- risultatoFinale[i,2] + tutteLeRighe[m,5]
      }
    }
  }
  return(ts(unlist(risultatoFinale[,2],use.names="FALSE")))
}
# TODO: delete ("da cancellare") — cal1() duplicates the non-list branch of baseCal()
# Compute three complexity statistics for a single numeric time series:
# box-count fractal dimension, Shannon entropy, and Hurst exponent.
#
# ts:     numeric time series.
# nlags:  passed to fractaldim::fd.estim.boxcount().
# method: entropy estimator, passed to entropy::entropy().
# d:      block size, passed to pracma::hurstexp().
# Returns a one-row data.frame with the three statistics.
cal1 <- function(ts,nlags="auto", method="ML", d=50){
  library("fractaldim")
  library("entropy")
  library("pracma")
  library("tseriesChaos")
  fractaldim <- fd.estim.boxcount(ts, nlags = nlags)
  entropy <- entropy(ts, method = method)
  hurst <- hurstexp(ts, display=FALSE, d=d)
  # [2] extracts one component of each estimator's result — presumably the
  # scalar statistic of interest; confirm against each package's docs.
  df <- data.frame(fractaldim[2],entropy,hurst[2])
  df
}
# Compute fractal dimension, entropy and Hurst exponent, either for a
# single series or once per window.
#
# ts: a numeric series, or a list of windows (e.g. from splitWithOverlap()).
# When `ts` is a list, returns a data.frame with one row per processed
# window and columns fractalDim / entropy / hurst; otherwise a one-row
# data.frame (same computation as cal1()).
#
# NOTE(review): the loop stops at length(ts) - 1, skipping the final
# window — presumably to drop a short trailing segment produced by
# splitWithOverlap(); confirm this is intentional.
baseCal <- function(ts,nlags="auto", method="ML", d=50){
  if(class(ts)=="list"){
    df <- data.frame()
    for (i in c(1:(length(ts)-1))){
      fractaldim <- fd.estim.boxcount(unlist(ts[i],use.names="FALSE"), nlags = nlags)
      entropy <- entropy(unlist(ts[i],use.names="FALSE"), method = method)
      hurst <- hurstexp(unlist(ts[i],use.names="FALSE"), display=FALSE, d=d)
      df <- rbind(df,c(fractaldim[2],entropy,hurst[2]))
      # Strip auto-generated column names so rbind keeps rows compatible.
      df <- unname(df)
    }
    colnames(df) <- c("fractalDim","entropy","hurst")
    return(df)
  }
  # Scalar (non-windowed) input: same three statistics in one row.
  fractaldim <- fd.estim.boxcount(ts, nlags = nlags)
  entropy <- entropy(ts, method = method)
  hurst <- hurstexp(ts, display=FALSE, d=d)
  df <- data.frame(fractaldim[2],entropy,hurst[2])
  df
}
# Estimate the maximal Lyapunov exponent curve of a time series with
# tseriesChaos::lyap_k(), discarding -Inf entries.
#
# ts: numeric time series.
# k:  number of considered neighbours, passed to lyap_k().
# Embedding parameters (m = 1, d = 2, s = 200, t = 40, eps = 3) are
# hard-coded; `ref` is set to two thirds of the series length.
lyapMia <- function(ts, k=15){
  lyapp <- lyap_k(ts, m = 1, d = 2, s = 200, t = 40, ref = length(ts)/1.5, k = k, eps = 3)
  # Drop -Inf entries. The comparison coerces to character, so "-Inf"
  # matches the printed form of numeric -Inf.
  lyapp <- lyapp[which(lyapp!="-Inf")]
  lyapp
}
# Joint entropy of the three feature columns of `df` (as produced by
# baseCal()), via successive 2-D discretisation into 5x5 bins.
#
# df: data.frame whose columns 1-3 are the three feature series.
# Returns a single entropy value (entropy::entropy() of the combined
# contingency table).
myJointEntropy <- function(df){
  # Discretise (col 2, col 1) and (col 2, col 3) jointly, then combine
  # the two resulting tables with a further 2-D discretisation.
  disc1 = discretize2d(ts(df[2]), ts(df[1]), numBins1=5, numBins2=5)
  disc2 = discretize2d(ts(df[2]), ts(df[3]), numBins1=5, numBins2=5)
  res <- discretize2d(disc1,disc2,numBins1=5, numBins2=5)
  res <- entropy(res)
  res
}
# Scatter/line plot of a numeric vector against its index, with points
# coloured by their distance from the origin (plot3D::scatter2D).
#
# lyap:  numeric vector to plot (e.g. output of lyapMia()).
# title: optional plot title; upper-cased before display.
myPlot2d <- function(lyap,title=NULL){
  library("plot3D")
  riga <- c(1:length(lyap))
  # Colour value: Euclidean distance of each (index, value) point from (0, 0).
  col.v <- sqrt(riga^2 + lyap^2)
  scatter2D(riga, lyap, colvar = col.v, pch = 16, bty ="g",
            type ="b",col = gg.col(100),main=toupper(title),xlab="Time",ylab="Value")
}
# Load the first `n` sequences of a FASTA file and encode the bases as a
# numeric vector.
#
# path: path to a FASTA file (read with seqinr::read.fasta()).
# n:    number of sequences to concatenate (default 1).
# Returns a numeric (double) vector of encoded bases.
#
# NOTE(review): the encoding maps a=0, c=1, g=3, t=2, n=6, and IUPAC
# ambiguity codes to assorted digits below; g/t are not in alphabetical
# order and several ambiguity codes collide with base codes (y/w/b/d/h/v
# all map to 3) — confirm this scheme is intended. Also note the ts()
# class is lost as soon as gsub() coerces to character.
fastaTs <- function(path,n=1){
  x <- read.fasta(path)
  x <- x[1:n]
  x <- unlist(x,use.names=FALSE)
  x <- ts(x)
  x <- gsub("a","0",x)
  x <- gsub("c","1",x)
  x <- gsub("g","3",x)
  x <- gsub("t","2",x)
  x <- gsub("n","6",x)
  x <- gsub("r","4",x)
  x <- gsub("y","3",x)
  x <- gsub("k","5",x)
  x <- gsub("m","2",x)
  x <- gsub("s","4",x)
  x <- gsub("w","3",x)
  x <- gsub("b","3",x)
  x <- gsub("d","3",x)
  x <- gsub("h","3",x)
  x <- gsub("v","3",x)
  x <- as.double(x)
  x
}
# Read a CSV file and return one of its columns (or rows) as a time series.
#
# path: path to the CSV file.
# n:    1-based index of the column (default) or row to extract.
# row:  if TRUE, extract row `n` instead of column `n`.
#
# Returns a `ts` object, or the scalar 0 when `n` is out of range.
csvToTs <- function (path, n = 1, row = FALSE)
{
  tbl <- read.csv(path)
  if (row == FALSE && n <= ncol(tbl)) {
    return(ts(tbl[n]))
  }
  if (row == TRUE && n <= nrow(tbl)) {
    return(ts(tbl[n, ]))
  }
  # Out-of-range request: keep the original contract of returning 0.
  0
}
# Extract rows `from` to `to` from `x`, coercing non-data.frame input
# (e.g. a vector or ts) to a data.frame first.
#
# x:    data.frame, or anything data.frame() accepts.
# from: first row (1-based).
# to:   last row; must be strictly greater than `from`.
# Returns the selected rows. A single-column input drops to a vector
# (default `[` behaviour, kept for backward compatibility).
#
# NOTE(review): when from >= to the (possibly coerced) input is returned
# unchanged rather than a single row — presumably intentional; confirm.
fromTo <- function(x,from,to){
  # Bug fix: class(x) != "data.frame" yields a length > 1 result for
  # multi-class objects (e.g. tibbles), which breaks under if(); use the
  # proper predicate instead.
  if (!is.data.frame(x)){
    x <- data.frame(x)
  }
  if (from < to) {
    return (x[from:to, ])
  } else return(x)
}
# Number of elements (e.g. notes) in `x`; thin wrapper around length().
howManyNotes <- function(x){
  length(x)
}
# Write `df` to <working dir>/resources/csvResults/<name>.csv with no row
# names and unquoted fields. The target directory must already exist.
#
# df:   data.frame to save.
# name: base file name without extension (default "myResults").
saveAsCsv <- function(df,name="myResults"){
  dest <- file.path(getwd(), "resources/csvResults", paste0(name, ".csv"))
  write.csv(df, dest, row.names = FALSE, quote = FALSE)
}
# Build an igraph graph from the first `n` values of a time series.
#
# Each point i is linked forward to the consecutive points i+1, i+2, ...
# whose value does not exceed x[i]; the scan stops at the first larger
# value (a visibility-style construction). Vertex names are
# "value,index" so repeated values remain distinct vertices.
#
# ts:       numeric series (anything data.frame() accepts).
# n:        number of leading values to use (default 70).
# directed: the string "TRUE" for a directed graph; anything else gives
#           an undirected graph. NOTE(review): compared as a string, not
#           a logical — matches this file's conventions but is fragile.
# Returns an igraph object.
tsToGraph <- function(ts,n=70,directed="FALSE"){
  # Enumerate forward "visible" pairs as a 4-column matrix:
  # value_i, value_j, name_i, name_j (columns 3-4 become the edge list).
  visGraph <- function(x){
    matrix <- matrix(data = NA, nrow = 0, ncol = 4)
    for (i in c(1:(nrow(x)-1))){
      j = 1;
      # Walk forward while the later value does not exceed the current one.
      while (x[i,1] >= x[i+j,1]){
        #tmp <- paste(x[i,1],i,sep=",")
        matrix <- rbind(matrix,c(x[i,1],x[i+j,1],x[i,2],x[i+j,2]))
        if (i+j+1 < nrow(x)){
          j = j+1
        } else break
      }
    }
    return (matrix)
  }
  if (class(ts) != "data.frame") {
    ts <- data.frame(ts)
  }
  y<-head(ts,n)
  # Column 2 of y: unique vertex name "value,index" for each point.
  for (i in c(1:(nrow(y)))){
    y[i,2] <- paste(y[i,1],i,sep=",")
  }
  res<-visGraph(y)
  # Edge endpoints by vertex name (columns 3-4 of the pair matrix).
  archi <- data.frame(res[,3],res[,4])
  vertici <- y[,2]
  if (directed=="TRUE"){
    g <- graph_from_data_frame(archi, directed=TRUE, vertices=vertici)
  } else {
    g <- graph_from_data_frame(archi, directed=FALSE, vertices=vertici)
  }
  return (g)
}
# Render an igraph object as an interactive force-directed D3 network and
# save it as an HTML widget under <working dir>/src/interPlot/.
#
# graph: an igraph object.
# Returns the networkD3 widget object (also written to disk).
interactivePlot <- function(graph){
  edges <- ends(graph,E(graph))   # two-column matrix of edge endpoint names
  res <- data.frame(edges[,1],edges[,2])
  res <- simpleNetwork(res, height="100px", width="100px",
                       Source = 1,             # column number of source
                       Target = 2,             # column number of target
                       linkDistance = 240,     # spacing between linked nodes
                       charge = -115,          # node repulsion (negative) / attraction (positive)
                       fontSize = 10,          # node-label font size
                       fontFamily = "serif",   # node-label font
                       linkColour = "#666",    # single colour for all edges
                       nodeColour = "#3f7ed1", # single colour for all nodes
                       opacity = 0.9,          # node opacity: 0 transparent .. 1 opaque
                       zoom = TRUE             # allow zooming
  )
  path <- paste(getwd(),"/src/interPlot/interPlot.html",sep="")
  newwd <- paste(getwd(),"/src",sep="")
  oldwd <- getwd()
  setwd(newwd)
  # Bug fix: restore the working directory even if saveWidget() errors;
  # previously a failure left the session stranded in src/.
  on.exit(setwd(oldwd), add = TRUE)
  saveWidget(res, file=path,title="provaTitolo",selfcontained=FALSE)
  return (res)
}
# Variant of interactivePlot() that also makes vertices with no incident
# edge show up in the widget (simpleNetwork() only sees an edge list, so
# edge-less vertices would otherwise disappear).
#
# graph: an igraph object.
# Returns the networkD3 widget object (also written to disk).
addIsolatedNodes<- function(graph){
  vertici <- as_ids(V(graph))
  edges <- ends(graph,E(graph))
  archiCompleti <- union(edges[,1],edges[,2])  # vertices that occur in some edge
  diff <- setdiff(vertici,archiCompleti)       # isolated vertices
  if(!length(diff)==0){
    for (i in seq_along(diff)){
      # NOTE(review): c(diff[i], NULL) is a length-1 vector, so rbind()
      # recycles the name into both columns, i.e. each isolated vertex is
      # added as a self-loop. Presumably intentional, since simpleNetwork()
      # cannot represent edge-less vertices — confirm.
      edges <- rbind(edges, c(diff[i],NULL))
    }
  }
  res <- data.frame(edges[,1],edges[,2])
  res <- simpleNetwork(res, height="100px", width="100px",
                       Source = 1,             # column number of source
                       Target = 2,             # column number of target
                       linkDistance = 240,     # spacing between linked nodes
                       charge = -115,          # node repulsion (negative) / attraction (positive)
                       fontSize = 10,          # node-label font size
                       fontFamily = "serif",   # node-label font
                       linkColour = "#666",    # single colour for all edges
                       nodeColour = "#3f7ed1", # single colour for all nodes
                       opacity = 0.9,          # node opacity: 0 transparent .. 1 opaque
                       zoom = TRUE             # allow zooming
  )
  path <- paste(getwd(),"/src/interPlot/interPlot.html",sep="")
  newwd <- paste(getwd(),"/src",sep="")
  oldwd <- getwd()
  setwd(newwd)
  # Bug fix: restore the working directory even if saveWidget() errors;
  # previously a failure left the session stranded in src/.
  on.exit(setwd(oldwd), add = TRUE)
  saveWidget(res, file=path,title="provaTitolo",selfcontained=FALSE)
  return (res)
}
# Split a vector into segments of length `seg.length` with `overlap`
# shared elements between consecutive segments. The final segment may be
# shorter than `seg.length`.
#
# vec:        vector to split.
# seg.length: window length.
# overlap:    number of overlapping elements; must be < seg.length.
# Returns a list of segments; NULL if overlap >= seg.length; an empty
# list for an empty input.
splitWithOverlap <- function(vec, seg.length, overlap) {
  # Guard the author had sketched in a comment: a non-positive step would
  # make seq() error out.
  if (seg.length <= overlap) {
    return(NULL)
  }
  # Robustness: seq(1, 0, by = positive) errors, so handle empty input.
  if (length(vec) == 0L) {
    return(list())
  }
  starts <- seq(1L, length(vec), by = seg.length - overlap)
  # Cap each segment end at the vector length.
  ends <- pmin(starts + seg.length - 1L, length(vec))
  lapply(seq_along(starts), function(i) vec[starts[i]:ends[i]])
}
# Approximate entropy (pracma::approx_entropy), either for a single
# series or once per window.
#
# ts: a numeric series, or a list of windows (e.g. from splitWithOverlap()).
# When `ts` is a list, returns a one-column data.frame with one row per
# processed window; otherwise a single value.
#
# NOTE(review): the loop stops at length(ts) - 1, mirroring baseCal() and
# mySample() — presumably to drop a short trailing window; confirm.
myApprox<-function(ts){
  if (class(ts)=="list"){
    df <- data.frame()
    for (i in c(1:(length(ts)-1))){
      appEnt <- approx_entropy(unlist(ts[i],use.names=FALSE))
      df <- rbind(df,c(appEnt))
    }
    colnames(df) <- c("approx_entropy")
    return(df)
  }
  # Bug fix: the scalar branch called approx_entropy(x) on an undefined
  # global `x`; use the `ts` argument instead.
  appEnt <- approx_entropy(ts)
  return(appEnt)
}
# Sample entropy (pracma::sample_entropy), either for a single series or
# once per window.
#
# ts: a numeric series, or a list of windows (e.g. from splitWithOverlap()).
# When `ts` is a list, returns a one-column data.frame with one row per
# processed window; otherwise a single value.
#
# NOTE(review): the loop stops at length(ts) - 1, mirroring baseCal() and
# myApprox() — presumably to drop a short trailing window; confirm.
mySample<-function(ts){
  if (class(ts)=="list"){
    df <- data.frame()
    for (i in c(1:(length(ts)-1))){
      samEnt <- sample_entropy(unlist(ts[i],use.names = FALSE))
      df <- rbind(df,c(samEnt))
    }
    colnames(df) <- c("sample_entropy")
    return(df)
  }
  # Bug fix: the scalar branch called sample_entropy(x) on an undefined
  # global `x`; use the `ts` argument instead.
  samEnt <- sample_entropy(ts)
  return(samEnt)
}
# Append every column of `df2` to `df1`.
#
# df1: data.frame (or NULL).
# df2: data.frame (or NULL).
# Returns df1 with df2's columns bound on; if either input is NULL, df1
# is returned unchanged (possibly NULL).
mergeDf<-function(df1,df2){
  if (!is.null(df1) && !is.null(df2)){
    # Bug fix: 1:ncol(df2) yields c(1, 0) for a zero-column data.frame
    # and then errors on df2[1]; seq_len() skips the loop instead.
    for (i in seq_len(ncol(df2))){
      df1 <- cbind(df1,df2[i])
    }
  }
  return(df1)
}
# Plot each column of `df` with myPlot2d() and save it as a JPEG under
# resources/plots/ (windowGraph1.jpg, windowGraph2.jpg, ...). The target
# directory must already exist.
#
# df: data.frame of per-window feature series (one plot per column).
saveWindowGraph <- function(df){
  for (i in seq_len(ncol(df))){
    name <- paste("resources/plots/windowGraph",i,".jpg",sep="")
    jpeg(name, width=750, height=575)
    # Bug fix: close the device even when plotting fails, so an error in
    # myPlot2d() no longer leaks an open graphics device (the error still
    # propagates to the caller).
    tryCatch(
      myPlot2d(ts(df[i]),colnames(df[i])),
      finally = dev.off()
    )
  }
}
# Read a CSV of windowed feature results and render one plot per column
# via saveWindowGraph().
#
# path: path to the CSV file.
# Returns 1 on success, NA on read/plot error, NULL on warning. Note the
# tryCatch value is assigned to `out`, so the handler results are
# returned invisibly, while the success path returns 1 visibly via
# return(1) from inside the expression.
loadResults <- function(path){
  out <- tryCatch(
    {
      res <- read.csv(path)
      if(!is.null(res)){
        saveWindowGraph(res)
      }
      return(1)
    },
    error=function(cond) {
      message(paste("There was an error with the csv:"),path)
      message("Here's the original error message:")
      message(cond)
      return(NA)
    },
    warning=function(cond) {
      message(paste("There was a warning with the csv!"),path)
      message("Here's the original warning message:")
      message(cond)
      return(NULL)
    }
  )
}
# Convert a list of time-series windows into a list of graphs, one per
# window, using tsToGraph().
#
# lista: list of numeric windows (e.g. from splitWithOverlap()).
# Returns a list of igraph objects, one per window.
#
# NOTE(review): every call uses length(lista[[1]]) — the FIRST window's
# length — as the cutoff. Harmless for a shorter final window (head()
# just returns fewer rows), but confirm it is intended.
windowTsToGraph <- function(lista){
  result <- vector("list",length(lista))
  for (i in c(1:length(lista))){
    result[[i]] <- tsToGraph(lista[i],length(lista[[1]]))
  }
  return(result)
} | /myRPackage/R/myRFunctions.R | no_license | MPiangerelli/SequenceAnalyzer | R | false | false | 9,757 | r | componiSpartitoCompleto<-function(path){
library("tuneR")
#library("questionr")
x <- readMidi(path)
x <- getMidiNotes(x)
for (i in c(1:nrow(x))){
x[i,2] <- x[i,1] + x[i,2]
}
start <- unlist(x[1],use.names=FALSE)
end <- unlist(x[2],use.names=FALSE)
startEndUnito <- union(start,end)
startEndUnito <- sort(startEndUnito)
risultatoFinale <- data.frame(startEndUnito,0)
for (i in c(1:nrow(risultatoFinale))){
valore <- risultatoFinale[i,1]
tutteLeRighe <- x[which((x$length > valore) & (x$time <= valore)),]
if (nrow(tutteLeRighe) >= 1){
for (m in (c(1:nrow(tutteLeRighe)))){
risultatoFinale[i,2] <- risultatoFinale[i,2] + tutteLeRighe[m,5]
}
}
}
return(ts(unlist(risultatoFinale[,2],use.names="FALSE")))
}
#da cancellare
cal1 <- function(ts,nlags="auto", method="ML", d=50){
library("fractaldim")
library("entropy")
library("pracma")
library("tseriesChaos")
fractaldim <- fd.estim.boxcount(ts, nlags = nlags)
entropy <- entropy(ts, method = method)
hurst <- hurstexp(ts, display=FALSE, d=d)
df <- data.frame(fractaldim[2],entropy,hurst[2])
df
}
baseCal <- function(ts,nlags="auto", method="ML", d=50){
if(class(ts)=="list"){
df <- data.frame()
for (i in c(1:(length(ts)-1))){
fractaldim <- fd.estim.boxcount(unlist(ts[i],use.names="FALSE"), nlags = nlags)
entropy <- entropy(unlist(ts[i],use.names="FALSE"), method = method)
hurst <- hurstexp(unlist(ts[i],use.names="FALSE"), display=FALSE, d=d)
df <- rbind(df,c(fractaldim[2],entropy,hurst[2]))
df <- unname(df)
}
colnames(df) <- c("fractalDim","entropy","hurst")
return(df)
}
fractaldim <- fd.estim.boxcount(ts, nlags = nlags)
entropy <- entropy(ts, method = method)
hurst <- hurstexp(ts, display=FALSE, d=d)
df <- data.frame(fractaldim[2],entropy,hurst[2])
df
}
lyapMia <- function(ts, k=15){
lyapp <- lyap_k(ts, m = 1, d = 2, s = 200, t = 40, ref = length(ts)/1.5, k = k, eps = 3)
lyapp <- lyapp[which(lyapp!="-Inf")]
lyapp
}
myJointEntropy <- function(df){
disc1 = discretize2d(ts(df[2]), ts(df[1]), numBins1=5, numBins2=5)
disc2 = discretize2d(ts(df[2]), ts(df[3]), numBins1=5, numBins2=5)
res <- discretize2d(disc1,disc2,numBins1=5, numBins2=5)
res <- entropy(res)
res
}
myPlot2d <- function(lyap,title=NULL){
library("plot3D")
riga <- c(1:length(lyap))
col.v <- sqrt(riga^2 + lyap^2)
scatter2D(riga, lyap, colvar = col.v, pch = 16, bty ="g",
type ="b",col = gg.col(100),main=toupper(title),xlab="Time",ylab="Value")
}
fastaTs <- function(path,n=1){
x <- read.fasta(path)
x <- x[1:n]
x <- unlist(x,use.names=FALSE)
x <- ts(x)
x <- gsub("a","0",x)
x <- gsub("c","1",x)
x <- gsub("g","3",x)
x <- gsub("t","2",x)
x <- gsub("n","6",x)
x <- gsub("r","4",x)
x <- gsub("y","3",x)
x <- gsub("k","5",x)
x <- gsub("m","2",x)
x <- gsub("s","4",x)
x <- gsub("w","3",x)
x <- gsub("b","3",x)
x <- gsub("d","3",x)
x <- gsub("h","3",x)
x <- gsub("v","3",x)
x <- as.double(x)
x
}
csvToTs <- function (path, n = 1, row = FALSE)
{
res <- read.csv(path)
if (row == FALSE && n <= ncol(res)) {
res <- ts(res[n])
} else if (row==TRUE && n <= nrow(res)){
res <- ts(res[n, ])
} else res <- 0
return(res)
}
fromTo <- function(x,from,to){
if (class(x) != "data.frame"){
x <- data.frame(x)
}
if (from < to) {
return (x[c(from:to),])
} else return(x)
}
howManyNotes <- function(x){
return (length(x))
}
saveAsCsv <- function(df,name="myResults"){
dest <- getwd()
dest <- paste(dest,"/",sep="")
dest <- paste(dest,"resources/csvResults/",name,sep="")
dest <- paste(dest,".csv",sep="")
write.csv(df,dest, row.names = FALSE,quote=FALSE)
}
tsToGraph <- function(ts,n=70,directed="FALSE"){
visGraph <- function(x){
matrix <- matrix(data = NA, nrow = 0, ncol = 4)
for (i in c(1:(nrow(x)-1))){
j = 1;
while (x[i,1] >= x[i+j,1]){
#tmp <- paste(x[i,1],i,sep=",")
matrix <- rbind(matrix,c(x[i,1],x[i+j,1],x[i,2],x[i+j,2]))
if (i+j+1 < nrow(x)){
j = j+1
} else break
}
}
return (matrix)
}
if (class(ts) != "data.frame") {
ts <- data.frame(ts)
}
y<-head(ts,n)
for (i in c(1:(nrow(y)))){
y[i,2] <- paste(y[i,1],i,sep=",")
}
res<-visGraph(y)
archi <- data.frame(res[,3],res[,4])
vertici <- y[,2]
if (directed=="TRUE"){
g <- graph_from_data_frame(archi, directed=TRUE, vertices=vertici)
} else {
g <- graph_from_data_frame(archi, directed=FALSE, vertices=vertici)
}
return (g)
}
interactivePlot <- function(graph){
edges <- ends(graph,E(graph))
res <- data.frame(edges[,1],edges[,2])
res <- simpleNetwork(res, height="100px", width="100px",
Source = 1, # column number of source
Target = 2, # column number of target
linkDistance = 240, # distance between node. Increase this value to have more space between nodes
charge = -115, # numeric value indicating either the strength of the node repulsion (negative value) or attraction (positive value)
fontSize = 10, # size of the node names
fontFamily = "serif", # font og node names
linkColour = "#666", # colour of edges, MUST be a common colour for the whole graph
nodeColour = "#3f7ed1", # colour of nodes, MUST be a common colour for the whole graph
opacity = 0.9, # opacity of nodes. 0=transparent. 1=no transparency
zoom = T # Can you zoom on the figure?
)
path <- paste(getwd(),"/src/interPlot/interPlot.html",sep="")
newwd <- paste(getwd(),"/src",sep="")
oldwd <- getwd()
setwd(newwd)
saveWidget(res, file=path,title="provaTitolo",selfcontained=FALSE)
setwd(oldwd)
return (res)
}
addIsolatedNodes<- function(graph){
vertici <- as_ids(V(graph))
edges <- ends(graph,E(graph))
archiCompleti <- union(edges[,1],edges[,2])
diff <- setdiff(vertici,archiCompleti)
if(!length(diff)==0){
for (i in c(1:length(diff))){
edges <- rbind(edges, c(diff[i],NULL))
}
}
res <- data.frame(edges[,1],edges[,2])
res <- simpleNetwork(res, height="100px", width="100px",
Source = 1, # column number of source
Target = 2, # column number of target
linkDistance = 240, # distance between node. Increase this value to have more space between nodes
charge = -115, # numeric value indicating either the strength of the node repulsion (negative value) or attraction (positive value)
fontSize = 10, # size of the node names
fontFamily = "serif", # font og node names
linkColour = "#666", # colour of edges, MUST be a common colour for the whole graph
nodeColour = "#3f7ed1", # colour of nodes, MUST be a common colour for the whole graph
opacity = 0.9, # opacity of nodes. 0=transparent. 1=no transparency
zoom = T # Can you zoom on the figure?
)
path <- paste(getwd(),"/src/interPlot/interPlot.html",sep="")
newwd <- paste(getwd(),"/src",sep="")
oldwd <- getwd()
setwd(newwd)
saveWidget(res, file=path,title="provaTitolo",selfcontained=FALSE)
setwd(oldwd)
return (res)
}
#' Split a vector into overlapping windows.
#'
#' @param vec Vector to split.
#' @param seg.length Window length; must be strictly greater than `overlap`.
#' @param overlap Number of elements shared by consecutive windows.
#' @return A list of sub-vectors; the final window is truncated at the end of
#'   `vec`, so it may be shorter than `seg.length`.
splitWithOverlap <- function(vec, seg.length, overlap) {
  # The old commented-out guard hinted at this: a non-positive step would make
  # seq() fail with a cryptic error, so validate the arguments up front.
  stopifnot(seg.length > overlap)
  starts <- seq(1, length(vec), by = seg.length - overlap)
  # pmin() caps every window end at the vector length in one vectorized step
  ends <- pmin(starts + seg.length - 1, length(vec))
  # seq_along() instead of 1:length(starts), which misbehaves for length 0
  lapply(seq_along(starts), function(i) vec[starts[i]:ends[i]])
}
#' Approximate entropy of a time series, or of each window in a list of
#' windows (as produced by splitWithOverlap()).
#'
#' @param ts A single series, or a list of windows.
#' @return For list input, a one-column data.frame ("approx_entropy") with one
#'   row per processed window; otherwise a single numeric value.
myApprox <- function(ts) {
  # inherits() beats class(ts) == "list", which breaks on multi-class objects
  if (inherits(ts, "list")) {
    df <- data.frame()
    # NOTE(review): the last window is skipped (length(ts) - 1), presumably
    # because it can be shorter than the others -- confirm this is intended.
    for (i in seq_len(length(ts) - 1)) {
      appEnt <- approx_entropy(unlist(ts[i], use.names = FALSE))
      df <- rbind(df, c(appEnt))
    }
    colnames(df) <- c("approx_entropy")
    return(df)
  }
  # BUG FIX: this branch previously called approx_entropy(x) where `x` was
  # undefined; the function argument is named `ts`.
  approx_entropy(ts)
}
#' Sample entropy of a time series, or of each window in a list of windows
#' (as produced by splitWithOverlap()).
#'
#' @param ts A single series, or a list of windows.
#' @return For list input, a one-column data.frame ("sample_entropy") with one
#'   row per processed window; otherwise a single numeric value.
mySample <- function(ts) {
  # inherits() beats class(ts) == "list", which breaks on multi-class objects
  if (inherits(ts, "list")) {
    df <- data.frame()
    # NOTE(review): the last window is skipped (length(ts) - 1), mirroring
    # myApprox() -- confirm this is intended.
    for (i in seq_len(length(ts) - 1)) {
      samEnt <- sample_entropy(unlist(ts[i], use.names = FALSE))
      df <- rbind(df, c(samEnt))
    }
    colnames(df) <- c("sample_entropy")
    return(df)
  }
  # BUG FIX: this branch previously called sample_entropy(x) where `x` was
  # undefined; the function argument is named `ts`.
  sample_entropy(ts)
}
#' Append the columns of df2 onto df1.
#'
#' @param df1 Base data.frame; returned unchanged when either input is NULL.
#' @param df2 data.frame whose columns are appended to df1.
#' @return df1 with df2's columns appended, or df1 as-is if either is NULL.
mergeDf <- function(df1, df2) {
  if (!is.null(df1) && !is.null(df2)) {
    # A single cbind() appends every column at once, replacing the old
    # one-column-at-a-time loop (which also hit the 1:ncol(df2) == c(1, 0)
    # trap when df2 had no columns).
    df1 <- cbind(df1, df2)
  }
  df1
}
# Save one JPEG plot per column of `df` under resources/plots/
# (windowGraph1.jpg, windowGraph2.jpg, ...). Relies on myPlot2d(), defined
# elsewhere in the project, to draw ts(df[i]) labelled with the column name.
# Called for its side effects only; nothing useful is returned.
saveWindowGraph <- function(df){
for (i in c(1:ncol(df))){
# output path for the i-th column, relative to the working directory
name <- paste("resources/plots/windowGraph",i,".jpg",sep="")
jpeg(name, width=750, height=575)
myPlot2d(ts(df[i]),colnames(df[i]))
# close the jpeg device so the file is flushed to disk
dev.off()
}
}
# Read a CSV of windowed results from `path` and, if it loads, plot each
# column via saveWindowGraph().
# Returns 1 on success, NA if read.csv() raised an error, NULL if it raised a
# warning. NOTE(review): the warning handler aborts the whole read, so a file
# that loads with a warning still yields NULL and no plots -- confirm intent.
loadResults <- function(path){
out <- tryCatch(
{
res <- read.csv(path)
if(!is.null(res)){
saveWindowGraph(res)
}
# return() here exits loadResults itself with the success code
return(1)
},
error=function(cond) {
message(paste("There was an error with the csv:"),path)
message("Here's the original error message:")
message(cond)
return(NA)
},
warning=function(cond) {
message(paste("There was a warning with the csv!"),path)
message("Here's the original warning message:")
message(cond)
return(NULL)
}
)
}
# Convert each window in `lista` (a list of time-series windows) into a graph
# via tsToGraph(), which is defined elsewhere in the project.
# NOTE(review): lista[i] (single bracket) passes a one-element sub-LIST rather
# than the window itself (lista[[i]]); tsToGraph() apparently unwraps it --
# confirm. The second argument is always the length of the FIRST window.
windowTsToGraph <- function(lista){
result <- vector("list",length(lista))
for (i in c(1:length(lista))){
result[[i]] <- tsToGraph(lista[i],length(lista[[1]]))
}
return(result)
}
numPerPatch569000 <- c(2514,2486)
| /NatureEE-data-archive/Run203121/JAFSdata/JAFSnumPerPatch569000.R | no_license | flaxmans/NatureEE2017 | R | false | false | 34 | r | numPerPatch569000 <- c(2514,2486)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kms_operations.R
\name{kms_disable_key_rotation}
\alias{kms_disable_key_rotation}
\title{Disables automatic rotation of the key material for the specified
symmetric customer master key (CMK)}
\usage{
kms_disable_key_rotation(KeyId)
}
\arguments{
\item{KeyId}{[required] Identifies a symmetric customer master key (CMK). You cannot enable or
disable automatic rotation of \href{https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks}{asymmetric CMKs},
CMKs with \href{https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html}{imported key material},
or CMKs in a \href{https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html}{custom key store}.
Specify the key ID or the Amazon Resource Name (ARN) of the CMK.
For example:
\itemize{
\item Key ID: \verb{1234abcd-12ab-34cd-56ef-1234567890ab}
\item Key ARN:
\verb{arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab}
}
To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey.}
}
\description{
Disables \href{https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html}{automatic rotation of the key material}
for the specified symmetric customer master key (CMK).
You cannot enable automatic rotation of asymmetric CMKs, CMKs with
imported key material, or CMKs in a \href{https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html}{custom key store}.
The CMK that you use for this operation must be in a compatible key
state. For details, see \href{https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html}{How Key State Affects Use of a Customer Master Key}
in the \emph{AWS Key Management Service Developer Guide}.
\strong{Cross-account use}: No. You cannot perform this operation on a CMK in
a different AWS account.
\strong{Required permissions}:
\href{https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html}{kms:DisableKeyRotation}
(key policy)
\strong{Related operations:}
\itemize{
\item EnableKeyRotation
\item GetKeyRotationStatus
}
}
\section{Request syntax}{
\preformatted{svc$disable_key_rotation(
KeyId = "string"
)
}
}
\examples{
\dontrun{
# The following example disables automatic annual rotation of the key
# material for the specified CMK.
svc$disable_key_rotation(
KeyId = "1234abcd-12ab-34cd-56ef-1234567890ab"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/kms_disable_key_rotation.Rd | permissive | sanchezvivi/paws | R | false | true | 2,503 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kms_operations.R
\name{kms_disable_key_rotation}
\alias{kms_disable_key_rotation}
\title{Disables automatic rotation of the key material for the specified
symmetric customer master key (CMK)}
\usage{
kms_disable_key_rotation(KeyId)
}
\arguments{
\item{KeyId}{[required] Identifies a symmetric customer master key (CMK). You cannot enable or
disable automatic rotation of \href{https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks}{asymmetric CMKs},
CMKs with \href{https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html}{imported key material},
or CMKs in a \href{https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html}{custom key store}.
Specify the key ID or the Amazon Resource Name (ARN) of the CMK.
For example:
\itemize{
\item Key ID: \verb{1234abcd-12ab-34cd-56ef-1234567890ab}
\item Key ARN:
\verb{arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab}
}
To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey.}
}
\description{
Disables \href{https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html}{automatic rotation of the key material}
for the specified symmetric customer master key (CMK).
You cannot enable automatic rotation of asymmetric CMKs, CMKs with
imported key material, or CMKs in a \href{https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html}{custom key store}.
The CMK that you use for this operation must be in a compatible key
state. For details, see \href{https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html}{How Key State Affects Use of a Customer Master Key}
in the \emph{AWS Key Management Service Developer Guide}.
\strong{Cross-account use}: No. You cannot perform this operation on a CMK in
a different AWS account.
\strong{Required permissions}:
\href{https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html}{kms:DisableKeyRotation}
(key policy)
\strong{Related operations:}
\itemize{
\item EnableKeyRotation
\item GetKeyRotationStatus
}
}
\section{Request syntax}{
\preformatted{svc$disable_key_rotation(
KeyId = "string"
)
}
}
\examples{
\dontrun{
# The following example disables automatic annual rotation of the key
# material for the specified CMK.
svc$disable_key_rotation(
KeyId = "1234abcd-12ab-34cd-56ef-1234567890ab"
)
}
}
\keyword{internal}
|
# Crop each 28x28 digit image down to a 19x19 window anchored at the digit's
# bounding box, then write the reduced dataset to reduced.data.csv.
# Depends on data_munging.R providing `full`: a data.table with the label in
# column 1, 784 pixel columns (2:785), and per-image bounding-box columns
# rowStart/rowEnd/colStart/colEnd.
setwd("C:/Users/Pigkappa/Dropbox/Data_Science/Digit_Recognition")
source("data_munging.R")
# build column names pixel1..pixel361 for the 19x19 output
pixel_names = character(19*19)
for (k in 1:(19*19)){
pixel_names[k] = paste0(c("pixel", as.character(k)), collapse = "")
}
nmax = 70000
# preallocate the output frame: one row per image, 361 pixel columns
reduced_data = data.frame(matrix(nrow = nmax, ncol = 19*19))
stime = system.time({
for(obs in 1:nmax){
# reshape the 784 pixel columns back into a 28x28 image (data.table syntax)
pixelMatrix = matrix(as.numeric(full[obs,2:785, with = F]), nrow = 28, ncol=28, byrow = T)
rowStart = full[obs,rowStart]
rowEnd = full[obs,rowEnd]
colStart = full[obs,colStart]
colEnd = full[obs,colEnd]
pixelMatrixReduced = matrix(0, nrow = 19, ncol = 19, byrow = T)
# copy at most 19x19 pixels starting at the bounding box's top-left corner;
# positions beyond the bounding box stay zero
for (r in seq(rowStart, min(rowStart+18, rowEnd))){
for (k in seq(colStart, min(colStart+18, colEnd))){
pixelMatrixReduced[1+r-rowStart,1+k-colStart] = pixelMatrix[r,k]
}
}
# flatten row-major (transpose first) to match the pixel1..pixel361 ordering
reduced_data[obs,] = as.vector(t(pixelMatrixReduced))
}
})
# prepend the label column and name everything
reduced_data = cbind(full[1:nmax,label], reduced_data)
names(reduced_data) = c("label",pixel_names)
write.table(reduced_data, "reduced.data.csv", sep = ",", row.names = F)
| /data_reduction.R | no_license | andreacaleo/DigitRecognition | R | false | false | 1,185 | r | setwd("C:/Users/Pigkappa/Dropbox/Data_Science/Digit_Recognition")
source("data_munging.R")
pixel_names = character(19*19)
for (k in 1:(19*19)){
pixel_names[k] = paste0(c("pixel", as.character(k)), collapse = "")
}
nmax = 70000
reduced_data = data.frame(matrix(nrow = nmax, ncol = 19*19))
stime = system.time({
for(obs in 1:nmax){
pixelMatrix = matrix(as.numeric(full[obs,2:785, with = F]), nrow = 28, ncol=28, byrow = T)
rowStart = full[obs,rowStart]
rowEnd = full[obs,rowEnd]
colStart = full[obs,colStart]
colEnd = full[obs,colEnd]
pixelMatrixReduced = matrix(0, nrow = 19, ncol = 19, byrow = T)
for (r in seq(rowStart, min(rowStart+18, rowEnd))){
for (k in seq(colStart, min(colStart+18, colEnd))){
pixelMatrixReduced[1+r-rowStart,1+k-colStart] = pixelMatrix[r,k]
}
}
reduced_data[obs,] = as.vector(t(pixelMatrixReduced))
}
})
reduced_data = cbind(full[1:nmax,label], reduced_data)
names(reduced_data) = c("label",pixel_names)
write.table(reduced_data, "reduced.data.csv", sep = ",", row.names = F)
|
library(shiny)
# Define server logic required to draw a histogram
# Server logic: scatter-plots the first `bins` observations of the built-in
# iris dataset (Sepal.Width against Sepal.Length). Despite the template
# comments below, this draws points, not a histogram; the slider input named
# "bins" just controls how many rows are plotted.
shinyServer(function(input, output) {
  # Expression that generates a histogram. The expression is
  # wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should be automatically
  # re-executed when inputs change
  # 2) Its output type is a plot
  output$distPlot <- renderPlot({
    bins <- input$bins
    # take the first `bins` observations of each measurement
    x<-iris$Sepal.Length[1:bins]
    y<-iris$Sepal.Width[1:bins]
    plot(y~x,
         col = "red",
         lwd = 8,
         xlab="Sepal.Length",
         ylab = "Sepal.Width")
  })
})
| /server.R | no_license | david2588e/iris-wk-12 | R | false | false | 593 | r | library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
# Expression that generates a histogram. The expression is
# wrapped in a call to renderPlot to indicate that:
#
# 1) It is "reactive" and therefore should be automatically
# re-executed when inputs change
# 2) Its output type is a plot
output$distPlot <- renderPlot({
bins <- input$bins
x<-iris$Sepal.Length[1:bins]
y<-iris$Sepal.Width[1:bins]
plot(y~x,
col = "red",
lwd = 8,
xlab="Sepal.Length",
ylab = "Sepal.Width")
})
})
|
library(reshape2)
library(lme4)
library(lmerTest)
library(dplyr)
library(tidyr)
library(ggplot2)
library(Hmisc)
library(MuMIn)
library(lsmeans)
library(effects)
library(betapart)
###############################################################################
################################ Beta diversity ###############################
###############################################################################
# Build a plot-by-species matrix from datat.cast (rows with brn == "NO",
# presumably "not burned" -- confirm), compute pairwise beta diversity with
# betapart::beta.pair, and model the NESTEDNESS component (beta.sne) for
# matched plot pairs. Columns 9:60 are assumed to be the species counts.
betamatrix <- datat.cast[datat.cast$brn == "NO",9:60]
rownames(betamatrix) <- datat.cast[datat.cast$brn == "NO",]$ID
# flag empty plots: NONE = 1 when no species at all were recorded in the row
for (i in 1:length(betamatrix$NONE))
{
betamatrix[i,which(colnames(betamatrix) == "NONE")] <- ifelse(sum(betamatrix[i,]) == 0, 1, 0)
}
# drop empty plots, convert counts to presence/absence, drop the helper column
betamatrix <- betamatrix[betamatrix$NONE == 0,]
betamatrix <- as.data.frame(ifelse(betamatrix == 0, 0, 1))
betamatrix <- subset(betamatrix, select = -NONE)
# pairwise beta diversity; beta.sne is the nestedness component
beta1 <- beta.pair(betamatrix)
beta1.sne <- as.matrix(beta1$beta.sne)
beta1.sne.melt <- melt(data = beta1.sne)
# the pair IDs apparently encode "Site LUH CT SP ET EP" separated by spaces
# (see the colnames() call below); split both members of each pair
vals <- strsplit(as.character(beta1.sne.melt$Var1), " ")
x <- as.data.frame(do.call("rbind", vals))
beta1.sne.melt3 <- cbind(x, beta1.sne.melt)
vals <- strsplit(as.character(beta1.sne.melt$Var2), " ")
x <- as.data.frame(do.call("rbind", vals))
nes.melt <- cbind(x, beta1.sne.melt3)
colnames(nes.melt) <- c("Site.x", "LUH.x", "CT.x", "SP.x", "ET.x", "EP.x", "Site.y", "LUH.y", "CT.y", "SP.y", "ET.y", "EP.y", "ID.x", "ID.y", "nes")
# keep only within-site, within-LUH, within-CT pairs
nes.melt <- nes.melt[nes.melt$Site.x == nes.melt$Site.y,]
nes.melt <- nes.melt[nes.melt$LUH.x == nes.melt$LUH.y,]
nes.melt <- nes.melt[nes.melt$CT.x == nes.melt$CT.y,]
# pair = "A" for E-vs-P comparisons (either direction), "P" otherwise
nes.melt$pair <- ifelse(nes.melt$ET.x == "E" & nes.melt$ET.y == "P" , "A", "P")
nes.melt$pair <- ifelse(nes.melt$ET.x == "P" & nes.melt$ET.y == "E" , "A", nes.melt$pair)
# sqrt-transform nestedness to reduce skew before modelling
hist(sqrt(nes.melt$nes))
# mixed model: LUH x CT x pair fixed effects, random intercept per site
mnes1 <- lmer(sqrt(nes) ~ LUH.x*CT.x*pair + (1|Site.x), data = nes.melt)
sumnes <- summary(mnes1)
# residual diagnostics and effect summaries
plot(mnes1)
hist(resid(mnes1))
qqnorm(resid(mnes1))
lsnes <- lsmeansLT(mnes1)
plot(allEffects(mnes1))
| /BetaDiversity_SeedlingDiversity.R | no_license | qmsorenson/SeedlingDiversity | R | false | false | 1,955 | r | library(reshape2)
library(lme4)
library(lmerTest)
library(dplyr)
library(tidyr)
library(ggplot2)
library(Hmisc)
library(MuMIn)
library(lsmeans)
library(effects)
library(betapart)
###############################################################################
################################ Beta diversity ###############################
###############################################################################
betamatrix <- datat.cast[datat.cast$brn == "NO",9:60]
rownames(betamatrix) <- datat.cast[datat.cast$brn == "NO",]$ID
for (i in 1:length(betamatrix$NONE))
{
betamatrix[i,which(colnames(betamatrix) == "NONE")] <- ifelse(sum(betamatrix[i,]) == 0, 1, 0)
}
betamatrix <- betamatrix[betamatrix$NONE == 0,]
betamatrix <- as.data.frame(ifelse(betamatrix == 0, 0, 1))
betamatrix <- subset(betamatrix, select = -NONE)
beta1 <- beta.pair(betamatrix)
beta1.sne <- as.matrix(beta1$beta.sne)
beta1.sne.melt <- melt(data = beta1.sne)
vals <- strsplit(as.character(beta1.sne.melt$Var1), " ")
x <- as.data.frame(do.call("rbind", vals))
beta1.sne.melt3 <- cbind(x, beta1.sne.melt)
vals <- strsplit(as.character(beta1.sne.melt$Var2), " ")
x <- as.data.frame(do.call("rbind", vals))
nes.melt <- cbind(x, beta1.sne.melt3)
colnames(nes.melt) <- c("Site.x", "LUH.x", "CT.x", "SP.x", "ET.x", "EP.x", "Site.y", "LUH.y", "CT.y", "SP.y", "ET.y", "EP.y", "ID.x", "ID.y", "nes")
nes.melt <- nes.melt[nes.melt$Site.x == nes.melt$Site.y,]
nes.melt <- nes.melt[nes.melt$LUH.x == nes.melt$LUH.y,]
nes.melt <- nes.melt[nes.melt$CT.x == nes.melt$CT.y,]
nes.melt$pair <- ifelse(nes.melt$ET.x == "E" & nes.melt$ET.y == "P" , "A", "P")
nes.melt$pair <- ifelse(nes.melt$ET.x == "P" & nes.melt$ET.y == "E" , "A", nes.melt$pair)
hist(sqrt(nes.melt$nes))
mnes1 <- lmer(sqrt(nes) ~ LUH.x*CT.x*pair + (1|Site.x), data = nes.melt)
sumnes <- summary(mnes1)
plot(mnes1)
hist(resid(mnes1))
qqnorm(resid(mnes1))
lsnes <- lsmeansLT(mnes1)
plot(allEffects(mnes1))
|
# Lexical-Scoping
#This function creates a special "matrix" object that can cache its inverse.
# Wrap a matrix in a list of accessor closures so that a value derived from it
# (used by cacheSolve() below) can be memoised alongside it.
# The returned list exposes: set/get for the matrix itself, and
# setmean/getmean for the cached value. Replacing the matrix via set()
# clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # a new matrix invalidates any previously cached value
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setmean <- function(mean) {
    cached <<- mean
  }
  getmean <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setmean = setmean,
    getmean = getmean
  )
}
## Write a short comment describing this function
#' Return the inverse of the matrix wrapped by a makeCacheMatrix() object,
#' computing it at most once: the first call runs solve() and stores the
#' result in the wrapper's cache; later calls return the cached inverse.
#'
#' @param x A cache object created by makeCacheMatrix().
#' @param ... Further arguments passed on to solve().
#' @return A matrix that is the inverse of the wrapped matrix.
cacheSolve <- function(x, ...) {
  m <- x$getmean()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # BUG FIX: this previously computed mean(data, ...) even though the function
  # is documented to return the matrix inverse; solve() is the intended call.
  # The wrapper's setmean/getmean slots are reused as the cache, so this works
  # with the existing makeCacheMatrix() unchanged.
  m <- solve(data, ...)
  x$setmean(m)
  m
}
| /cachematrix.R | no_license | ouhannouhajar/scoping | R | false | false | 710 | r | # Lexical-Scoping
#This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmean <- function(mean) m <<- mean
getmean <- function() m
list(set = set, get = get,
setmean = setmean,
getmean = getmean)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getmean()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- mean(data, ...)
x$setmean(m)
m
}
|
#!/usr/bin/env Rscript
# Named colour palettes for Figure 2. Exactly one `clist` is active at a time;
# uncomment the mapping matching the grouping being plotted (haplogroup
# ancestry, pathogenicity, or variant class).
clist=c("European"="coral","African"="darkolivegreen","Asian"="dodgerblue","Caucasian"="orchid","Other"="gold","Homoplastic"="brown1","No haplogroup"="dimgray")
#clist=c("Pathogenic"="darkcyan","Non-pathogenic"="mediumpurple","Pathogenic (Cfrm status)"="brown1")
#clist=c("syn"="cornflowerblue","nonsyn"="darkgoldenrod1","non-coding"="firebrick")
| /scripts/plotting/misc/colors_for_fig2.R | no_license | clody23/UKBiobank_mtPheWas | R | false | false | 371 | r | #!/usr/bin/env Rscript
clist=c("European"="coral","African"="darkolivegreen","Asian"="dodgerblue","Caucasian"="orchid","Other"="gold","Homoplastic"="brown1","No haplogroup"="dimgray")
#clist=c("Pathogenic"="darkcyan","Non-pathogenic"="mediumpurple","Pathogenic (Cfrm status)"="brown1")
#clist=c("syn"="cornflowerblue","nonsyn"="darkgoldenrod1","non-coding"="firebrick")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/effort_initialize.R
\name{effort_initialize}
\alias{effort_initialize}
\title{Formats a reference dataset for title/abstract screening efforts.}
\usage{
effort_initialize(aDataFrame, study_ID = TRUE,
unscreenedValue = "not vetted", dual = FALSE, front = TRUE)
}
\arguments{
\item{aDataFrame}{A data.frame object that includes the titles and
abstracts to be screened. It will be formatted for screening efforts.
See example: \code{\link{example_references_metagear}}}
\item{study_ID}{When \code{FALSE}, does not add a column "STUDY_ID" that
includes a unique identification number for each reference (row) in the
dataFrame.}
\item{unscreenedValue}{Changes the default coding (a string) of "not vetted"
that designates whether an abstract remains to be screened or vetted as
part of the "INCLUDE" column.}
\item{dual}{When \code{TRUE}, formats dataFrame for a dual screening (paired)
design. Creates two reviewer teams: REVIEWERS_A and REVIEWERS_B.}
\item{front}{When \code{FALSE}, adds new columns to the back end of the
dataframe. When \code{TRUE}, adds columns to the front.}
}
\value{
A data.frame formatted for title/abstract screening efforts.
}
\description{
Adds columns with standardized labels to a data frame with bibliographic data
on journal articles. These columns will be used to assign reviewers, to
implement a dual screening design, and to code inclusion/exclusion
screening decisions.
}
\examples{
data(example_references_metagear)
effort_initialize(example_references_metagear)
}
\seealso{
\code{\link{effort_distribute}}, \code{\link{effort_merge}},
\code{\link{effort_summary}}
}
| /man/effort_initialize.Rd | no_license | haiyangzhang798/metagear | R | false | false | 1,759 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/effort_initialize.R
\name{effort_initialize}
\alias{effort_initialize}
\title{Formats a reference dataset for title/abstract screening efforts.}
\usage{
effort_initialize(aDataFrame, study_ID = TRUE,
unscreenedValue = "not vetted", dual = FALSE, front = TRUE)
}
\arguments{
\item{aDataFrame}{A data.frame object that includes the titles and
abstracts to be screened. It will be formatted for screening efforts.
See example: \code{\link{example_references_metagear}}}
\item{study_ID}{When \code{FALSE}, does not add a column "STUDY_ID" that
includes a unique identification number for each reference (row) in the
dataFrame.}
\item{unscreenedValue}{Changes the default coding (a string) of "not vetted"
that designates whether an abstract remains to be screened or vetted as
part of the "INCLUDE" column.}
\item{dual}{When \code{TRUE}, formats dataFrame for a dual screening (paired)
design. Creates two reviewer teams: REVIEWERS_A and REVIEWERS_B.}
\item{front}{When \code{FALSE}, adds new columns to the back end of the
dataframe. When \code{TRUE}, adds columns to the front.}
}
\value{
A data.frame formatted for title/abstract screening efforts.
}
\description{
Adds columns with standardized labels to a data frame with bibliographic data
on journal articles. These columns will be used to assign reviewers, to
implement a dual screening design, and to code inclusion/exclusion
screening decisions.
}
\examples{
data(example_references_metagear)
effort_initialize(example_references_metagear)
}
\seealso{
\code{\link{effort_distribute}}, \code{\link{effort_merge}},
\code{\link{effort_summary}}
}
|
#' Fetch current Premier League 1X2 (moneyline) odds from the Pinnacle API and
#' return them in the column layout used by the main model script:
#' league, Date, HomeTeam, AwayTeam, P_H_odds, P_D_odds, P_A_odds.
#' Team names are translated from Pinnacle's naming to the project's naming.
#'
#' NOTE(review): the `country` argument is never used; the league id is
#' hardcoded to 1980 (England - Premier League) even though eng_pl_id is
#' computed; and the Pinnacle account id is hardcoded in source -- consider
#' moving it to an environment variable.
give_pinnacle_odds <- function(country = 'ENG',pwd){
library(pinnacle.API)
library(httr)
library(jsonlite)
library(askpass)
AcceptTermsAndConditions(accepted = TRUE)
user <- "KD1049991"
#pwd <- askpass()
base <- "https://api.pinnaclesports.com/"
# using the api pinnacle package
SetCredentials(user, pwd)
# Get Sports
sport_data <- GetSports()
# Get Soccer id - 29
soccer_id <- with(sport_data, id[name == 'Soccer'])
# Get league id
# Swedish allsvenska 1728
# English Premier league 1980
#with(GetLeaguesByID(29), leagues.id[grepl("england - premier league",tolower(leagues.name))])
eng_pl_id <- with(GetLeaguesByID(29), leagues.id[leagues.name=="England - Premier League"])
# Get eventid
# Get Odds
soccer_data <- showOddsDF(soccer_id, leagueids=1980 ,tableformat = "mainlines",oddsformat = "DECIMAL")
# Translating to our models output structure
# keep only full-match lines (period 0), not half-time markets
soccer_data <- soccer_data[soccer_data$events.periods.number==0,]
structured_odds <- soccer_data[,c('leagues.name','events.periods.cutoff','league.events.home','league.events.away'
,'periods.moneyline.home','periods.moneyline.draw','periods.moneyline.away')]
# rename the columns
names(structured_odds) <- c('league','Date','HomeTeam','AwayTeam','P_H_odds','P_D_odds','P_A_odds')
# To get team names similar to already used teamnames in main script:
# p.names holds Pinnacle's spellings, team.names the project's, index-aligned
p.names=c("Arsenal","Aston Villa","Birmingham","Blackburn","Blackpool","Bolton","Bournemouth","Bradford","Brighton and Hove Albion","Burnley","Cardiff City","Charlton","Chelsea","Coventry","Crystal Palace","Derby","Everton","Fulham","Huddersfield Town","Hull","Ipswich","Leeds","Leicester City","Liverpool","Manchester City","Manchester United","Middlesbrough","Newcastle United","Norwich","Portsmouth","QPR","Reading","Sheffield United","Southampton","Stoke","Sunderland","Swansea","Tottenham Hotspur","Watford","West Brom","West Ham United","Wigan","Wolverhampton Wanderers","Wimbledon")
team.names=c("Arsenal","AstonVilla","Birmingham","Blackburn","Blackpool","Bolton","Bournemouth","Bradford","Brighton","Burnley","Cardiff","Charlton","Chelsea","Coventry","CrystalPalace","Derby","Everton","Fulham","Huddersfield","Hull","Ipswich","Leeds","Leicester","Liverpool","ManCity","ManUnited","Middlesbrough","Newcastle","Norwich","Portsmouth","QPR","Reading","SheffieldUnited","Southampton","Stoke","Sunderland","Swansea","Tottenham","Watford","WestBrom","WestHam","Wigan","Wolves","Wimbledon")
for (i in 1:nrow(structured_odds)){
structured_odds$HomeTeam[i]=team.names[which(p.names==structured_odds$HomeTeam[i])]
structured_odds$AwayTeam[i]=team.names[which(p.names==structured_odds$AwayTeam[i])]
}
# Placing a bet (BE CAREFUL WITH THIS)
# periodNumber represents the period of the match. For example, for soccer we have 0 (Game), 1 (1st Half) & 2 (2nd Half)
# PlaceBet(
#     stake = 100/4.85,
#     sportId = 29,
#     eventId = 901498168,
#     periodNumber = 0,
#     lineId = 598210299,
#     betType = 'MONEYLINE',
#     team = 'TEAM1'
#   )
# take a look at kelly criterion for betting
return(structured_odds)
}
| /external_connected_scripts/pinnacle_betting.R | no_license | kaspersgit/Premier-League | R | false | false | 3,173 | r | give_pinnacle_odds <- function(country = 'ENG',pwd){
library(pinnacle.API)
library(httr)
library(jsonlite)
library(askpass)
AcceptTermsAndConditions(accepted = TRUE)
user <- "KD1049991"
#pwd <- askpass()
base <- "https://api.pinnaclesports.com/"
# using the api pinnacle package
SetCredentials(user, pwd)
# Get Sports
sport_data <- GetSports()
# Get Soccer id - 29
soccer_id <- with(sport_data, id[name == 'Soccer'])
# Get league id
# Swedish allsvenska 1728
# English Premier league 1980
#with(GetLeaguesByID(29), leagues.id[grepl("england - premier league",tolower(leagues.name))])
eng_pl_id <- with(GetLeaguesByID(29), leagues.id[leagues.name=="England - Premier League"])
# Get eventid
# Get Odds
soccer_data <- showOddsDF(soccer_id, leagueids=1980 ,tableformat = "mainlines",oddsformat = "DECIMAL")
# Translating to our models output structure
soccer_data <- soccer_data[soccer_data$events.periods.number==0,]
structured_odds <- soccer_data[,c('leagues.name','events.periods.cutoff','league.events.home','league.events.away'
,'periods.moneyline.home','periods.moneyline.draw','periods.moneyline.away')]
# rename the columns
names(structured_odds) <- c('league','Date','HomeTeam','AwayTeam','P_H_odds','P_D_odds','P_A_odds')
# To get team names similar to already used teamnames in main script
p.names=c("Arsenal","Aston Villa","Birmingham","Blackburn","Blackpool","Bolton","Bournemouth","Bradford","Brighton and Hove Albion","Burnley","Cardiff City","Charlton","Chelsea","Coventry","Crystal Palace","Derby","Everton","Fulham","Huddersfield Town","Hull","Ipswich","Leeds","Leicester City","Liverpool","Manchester City","Manchester United","Middlesbrough","Newcastle United","Norwich","Portsmouth","QPR","Reading","Sheffield United","Southampton","Stoke","Sunderland","Swansea","Tottenham Hotspur","Watford","West Brom","West Ham United","Wigan","Wolverhampton Wanderers","Wimbledon")
team.names=c("Arsenal","AstonVilla","Birmingham","Blackburn","Blackpool","Bolton","Bournemouth","Bradford","Brighton","Burnley","Cardiff","Charlton","Chelsea","Coventry","CrystalPalace","Derby","Everton","Fulham","Huddersfield","Hull","Ipswich","Leeds","Leicester","Liverpool","ManCity","ManUnited","Middlesbrough","Newcastle","Norwich","Portsmouth","QPR","Reading","SheffieldUnited","Southampton","Stoke","Sunderland","Swansea","Tottenham","Watford","WestBrom","WestHam","Wigan","Wolves","Wimbledon")
for (i in 1:nrow(structured_odds)){
structured_odds$HomeTeam[i]=team.names[which(p.names==structured_odds$HomeTeam[i])]
structured_odds$AwayTeam[i]=team.names[which(p.names==structured_odds$AwayTeam[i])]
}
# Placing a bett (BE CAREFULL WITH THIS)
# periodNumber represents the period of the match. For example, for soccer we have 0 (Game), 1 (1st Half) & 2 (2nd Half)
# PlaceBet(
# stake = 100/4.85,
# sportId = 29,
# eventId = 901498168,
# periodNumber = 0,
# lineId = 598210299,
# betType = 'MONEYLINE',
# team = 'TEAM1'
# )
# take a look at kelly criterion for betting
return(structured_odds)
}
|
# ggplot2 practice: scatter, facet, and smoothed-line plots of the built-in
# mpg dataset (engine displacement vs highway mileage).
library(ggplot2)
head(mpg)
# basic distribution plot: one point per car, coloured by vehicle class
ggplot(data = mpg) + geom_point(aes(x = displ, y = hwy, color = class))
# facet wraps: one scatter panel per vehicle class
ggplot(mpg) +
  geom_point(aes(displ, hwy)) +
  facet_wrap(~class, nrow=7)
# smooth line: a single smoothed trend over all cars
ggplot(mpg) +
  geom_smooth(aes(displ, hwy))
# one smoothed line per drivetrain (drv), distinguished by linetype
ggplot(mpg) +
  geom_smooth(aes(displ,hwy, linetype = drv))
#line per class using color
ggplot(mpg) +
geom_smooth(aes(displ,hwy, color = drv)) | /info_3010/Homeworks/practice_w10/practice_w10_ark0174.R | no_license | Silicon-beep/UNT_coursework | R | false | false | 452 | r | library(ggplot2)
# Preview the first rows of the built-in mpg fuel-economy data set.
head(mpg)
# Scatter plot: engine displacement vs. highway mpg, colored by vehicle class.
ggplot(data = mpg) + geom_point(aes(x = displ, y = hwy, color = class))
# Same scatter plot, faceted into one panel per vehicle class.
ggplot(mpg) +
geom_point(aes(displ, hwy)) +
facet_wrap(~class, nrow=7)
# Smoothed trend line over all points (default smoother with confidence band).
ggplot(mpg) +
geom_smooth(aes(displ, hwy))
# One smoothed line per drive-train type (drv), distinguished by line type.
ggplot(mpg) +
geom_smooth(aes(displ,hwy, linetype = drv))
# One smoothed line per drive-train type (drv), distinguished by color.
ggplot(mpg) +
geom_smooth(aes(displ,hwy, color = drv))
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of CohortMethodAceiVsThz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Prepare results for the Evidence Explorer Shiny app.
#'
#' @param resultsZipFile Path to a zip file containing results from a study executed by this package.
#' @param dataFolder A folder where the data files for the Evidence Explorer app will be stored.
#'
#' @examples
#'
#' \dontrun{
#' # Add results from three databases to the Shiny app data folder:
#' prepareForEvidenceExplorer("ResultsMDCD.zip", "/shinyData")
#' prepareForEvidenceExplorer("ResultsMDCR.zip", "/shinyData")
#' prepareForEvidenceExplorer("ResultsCCAE.zip", "/shinyData")
#'
#' # Launch the Shiny app:
#' launchEvidenceExplorer("/shinyData")
#' }
#'
#' @export
prepareForEvidenceExplorer <- function(resultsZipFile, dataFolder) {
  # Unpack a study results zip file and convert its .csv tables to .rds files
  # in dataFolder, as expected by the Evidence Explorer Shiny app.
  if (!file.exists(dataFolder)) {
    dir.create(dataFolder, recursive = TRUE)
  }
  # Extract into a dedicated subfolder of the session temp directory. The
  # original code used tempdir() itself, so the on.exit() cleanup below would
  # have deleted the session-wide temporary directory used by other code.
  tempFolder <- file.path(tempdir(), "EvidenceExplorerUnzip")
  dir.create(tempFolder, showWarnings = FALSE, recursive = TRUE)
  on.exit(unlink(tempFolder, recursive = TRUE))
  utils::unzip(resultsZipFile, exdir = tempFolder)
  databaseFileName <- file.path(tempFolder, "database.csv")
  if (!file.exists(databaseFileName)) {
    stop("Cannot find file database.csv in zip file")
  }
  databaseId <- read.csv(databaseFileName, stringsAsFactors = FALSE)$database_id
  # These tables can be very large, so they are split into one file per
  # target-comparator pair, allowing the Shiny app to load them lazily.
  splittableTables <- c("covariate_balance", "preference_score_dist", "kaplan_meier_dist")
  # Write one .rds file for a single target-comparator subset of a table.
  processSubset <- function(subset, tableName) {
    targetId <- subset$target_id[1]
    comparatorId <- subset$comparator_id[1]
    fileName <- sprintf("%s_t%s_c%s_%s.rds", tableName, targetId, comparatorId, databaseId)
    saveRDS(subset, file.path(dataFolder, fileName))
  }
  # Convert one extracted .csv file to .rds, splitting it first if needed.
  processFile <- function(file) {
    tableName <- gsub(".csv$", "", file)
    table <- read.csv(file.path(tempFolder, file))
    if (tableName %in% splittableTables) {
      subsets <- split(table, list(table$target_id, table$comparator_id))
      plyr::l_ply(subsets, processSubset, tableName = tableName)
    } else {
      saveRDS(table, file.path(dataFolder, sprintf("%s_%s.rds", tableName, databaseId)))
    }
  }
  files <- list.files(tempFolder, ".*.csv")
  plyr::l_ply(files, processFile, .progress = "text")
}
#' Launch the Evidence Explorer Shiny app
#'
#' @param dataFolder A folder where the data files for the Evidence Explorer app will be stored. Use the
#' \code{\link{prepareForEvidenceExplorer}} to populate this folder.
#' @param blind Should the user be blinded to the main results?
#' @param launch.browser Should the app be launched in your default browser, or in a Shiny window.
#' Note: copying to clipboard will not work in a Shiny window.
#'
#' @details
#' Launches a Shiny app that allows the user to explore the evidence
#'
#' @export
launchEvidenceExplorer <- function(dataFolder, blind = TRUE, launch.browser = TRUE) {
  # Launch the Evidence Explorer Shiny app on the data in dataFolder.
  ensure_installed("DT")
  appDir <- system.file("shiny", "EvidenceExplorer", package = "CohortMethodAceiVsThz")
  # The app reads its settings from the global environment; remove them once
  # runApp() (which blocks until the app is closed) returns.
  .GlobalEnv$shinySettings <- list(dataFolder = dataFolder, blind = blind)
  on.exit(rm(shinySettings, envir = .GlobalEnv))
  # Bug fix: the launch.browser argument was previously accepted but never
  # forwarded, so it had no effect.
  shiny::runApp(appDir, launch.browser = launch.browser)
}
# Borrowed from devtools: https://github.com/hadley/devtools/blob/ba7a5a4abd8258c52cb156e7b26bb4bf47a79f0b/R/utils.r#L44
# TRUE when `pkg` is installed at a version >= `version`, FALSE otherwise
# (including when the package is absent altogether). Borrowed from devtools.
is_installed <- function (pkg, version = 0) {
  found <- tryCatch(utils::packageVersion(pkg), error = function(e) NULL)
  if (is.null(found)) {
    return(FALSE)
  }
  # NOTE(review): comparing a package_version against the numeric default 0
  # relies on numeric_version coercion of a number -- confirm; the string "0"
  # would be unambiguous.
  found >= version
}
# Borrowed and adapted from devtools: https://github.com/hadley/devtools/blob/ba7a5a4abd8258c52cb156e7b26bb4bf47a79f0b/R/utils.r#L74
ensure_installed <- function(pkg) {
  # Verify that `pkg` is installed, offering to install it when running
  # interactively; otherwise stop with an informative error.
  # Borrowed and adapted from devtools.
  if (!is_installed(pkg)) {
    msg <- paste0(sQuote(pkg), " must be installed for this functionality.")
    if (interactive()) {
      message(msg, "\nWould you like to install it?")
      # utils:: prefixes added: menu() and install.packages() live in utils,
      # which is not attached to the search path of package code.
      if (utils::menu(c("Yes", "No")) == 1) {
        utils::install.packages(pkg)
      } else {
        stop(msg, call. = FALSE)
      }
    } else {
      stop(msg, call. = FALSE)
    }
  }
}
| /extras/CohortMethodAceiVsThz/R/ShinyApps.R | permissive | mokjpn/TheBookOfOhdsi | R | false | false | 4,647 | r | # Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of CohortMethodAceiVsThz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Prepare results for the Evidence Explorer Shiny app.
#'
#' @param resultsZipFile Path to a zip file containing results from a study executed by this package.
#' @param dataFolder A folder where the data files for the Evidence Explorer app will be stored.
#'
#' @examples
#'
#' \dontrun{
#' # Add results from three databases to the Shiny app data folder:
#' prepareForEvidenceExplorer("ResultsMDCD.zip", "/shinyData")
#' prepareForEvidenceExplorer("ResultsMDCR.zip", "/shinyData")
#' prepareForEvidenceExplorer("ResultsCCAE.zip", "/shinyData")
#'
#' # Launch the Shiny app:
#' launchEvidenceExplorer("/shinyData")
#' }
#'
#' @export
prepareForEvidenceExplorer <- function(resultsZipFile, dataFolder) {
# resultsZipFile <- "c:/temp/ResultsMDCD.zip"
# dataFolder <- "c:/temp/shinyData"
if (!file.exists(dataFolder)) {
dir.create(dataFolder, recursive = TRUE)
}
tempFolder <- tempdir()
on.exit(unlink(tempFolder, recursive = TRUE))
utils::unzip(resultsZipFile, exdir = tempFolder)
databaseFileName <- file.path(tempFolder, "database.csv")
if (!file.exists(databaseFileName)) {
stop("Cannot find file database.csv in zip file")
}
databaseId <- read.csv(databaseFileName, stringsAsFactors = FALSE)$database_id
splittableTables <- c("covariate_balance", "preference_score_dist", "kaplan_meier_dist")
processSubet <- function(subset, tableName) {
targetId <- subset$target_id[1]
comparatorId <- subset$comparator_id[1]
fileName <- sprintf("%s_t%s_c%s_%s.rds", tableName, targetId, comparatorId, databaseId)
saveRDS(subset, file.path(dataFolder, fileName))
}
processFile <- function(file) {
tableName <- gsub(".csv$", "", file)
table <- read.csv(file.path(tempFolder, file))
if (tableName %in% splittableTables) {
subsets <- split(table, list(table$target_id, table$comparator_id))
plyr::l_ply(subsets, processSubet, tableName = tableName)
} else {
saveRDS(table, file.path(dataFolder, sprintf("%s_%s.rds", tableName, databaseId)))
}
}
files <- list.files(tempFolder, ".*.csv")
plyr::l_ply(files, processFile, .progress = "text")
}
#' Launch the SqlRender Developer Shiny app
#'
#' @param dataFolder A folder where the data files for the Evidence Explorer app will be stored. Use the
#' \code{\link{prepareForEvidenceExplorer}} to populate this folder.
#' @param blind Should the user be blinded to the main results?
#' @param launch.browser Should the app be launched in your default browser, or in a Shiny window.
#' Note: copying to clipboard will not work in a Shiny window.
#'
#' @details
#' Launches a Shiny app that allows the user to explore the evidence
#'
#' @export
launchEvidenceExplorer <- function(dataFolder, blind = TRUE, launch.browser = TRUE) {
ensure_installed("DT")
appDir <- system.file("shiny", "EvidenceExplorer", package = "CohortMethodAceiVsThz")
.GlobalEnv$shinySettings <- list(dataFolder = dataFolder, blind = blind)
on.exit(rm(shinySettings, envir=.GlobalEnv))
shiny::runApp(appDir)
}
# Borrowed from devtools: https://github.com/hadley/devtools/blob/ba7a5a4abd8258c52cb156e7b26bb4bf47a79f0b/R/utils.r#L44
is_installed <- function (pkg, version = 0) {
installed_version <- tryCatch(utils::packageVersion(pkg),
error = function(e) NA)
!is.na(installed_version) && installed_version >= version
}
# Borrowed and adapted from devtools: https://github.com/hadley/devtools/blob/ba7a5a4abd8258c52cb156e7b26bb4bf47a79f0b/R/utils.r#L74
ensure_installed <- function(pkg) {
if (!is_installed(pkg)) {
msg <- paste0(sQuote(pkg), " must be installed for this functionality.")
if (interactive()) {
message(msg, "\nWould you like to install it?")
if (menu(c("Yes", "No")) == 1) {
install.packages(pkg)
} else {
stop(msg, call. = FALSE)
}
} else {
stop(msg, call. = FALSE)
}
}
}
|
#!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
if (length(args)<2) {
stop("At least 2 argument must be supplied.\n", call.=FALSE)
}
RegulomFolder <- "/home/studerf/mnt/Zone4/rnalizer/SCNorm/results/Ref.H9_SC.Orga3B_lfc.1_pval.0.01_Norm.Mean/SingleCell/ResultTetra/Reguloms"
outdir <- "/home/studerf/mnt/Zone2/Pipeline/SCNorm/results/Ref.H9_SC.Orga1A_lfc.1_pval.0.1_Norm.Mean/SingleCell/pval.0.05_lfc.1/ResultTetra/"
RegulomFolder <- args[1]
outdir <- args[2]
dir.create(outdir, showWarnings = FALSE)
dir.create(file.path(outdir,"RegulomListGenes"), showWarnings = FALSE)
listfileRegulom <- list.files(path = RegulomFolder, pattern=".tsv")
i <- 1
while (i <= length(listfileRegulom)){
result <- read.table(file.path(RegulomFolder,listfileRegulom[i]), header = TRUE, sep = "\t")
Col1 <- toupper(result[,1])
Col2 <- toupper(result[,2])
if (nrow(result) > 0){
Col <- c(Col1,Col2)
ListGenes <- matrix(data = Col,ncol = 1)
colnames(ListGenes) <- c("Node")
ListGenes <- ListGenes[!duplicated(ListGenes[,1,drop=FALSE]),1,drop=FALSE]
write.table(ListGenes, file = file.path(file.path(outdir,"RegulomListGenes"),paste("ListGenesRegu.",substr(listfileRegulom[i],17,nchar(listfileRegulom[i])-4),".tsv", sep = "")),row.names = FALSE, quote = FALSE, sep='\t')
}
i <- i+1
} | /rnalizer/Tetramer/convertnetworktolist.R | no_license | studyfranco/GNEA | R | false | false | 1,311 | r | #!/usr/bin/env Rscript
# Read every regulon edge-list file (*.tsv, header plus two gene columns) in
# RegulomFolder and write, for each non-empty one, the de-duplicated list of
# upper-cased gene names it contains to <outdir>/RegulomListGenes/.
#
# Usage: Rscript convertnetworktolist.R <RegulomFolder> <outdir>
args = commandArgs(trailingOnly=TRUE)
if (length(args)<2) {
  stop("At least 2 argument must be supplied.\n", call.=FALSE)
}
# The original script assigned hard-coded development paths here and then
# immediately overwrote them with the command-line arguments; the dead
# assignments have been removed.
RegulomFolder <- args[1]
outdir <- args[2]
dir.create(outdir, showWarnings = FALSE)
dir.create(file.path(outdir, "RegulomListGenes"), showWarnings = FALSE)
listfileRegulom <- list.files(path = RegulomFolder, pattern = ".tsv")
# One pass per regulon file; a for loop over seq_along() replaces the
# original while loop with its manually maintained counter.
for (i in seq_along(listfileRegulom)) {
  result <- read.table(file.path(RegulomFolder, listfileRegulom[i]), header = TRUE, sep = "\t")
  if (nrow(result) > 0) {
    # Genes can appear in either column of the edge list; pool both.
    Col <- c(toupper(result[, 1]), toupper(result[, 2]))
    ListGenes <- matrix(data = Col, ncol = 1)
    colnames(ListGenes) <- c("Node")
    ListGenes <- ListGenes[!duplicated(ListGenes[, 1, drop = FALSE]), 1, drop = FALSE]
    # NOTE(review): substr(..., 17, nchar - 4) assumes a fixed 16-character
    # filename prefix and a ".tsv" suffix -- confirm against the regulon
    # file naming convention.
    write.table(ListGenes,
                file = file.path(outdir, "RegulomListGenes",
                                 paste("ListGenesRegu.", substr(listfileRegulom[i], 17, nchar(listfileRegulom[i]) - 4), ".tsv", sep = "")),
                row.names = FALSE, quote = FALSE, sep = '\t')
  }
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nnf-activation.R
\name{nnf_log_softmax}
\alias{nnf_log_softmax}
\title{Log_softmax}
\usage{
nnf_log_softmax(input, dim = NULL, dtype = NULL)
}
\arguments{
\item{input}{(Tensor) input}
\item{dim}{(int) A dimension along which log_softmax will be computed.}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of the
returned tensor. If specified, the input tensor is cast to \code{dtype}
before the operation is performed. This is useful for preventing data type
overflows. Default: \code{NULL}.}
}
\description{
Applies a softmax followed by a logarithm.
}
\details{
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower, and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
}
| /man/nnf_log_softmax.Rd | permissive | mlverse/torch | R | false | true | 880 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nnf-activation.R
\name{nnf_log_softmax}
\alias{nnf_log_softmax}
\title{Log_softmax}
\usage{
nnf_log_softmax(input, dim = NULL, dtype = NULL)
}
\arguments{
\item{input}{(Tensor) input}
\item{dim}{(int) A dimension along which log_softmax will be computed.}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of returned tensor.
If specified, the input tensor is casted to \code{dtype} before the operation
is performed. This is useful for preventing data type overflows.
Default: \code{NULL}.}
}
\description{
Applies a softmax followed by a logarithm.
}
\details{
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower, and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
}
|
#!/usr/bin/Rscript
library(biomaRt)
x <- read.table("merged_reports.txt", sep="\t", header=T)
mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
results <- getBM(attributes=c("entrezgene_description", "hgnc_symbol"), mart=mart)
colnames(results) <- c("Description", "Parent_Gene")
# allow for missing descriptions! Crucial to capture lncRNAs
x <- merge(x, results, by="Parent_Gene", all.x=T)
x <- subset(x, select=c(circRNA_ID, Type, Mature_Length, Parent_Gene, Strand, Log2FC, pvalue, Adjusted_pvalue, Description))
up <- subset(x, x$Log2FC > 0)
down <- subset(x, x$Log2FC < 0)
up <- up[order(abs(up$Log2FC), decreasing=T),]
down <- down[order(abs(down$Log2FC), decreasing=T),]
write.table(up, "Up_Regulated_circRNAs.txt", sep="\t", quote=F, row.names=F)
write.table(down, "Down_Regulated_circRNAs.txt", sep="\t", quote=F, row.names=F)
write.table(x, "DE_circRNAs.txt", sep="\t", quote=F, row.names=F)
| /bin/annotate_report.R | permissive | BarryDigby/circrna_original | R | false | false | 931 | r | #!/usr/bin/Rscript
library(biomaRt)
# Annotate merged circRNA differential-expression results with parent-gene
# descriptions from Ensembl, then write up-/down-regulated and combined tables.
x <- read.table("merged_reports.txt", sep="\t", header=T)
# Query Ensembl (human) for gene descriptions keyed by HGNC symbol.
# Requires network access.
mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
results <- getBM(attributes=c("entrezgene_description", "hgnc_symbol"), mart=mart)
colnames(results) <- c("Description", "Parent_Gene")
# Left join so rows with missing descriptions are kept! Crucial to capture
# lncRNAs. NOTE(review): merge() can duplicate rows if a symbol maps to
# several descriptions -- confirm symbols are unique in the biomaRt result.
x <- merge(x, results, by="Parent_Gene", all.x=T)
x <- subset(x, select=c(circRNA_ID, Type, Mature_Length, Parent_Gene, Strand, Log2FC, pvalue, Adjusted_pvalue, Description))
# Split by direction of change and order each table by absolute fold change.
up <- subset(x, x$Log2FC > 0)
down <- subset(x, x$Log2FC < 0)
up <- up[order(abs(up$Log2FC), decreasing=T),]
down <- down[order(abs(down$Log2FC), decreasing=T),]
write.table(up, "Up_Regulated_circRNAs.txt", sep="\t", quote=F, row.names=F)
write.table(down, "Down_Regulated_circRNAs.txt", sep="\t", quote=F, row.names=F)
write.table(x, "DE_circRNAs.txt", sep="\t", quote=F, row.names=F)
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "transplant")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.OneR", par.vals = list(B = 6), predict.type = "prob")
#:# hash
#:# 9d291e09f218b4b2aaa053eaa987496b
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_transplant/classification_binaryClass/9d291e09f218b4b2aaa053eaa987496b/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 693 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "transplant")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.OneR", par.vals = list(B = 6), predict.type = "prob")
#:# hash
#:# 9d291e09f218b4b2aaa053eaa987496b
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
####################################################################################
#Collects all SNPs within a gene region, their rsids and positions
#Input: Gene name and upstream/downstream distance
#Output: Info of all SNPs in the region, as a data frame
####################################################################################
library(RSQLite)
library(dplyr)
library(dbplyr)
library(biomaRt)
#Get all SNPs in a gene region, upstream_dist and downstream_dist from the start site and stop site of the gene respectively.
GetFlankingSNPs <- function(chr,gene_position_start,gene_position_end,upstream_dist,downstream_dist){
#Connect to annotation database
anno_sql_name<- "all_snp_stats.sqlite"
path <- '~/bsu_scratch/SQL/'
setwd(path)
anno_con <- RSQLite::dbConnect(SQLite(), dbname = anno_sql_name)
anno_db <- tbl(anno_con,'all_snp_stats')
#Start of flanking region
start_of_region <- gene_position_start - upstream_dist
#End of flanking region
end_of_region <- gene_position_end + downstream_dist
snps_within_region <- dplyr::filter(anno_db,as.numeric(chromosome)==chr & as.numeric(position) >= start_of_region & as.numeric(position) <= end_of_region) %>% dplyr::select(chromosome,position,rsid,minor_allele_frequency,info,alleleA,alleleB,alleleA_frequency,alleleB_frequency) %>% collect()
RSQLite::dbDisconnect(anno_con)
return(snps_within_region)
}
#Get genomic position, given a gene name.
GetGenePosition <- function(gene_name){
#UKB data is based on the GRCh37 reference genome. All SNP positions are based on this.
ensembl = useEnsembl(biomart="ensembl", dataset="hsapiens_gene_ensembl", GRCh=37)
#Get gene chr and position information
gene_pos <- getBM(attributes=c('ensembl_gene_id','hgnc_symbol','chromosome_name','start_position','end_position'), filters = 'hgnc_symbol', values =gene_name, mart = ensembl)
return(gene_pos)
}
#Main function, calls GetGenePosition() to get genomic position,
#then calls GetFlankingSNPs() to get all SNPs in the gene region
AllSNPsOfGene <- function(gene_name,upstream_dist,downstream_dist){
print('Getting Gene Pos')
gene_pos <- GetGenePosition(gene_name)
print('Getting Flanking SNPs')
flanking_snps <- GetFlankingSNPs(gene_pos$chromosome_name,gene_pos$start_position,gene_pos$end_position,upstream_dist,downstream_dist)
return(flanking_snps)
} | /Gene_Phenotype_Association/GetSNPsOfGene.R | no_license | zmx21/polyresponse | R | false | false | 2,364 | r | ####################################################################################
#Collects all SNPs within a gene region, their rsids and positions
#Input: Gene name and upstream/downstream distance
#Output: Info of all SNPs in the region, as a data frame
####################################################################################
library(RSQLite)
library(dplyr)
library(dbplyr)
library(biomaRt)
#Get all SNPs in a gene region, upstream_dist and downstream_dist from the start site and stop site of the gene respectively.
GetFlankingSNPs <- function(chr,gene_position_start,gene_position_end,upstream_dist,downstream_dist){
  # Return all SNPs on chromosome `chr` within
  # [gene_position_start - upstream_dist, gene_position_end + downstream_dist]
  # from the local SQLite annotation database, as a collected data frame.
  anno_sql_name <- "all_snp_stats.sqlite"
  path <- '~/bsu_scratch/SQL/'
  # Pass the full database path to dbConnect() instead of calling setwd():
  # the original changed the caller's working directory as a side effect and
  # never restored it.
  anno_con <- RSQLite::dbConnect(SQLite(), dbname = file.path(path, anno_sql_name))
  # Close the connection even if the query below fails.
  on.exit(RSQLite::dbDisconnect(anno_con))
  anno_db <- tbl(anno_con,'all_snp_stats')
  #Start of flanking region
  start_of_region <- gene_position_start - upstream_dist
  #End of flanking region
  end_of_region <- gene_position_end + downstream_dist
  snps_within_region <- dplyr::filter(anno_db,as.numeric(chromosome)==chr & as.numeric(position) >= start_of_region & as.numeric(position) <= end_of_region) %>% dplyr::select(chromosome,position,rsid,minor_allele_frequency,info,alleleA,alleleB,alleleA_frequency,alleleB_frequency) %>% collect()
  return(snps_within_region)
}
#Get genomic position, given a gene name.
GetGenePosition <- function(gene_name){
  # Look up chromosome, start and end position of `gene_name` (an HGNC
  # symbol) via Ensembl/biomaRt. Requires network access; returns a data
  # frame with one row per matching Ensembl gene id.
  #UKB data is based on the GRCh37 reference genome. All SNP positions are based on this.
  ensembl = useEnsembl(biomart="ensembl", dataset="hsapiens_gene_ensembl", GRCh=37)
  #Get gene chr and position information
  gene_pos <- getBM(attributes=c('ensembl_gene_id','hgnc_symbol','chromosome_name','start_position','end_position'), filters = 'hgnc_symbol', values =gene_name, mart = ensembl)
  return(gene_pos)
}
#Main function, calls GetGenePosition() to get genomic position,
#then calls GetFlankingSNPs() to get all SNPs in the gene region
AllSNPsOfGene <- function(gene_name,upstream_dist,downstream_dist){
  # Resolve the gene's genomic coordinates, then pull every SNP in the
  # flanking window around it.
  print('Getting Gene Pos')
  pos <- GetGenePosition(gene_name)
  print('Getting Flanking SNPs')
  GetFlankingSNPs(pos$chromosome_name, pos$start_position, pos$end_position,
                  upstream_dist, downstream_dist)
}
library(ape)
testtree <- read.tree("3484_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3484_0_unrooted.txt") | /codeml_files/newick_trees_processed/3484_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Unroot the phylogenetic tree stored in 3484_0.txt (Newick format) and
# write the result to 3484_0_unrooted.txt.
testtree <- read.tree("3484_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3484_0_unrooted.txt")
library(mlogitBMA)
# Checks that mnl.spec() builds the expected specification on the Car data:
# the variable-usage matrix dimensions, the alternative-varying variable
# names, which coefficients are shared across alternatives, and which
# alternatives receive intercepts.
test.specification <- function() {
	spec <- mnl.spec(choice ~ fuel + price + cost | hsg2, Car,
						varying=5:ncol(Car), sep='')
	stopifnot(all(dim(spec$variable.used) == c(6,4)))
	stopifnot(all(spec$varying.names == c('fuel', 'price', 'cost')))
	stopifnot(sum(spec$same.coefs) == 3 & !spec$same.coefs['hsg2'])
	stopifnot(sum(spec$intercepts)==5 & !spec$intercepts[1])
	cat('\nSpecification test OK.\n')
}
# Fits an MNL model on the Car data and sanity-checks the dimensions of the
# coefficient table and the likelihood-ratio index of the summary.
test.estimation.car <- function() {
	cat('\nRunning test for MNL estimation on Car dataset ... ')
	est <- estimate.mlogit(choice ~ price + cost | coml5, Car,
							varying=5:ncol(Car), sep='')
	sest <- summary(est)
	stopifnot(all(dim(sest$coefs) == c(12, 4)))
	stopifnot(sest$lratio > 0.1)
	cat(' OK.\n')
}
# Fits an MNL model on the Catsup data and sanity-checks the coefficient
# table dimensions, the likelihood-ratio index and the BIC of the summary.
test.estimation.catsup <- function() {
	cat('\nRunning test for MNL estimation on Catsup dataset ... ')
	est <- estimate.mlogit(choice ~ disp + feat + price, Catsup,
							varying=2:(ncol(Catsup)-1), sep='.')
	sest <- summary(est)
	stopifnot(all(dim(sest$coefs) == c(6, 4)))
	stopifnot(sest$lratio > 0.3)
	stopifnot(sest$bic > 5083)
	print(sest)
	cat(' OK.\n')
}
# Runs Bayesian model averaging on the Car data and checks that exactly one
# model is selected over the expected 14 candidate variables.
test.bic.mlogit.car <- function() {
	cat('\nRunning test for BMA on Car dataset ... ')
	res <- bic.mlogit(choice ~ price + cost + speed + acc + size | hsg2, Car,
						varying=5:ncol(Car), sep='', include.intercepts = FALSE,
						verbose = TRUE)
	stopifnot(all(dim(res$bic.glm$which)==c(1,14))) # 1 model selected, 14 variables in total
	cat('... BMA test OK.\n')
}
# Runs BMA on the Catsup data (base choice = alternative 4), checks that two
# models are selected over 11 candidate variables, then re-estimates both
# selected models via estimate.mlogit() and checks their coefficient tables.
test.bic.mlogit.catsup <- function() {
	cat('\nRunning test for BMA on Catsup dataset ... ')
	res <- bic.mlogit(choice ~ 1 | disp + feat + price, Catsup,
						varying=2:(ncol(Catsup)-1), sep='.',
						base.choice = 4,
						include.intercepts = FALSE,
						verbose = TRUE)
	summary(res)
	stopifnot(all(dim(res$bic.glm$which)==c(2,11))) # 2 models selected, 11 variables in total
	cat('... BMA test OK.\n')
	est.res <- estimate.mlogit(res, Catsup)
	stopifnot(length(est.res)==2)
	stopifnot(all(dim(est.res[[1]]$coefs) == c(12, 4)))
	stopifnot(all(dim(est.res[[2]]$coefs) == c(11, 4)))
	cat('Estimation on the BMA object OK.\n')
}
# Load the example data sets shipped with the mlogit package.
data('Car', package='mlogit')
# Convert the choice column into a numerical code, since that is how the
# alternative-specific variables are constructed.
Car[,'choice'] <- as.integer(gsub('^choice', '', Car[,'choice']))
data('Catsup', package='mlogit')
# Run the test suite.
test.specification()
test.estimation.car()
test.estimation.catsup()
test.bic.mlogit.car()
# turn off to speed the tests up
#test.bic.mlogit.catsup() | /tests/test_functions.R | no_license | cran/mlogitBMA | R | false | false | 2,524 | r | library(mlogitBMA)
test.specification <- function() {
spec <- mnl.spec(choice ~ fuel + price + cost | hsg2, Car,
varying=5:ncol(Car), sep='')
stopifnot(all(dim(spec$variable.used) == c(6,4)))
stopifnot(all(spec$varying.names == c('fuel', 'price', 'cost')))
stopifnot(sum(spec$same.coefs) == 3 & !spec$same.coefs['hsg2'])
stopifnot(sum(spec$intercepts)==5 & !spec$intercepts[1])
cat('\nSpecification test OK.\n')
}
test.estimation.car <- function() {
cat('\nRunning test for MNL estimation on Car dataset ... ')
est <- estimate.mlogit(choice ~ price + cost | coml5, Car,
varying=5:ncol(Car), sep='')
sest <- summary(est)
stopifnot(all(dim(sest$coefs) == c(12, 4)))
stopifnot(sest$lratio > 0.1)
cat(' OK.\n')
}
test.estimation.catsup <- function() {
cat('\nRunning test for MNL estimation on Catsup dataset ... ')
est <- estimate.mlogit(choice ~ disp + feat + price, Catsup,
varying=2:(ncol(Catsup)-1), sep='.')
sest <- summary(est)
stopifnot(all(dim(sest$coefs) == c(6, 4)))
stopifnot(sest$lratio > 0.3)
stopifnot(sest$bic > 5083)
print(sest)
cat(' OK.\n')
}
test.bic.mlogit.car <- function() {
cat('\nRunning test for BMA on Car dataset ... ')
res <- bic.mlogit(choice ~ price + cost + speed + acc + size | hsg2, Car,
varying=5:ncol(Car), sep='', include.intercepts = FALSE,
verbose = TRUE)
stopifnot(all(dim(res$bic.glm$which)==c(1,14))) # 1 model selected, 14 variables in total
cat('... BMA test OK.\n')
}
test.bic.mlogit.catsup <- function() {
cat('\nRunning test for BMA on Catsup dataset ... ')
res <- bic.mlogit(choice ~ 1 | disp + feat + price, Catsup,
varying=2:(ncol(Catsup)-1), sep='.',
base.choice = 4,
include.intercepts = FALSE,
verbose = TRUE)
summary(res)
stopifnot(all(dim(res$bic.glm$which)==c(2,11))) # 2 models selected, 11 variables in total
cat('... BMA test OK.\n')
est.res <- estimate.mlogit(res, Catsup)
stopifnot(length(est.res)==2)
stopifnot(all(dim(est.res[[1]]$coefs) == c(12, 4)))
stopifnot(all(dim(est.res[[2]]$coefs) == c(11, 4)))
cat('Estimation on the BMA object OK.\n')
}
# load data
data('Car', package='mlogit')
# convert the choice column into a numerical code,
# since it is the way the alternative-specific variables are constructed
Car[,'choice'] <- as.integer(gsub('^choice', '', Car[,'choice']))
data('Catsup', package='mlogit')
test.specification()
test.estimation.car()
test.estimation.catsup()
test.bic.mlogit.car()
# turn off to speed the tests up
#test.bic.mlogit.catsup() |
source("~/Dropbox/Chido/convBase.R")
source("~/Dropbox/Chido/obtAtrGenIte.R")
source("~/Dropbox/Chido/plotAtr.R")
# Define the system: names of the nodes (variables) of the Boolean network.
nodos <-c("Temperatura", "Presion", "Precipitacion", "Herbivoros", "Depredadores", "MaizG", "FrijolEG", "CalabazaG", "Maiz", "FrijolE", "Calabaza", "MaizJ", "CalabazaJ", "Polinizadores", "FloresNoQuelites", "NoQuelites", "FloresQuelites", "Quelites", "FloresBorde", "Borde", "Desyerbe", "Herbicida", "Plaguicida")
# Adjacency matrix, initialized to all zeros.
matAdya<-matrix(0,length(nodos),length(nodos))
rownames(matAdya)<-nodos
colnames(matAdya)<-nodos
# Global simulation parameters.
n <-length(nodos)           # number of nodes
valu=2                      # states per node (Boolean)
no=3000                     # number of sampled initial conditions per replicate
manejo="Roundup"            # management scenario
diversidad="mzfre"          # crop-diversity scenario
perturbacion="herbivoros"   # perturbation applied
nivel="1212"                # perturbation level / run tag
# Simulation loop: `rep` replicates, each with its own RNG seed drawn once.
lista.repe <-list()
set.seed(1212)
rep=100
seed.rep <-sample(runif(100000,0,999999),rep)
for(r in 1:rep){
set.seed(seed.rep[r])
# NOTE(review): sample()'s third positional argument is `replace`; the tiny
# nonzero value 1/valu^n is coerced to TRUE here -- confirm that sampling
# with replacement (rather than a `prob=` weight) is intended.
p <-sample(0:(valu^n-1),no,1/valu^n)
#p <-seq(0,valu^length(nodos)-1,1)# exhaustive exploration (disabled)
ei <-convBase(valu,nodos,p)
rownames(ei) <-nodos
lista.repe[[r]] <-obtAtr(ei,nodos,100,manejo,diversidad,perturbacion,nivel)
}
save(lista.repe,file=paste0("~/Dropbox/Chido/",perturbacion,"/",manejo,"/",diversidad,"/",diversidad,"_",manejo,"_",perturbacion,"_",nivel,".RData"))
| /herbivoros/mzfre_Roundup_herbivoros_1212.R | no_license | laparcela/modelo_red_booleana_milpa_rafa | R | false | false | 1,243 | r | source("~/Dropbox/Chido/convBase.R")
source("~/Dropbox/Chido/obtAtrGenIte.R")
source("~/Dropbox/Chido/plotAtr.R")
#Definiendo sistema
nodos <-c("Temperatura", "Presion", "Precipitacion", "Herbivoros", "Depredadores", "MaizG", "FrijolEG", "CalabazaG", "Maiz", "FrijolE", "Calabaza", "MaizJ", "CalabazaJ", "Polinizadores", "FloresNoQuelites", "NoQuelites", "FloresQuelites", "Quelites", "FloresBorde", "Borde", "Desyerbe", "Herbicida", "Plaguicida")
#Matriz de adyacencia
matAdya<-matrix(0,length(nodos),length(nodos))
rownames(matAdya)<-nodos
colnames(matAdya)<-nodos
#Definiendo parametros globales
n <-length(nodos)
valu=2
no=3000
manejo="Roundup"
diversidad="mzfre"
perturbacion="herbivoros"
nivel="1212"
#Ciclo de simulaciones
lista.repe <-list()
set.seed(1212)
rep=100
seed.rep <-sample(runif(100000,0,999999),rep)
for(r in 1:rep){
set.seed(seed.rep[r])
p <-sample(0:(valu^n-1),no,1/valu^n)
#p <-seq(0,valu^length(nodos)-1,1)#Exploracion exhaustiva
ei <-convBase(valu,nodos,p)
rownames(ei) <-nodos
lista.repe[[r]] <-obtAtr(ei,nodos,100,manejo,diversidad,perturbacion,nivel)
}
save(lista.repe,file=paste0("~/Dropbox/Chido/",perturbacion,"/",manejo,"/",diversidad,"/",diversidad,"_",manejo,"_",perturbacion,"_",nivel,".RData"))
|
testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615786855-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 254 | r | testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
\name{basic.stats}
\alias{basic.stats}
\alias{Hs}
\alias{Ho}
\alias{print.basic.stats}
\title{Basic diversity and differentiation statistics}
\description{Estimates individual counts, allelic frequencies, observed heterozygosities and genetic diversities per locus and population.
Also Estimates mean observed heterozygosities, mean gene diversities within population Hs, Gene diversities overall Ht and corrected Htp, and Dst, Dstp.
Finally, estimates Fst and Fstp as well as Fis following Nei (1987) per locus and overall loci}
\usage{basic.stats(data,diploid=TRUE,digits=4)
\method{print}{basic.stats}(x,...)
Hs(data,...)
Ho(data,...)
}
\arguments{
\item{data}{a data frame where the first column contains the population to which the different individuals belong, and the following columns contain the genotype of the individuals -one locus per column- }
\item{diploid}{Whether individuals are diploids (default) or haploids}
\item{digits}{how many digits to print out in the output (default is 4)}
\item{x}{an object of class basic.stats}
\item{...}{further arguments to pass to print.bas.stats}
}
\value{
\item{n.ind.samp}{A table --with np (number of populations) columns and nl (number of loci) rows-- of genotype counts}
\item{pop.freq}{A list containing allele frequencies. Each element of the list is one locus.
For each locus, Populations are in columns and alleles in rows}
\item{Ho}{A table --with np (number of populations) columns and nl (number of loci) rows-- of observed heterozygosities}
\item{Hs}{A table --with np (number of populations) columns and nl (number of loci) rows-- of observed gene diversities}
\item{Fis}{A table --with np (number of populations) columns and nl (number of loci) rows--of observed Fis}
\item{perloc}{A table --with as many rows as loci-- containing basic statistics Ho, Hs, Ht, Dst, Ht', Dst', Fst, Fst' ,Fis, Dest}
\item{overall}{Basic statistics averaged over loci}
}
\references{
Nei M. (1987) Molecular Evolutionary Genetics. Columbia University Press
Jost L (2008) GST and its relatives do not measure differentiation.
Molecular Ecology, 17, 4015-4026.
Nei M, Chesser R (1983) Estimation of fixation indexes and gene diversities.
Annals of Human Genetics, 47, 253-259.
}
\author{Jerome Goudet \email{jerome.goudet@unil.ch}}
\seealso{\code{\link{ind.count}},\code{\link{pop.freq}}.}
\note{
For the perloc and overall tables (see value section), the following statistics, defined in eq.7.38-- 7.43 pp.164--5 of Nei (1987)
are estimated:
The observed heterozygosity
\deqn{Ho= 1-\sum_k \sum_i Pkii/np,}
where \eqn{Pkii} represents the proportion of homozygote \eqn{i} in sample \eqn{k} and
\eqn{np} the number of samples.
The within population gene diversity (sometimes misleadingly called expected
heterozygosity):
\deqn{Hs=\tilde{n}/(\tilde{n}-1)[1-\sum_i\bar{p_i^2}-Ho/2\tilde{n}],
}
where \eqn{\tilde{n}=np/\sum_k 1/n_k} and
\eqn{\bar{p_i^2}=\sum_k p_{ki}^2/np}
The overall gene diversity
\deqn{
Ht= 1-\sum_i\bar{p_i}^2+Hs/(\tilde{n} np)-Ho/(2\tilde{n}
np),}
where \eqn{\bar{p_i}=\sum_kp_{ki}/np}.
The amount of gene diversity among samples \eqn{Dst=Ht-Hs}
\eqn{Dst'=np/(np-1)Dst}
\eqn{Ht'=Hs+Dst'}
\eqn{Fst=Dst/Ht}.(This is not the same as Nei's \eqn{Gst},
Nei's \eqn{Gst} is an estimator of \eqn{Fst} based on allele frequencies only)
\eqn{Fst'=Dst'/Ht'}
\eqn{Fis=1-Ho/Hs}
Last, \eqn{Dest=np/(np-1) (Ht'-Hs)/(1-Hs)} a measure of population
differentiation as defined by Jost (2008) is also given
Here, the \eqn{p_{ki}} are unweighted by sample size. These statistics are
estimated for each locus and an overall loci estimates is also given, as the
unweighted average of the per locus estimates. In this way, monomorphic loci
are accounted for (with estimated value of 0) in the overall estimates.
Note that the equations used here all rely on genotypic rather than allelic
number and are corrected for heterozygosity.
}
\examples{
data(gtrunchier)
basic.stats(gtrunchier[,-1])
Hs(gtrunchier[,-2])
Ho(gtrunchier[,-2])
}
\keyword{univar}
| /man/basic.stats.rd | no_license | jgx65/hierfstat | R | false | false | 4,024 | rd | \name{basic.stats}
\alias{basic.stats}
\alias{Hs}
\alias{Ho}
\alias{print.basic.stats}
\title{Basic diversity and differentiation statistics}
\description{Estimates individual counts, allelic frequencies, observed heterozygosities and genetic diversities per locus and population.
Also Estimates mean observed heterozygosities, mean gene diversities within population Hs, Gene diversities overall Ht and corrected Htp, and Dst, Dstp.
Finally, estimates Fst and Fstp as well as Fis following Nei (1987) per locus and overall loci}
\usage{basic.stats(data,diploid=TRUE,digits=4)
\method{print}{basic.stats}(x,...)
Hs(data,...)
Ho(data,...)
}
\arguments{
\item{data}{a data frame where the first column contains the population to which the different individuals belong, and the following columns contain the genotype of the individuals -one locus per column- }
\item{diploid}{Whether individuals are diploids (default) or haploids}
\item{digits}{how many digits to print out in the output (default is 4)}
\item{x}{an object of class basic.stats}
\item{...}{further arguments to pass to print.bas.stats}
}
\value{
\item{n.ind.samp}{A table --with np (number of populations) columns and nl (number of loci) rows-- of genotype counts}
\item{pop.freq}{A list containing allele frequencies. Each element of the list is one locus.
For each locus, Populations are in columns and alleles in rows}
\item{Ho}{A table --with np (number of populations) columns and nl (number of loci) rows-- of observed heterozygosities}
\item{Hs}{A table --with np (number of populations) columns and nl (number of loci) rows-- of observed gene diversities}
\item{Fis}{A table --with np (number of populations) columns and nl (number of loci) rows--of observed Fis}
\item{perloc}{A table --with as many rows as loci-- containing basic statistics Ho, Hs, Ht, Dst, Ht', Dst', Fst, Fst' ,Fis, Dest}
\item{overall}{Basic statistics averaged over loci}
}
\references{
Nei M. (1987) Molecular Evolutionary Genetics. Columbia University Press
Jost L (2008) GST and its relatives do not measure differentiation.
Molecular Ecology, 17, 4015-4026.
Nei M, Chesser R (1983) Estimation of fixation indexes and gene diversities.
Annals of Human Genetics, 47, 253-259.
}
\author{Jerome Goudet \email{jerome.goudet@unil.ch}}
\seealso{\code{\link{ind.count}},\code{\link{pop.freq}}.}
\note{
For the perloc and overall tables (see value section), the following statistics, defined in eq.7.38-- 7.43 pp.164--5 of Nei (1987)
are estimated:
The observed heterozygosity
\deqn{Ho= 1-\sum_k \sum_i Pkii/np,}
where \eqn{Pkii} represents the proportion of homozygote \eqn{i} in sample \eqn{k} and
\eqn{np} the number of samples.
The within population gene diversity (sometimes misleadingly called expected
heterozygosity):
\deqn{Hs=\tilde{n}/(\tilde{n}-1)[1-\sum_i\bar{p_i^2}-Ho/2\tilde{n}],
}
where \eqn{\tilde{n}=np/\sum_k 1/n_k} and
\eqn{\bar{p_i^2}=\sum_k p_{ki}^2/np}
The overall gene diversity
\deqn{
Ht= 1-\sum_i\bar{p_i}^2+Hs/(\tilde{n} np)-Ho/(2\tilde{n}
np),}
where \eqn{\bar{p_i}=\sum_kp_{ki}/np}.
The amount of gene diversity among samples \eqn{Dst=Ht-Hs}
\eqn{Dst'=np/(np-1)Dst}
\eqn{Ht'=Hs+Dst'}
\eqn{Fst=Dst/Ht}.(This is not the same as Nei's \eqn{Gst},
Nei's \eqn{Gst} is an estimator of \eqn{Fst} based on allele frequencies only)
\eqn{Fst'=Dst'/Ht'}
\eqn{Fis=1-Ho/Hs}
Last, \eqn{Dest=np/(np-1) (Ht'-Hs)/(1-Hs)} a measure of population
differentiation as defined by Jost (2008) is also given
Here, the \eqn{p_{ki}} are unweighted by sample size. These statistics are
estimated for each locus and an overall loci estimates is also given, as the
unweighted average of the per locus estimates. In this way, monomorphic loci
are accounted for (with estimated value of 0) in the overall estimates.
Note that the equations used here all rely on genotypic rather than allelic
number and are corrected for heterozygosity.
}
\examples{
data(gtrunchier)
basic.stats(gtrunchier[,-1])
Hs(gtrunchier[,-2])
Ho(gtrunchier[,-2])
}
\keyword{univar}
|
## Two functions "makeCacheMatrix" and "cacheSolve" below
## cache the inverse of a matrix
## "makeCacheMatrix" function creates a special matrix object
## that can cache its inverse (I)
makeCacheMatrix <- function(x = matrix()) {
## Initialize the inverse property
I <- NULL
## Set the matrix
set <- function( matrix ) {
x <<- matrix
I <<- NULL
}
## Get the matrix
get <- function() {
## Return the matrix
x
}
## Set the inverse of the matrix
setInverse <- function(inverse) {
I <<- inverse
}
## Get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
I
}
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## "cacheSolve" function Compute the inverse of the special matrix
## that is the inverse of 'x'
cacheSolve <- function(x, ...) {
## If the inverse has already been calculated (the matrix has not changed),
## the "cachesolve" should retrieve the inverse from the cache.
I <- x$getInverse()
## Return the inverse if its already calculated
if(!is.null(I)) {
message("getting cached data")
return(I)
}
## Otherwise get the matrix from object
data <- x$get()
## Calculate the inverse using matrix multiplication
I <- solve(data, ...)
## Set the inverse to the object
x$setInverse(I)
## Return the matrix
I
}
| /cachematrix.R | no_license | zhouning188/ProgrammingAssignment2-1 | R | false | false | 1,401 | r | ## Two functions "makeCacheMatrix" and "cacheSolve" below
## cache the inverse of a matrix
## "makeCacheMatrix" function creates a special matrix object
## that can cache its inverse (I)
makeCacheMatrix <- function(x = matrix()) {
## Initialize the inverse property
I <- NULL
## Set the matrix
set <- function( matrix ) {
x <<- matrix
I <<- NULL
}
## Get the matrix
get <- function() {
## Return the matrix
x
}
## Set the inverse of the matrix
setInverse <- function(inverse) {
I <<- inverse
}
## Get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
I
}
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## "cacheSolve" function Compute the inverse of the special matrix
## that is the inverse of 'x'
cacheSolve <- function(x, ...) {
## If the inverse has already been calculated (the matrix has not changed),
## the "cachesolve" should retrieve the inverse from the cache.
I <- x$getInverse()
## Return the inverse if its already calculated
if(!is.null(I)) {
message("getting cached data")
return(I)
}
## Otherwise get the matrix from object
data <- x$get()
## Calculate the inverse using matrix multiplication
I <- solve(data, ...)
## Set the inverse to the object
x$setInverse(I)
## Return the matrix
I
}
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/endometrium.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=1,family="gaussian",standardize=FALSE)
sink('./endometrium_100.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/endometrium/endometrium_100.R | no_license | esbgkannan/QSMART | R | false | false | 357 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/endometrium.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=1,family="gaussian",standardize=FALSE)
sink('./endometrium_100.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splash.point.R
\name{splash.point}
\alias{splash.point}
\title{Simple process-led algorithms for simulating habitats (SPLASH v.2.0)}
\usage{
splash.point(
sw_in,
tc,
pn,
lat,
elev,
slop = 0,
asp = 0,
soil_data,
Au = 0,
resolution = 250
)
}
\arguments{
\item{sw_in}{Incoming shortwave solar radiation (W m-2), timeseries object of monthly or daily averages.}
\item{tc}{Air temperature (戼㸰C), same timestep as sw_in}
\item{pn}{Precipitation (mm), same timestep as sw_in}
\item{elev}{Elevation (m.a.s.l)}
\item{slop}{Terrain feature: slope inclination (戼㸰)}
\item{asp}{Terrain feature: slope orientation (戼㸰), standard clockwise from 0.0戼㸰 North}
\item{soil_data}{Soil data organized as a vector in the way: c(sand(perc),clay(perc),organic matter(perc),coarse-fragments-fraction(perc), bulk density(g cm-3))}
}
\value{
a time series matrix including:
\itemize{
\item \eqn{wn}: Soil water content within the first 2m of depth (mm).
\item \eqn{ro}: Runoff (mm d-1).
\item \eqn{pet}: Potential evapotranspiration (mm d-1).
\item \eqn{aet}: Actual evapotranspiration (mm d-1).
\item \eqn{snow}: Snow water equivalent (mm).
\item \eqn{cond}: Condensation (mm d-1).
\item \eqn{bflow}: Lateral flow (mm d-1).
\item \eqn{netr}: Daytime net radiation (MJ d-1).
}
}
\description{
R/C++ implementation of the SPLASH v.2.0 algorithm (Davis et al., 2017; Sandoval et al., in prep.).
}
\examples{
splash.point(sw_in=200, tc=15, pn=10, lat=44,elev=1800,slop=10,asp=270,soil_data=c(sand=44,clay=2,OM=6,fgravel=12))
}
\keyword{evapotranspiration,}
\keyword{moisture}
\keyword{soil}
\keyword{splash,}
| /man/splash.point.Rd | no_license | dsval/rsplash | R | false | true | 1,764 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splash.point.R
\name{splash.point}
\alias{splash.point}
\title{Simple process-led algorithms for simulating habitats (SPLASH v.2.0)}
\usage{
splash.point(
sw_in,
tc,
pn,
lat,
elev,
slop = 0,
asp = 0,
soil_data,
Au = 0,
resolution = 250
)
}
\arguments{
\item{sw_in}{Incoming shortwave solar radiation (W m-2), timeseries object of monthly or daily averages.}
\item{tc}{Air temperature (戼㸰C), same timestep as sw_in}
\item{pn}{Precipitation (mm), same timestep as sw_in}
\item{elev}{Elevation (m.a.s.l)}
\item{slop}{Terrain feature: slope inclination (戼㸰)}
\item{asp}{Terrain feature: slope orientation (戼㸰), standard clockwise from 0.0戼㸰 North}
\item{soil_data}{Soil data organized as a vector in the way: c(sand(perc),clay(perc),organic matter(perc),coarse-fragments-fraction(perc), bulk density(g cm-3))}
}
\value{
a time series matrix including:
\itemize{
\item \eqn{wn}: Soil water content within the first 2m of depth (mm).
\item \eqn{ro}: Runoff (mm d-1).
\item \eqn{pet}: Potential evapotranspiration (mm d-1).
\item \eqn{aet}: Actual evapotranspiration (mm d-1).
\item \eqn{snow}: Snow water equivalent (mm).
\item \eqn{cond}: Condensation (mm d-1).
\item \eqn{bflow}: Lateral flow (mm d-1).
\item \eqn{netr}: Daytime net radiation (MJ d-1).
}
}
\description{
R/C++ implementation of the SPLASH v.2.0 algorithm (Davis et al., 2017; Sandoval et al., in prep.).
}
\examples{
splash.point(sw_in=200, tc=15, pn=10, lat=44,elev=1800,slop=10,asp=270,soil_data=c(sand=44,clay=2,OM=6,fgravel=12))
}
\keyword{evapotranspiration,}
\keyword{moisture}
\keyword{soil}
\keyword{splash,}
|
source("../../../R/h2oPerf/prologue.R")
runGLM(x = 1:11, y = 12, family = "binomial", nfolds = 0)
source("../../../R/h2oPerf/epilogue.R")
| /h2o-perf/bench/tests/multinode/glm_fv_one-billion-rows/model.R | permissive | ivanliu1989/h2o | R | false | false | 138 | r | source("../../../R/h2oPerf/prologue.R")
runGLM(x = 1:11, y = 12, family = "binomial", nfolds = 0)
source("../../../R/h2oPerf/epilogue.R")
|
set.seed(1000)
sample(1:10)
for (i in 1:5) {
print(paste("Ini perulangan ke", i))
}
#ekuivalen
print(paste("Ini perulangan ke", 1))
print(paste("Ini perulangan ke", 2))
print(paste("Ini perulangan ke", 3))
print(paste("Ini perulangan ke", 4))
print(paste("Ini perulangan ke", 5))
#slicing vektor
vektor1<- c(1,2,3,4)
vektor1[2]
vektor2 <- c(1:20)
vektor2[c(1,3)]
View(iris)
iris[1] #menambil kolom pertama dan tetap data frame
iris[,1] #mengambil kolom pertama tapi jadi vektor
str(iris[,1])
iris[1,2]
iris[1:4,2:3]
1+1
2/10
| /preview.R | no_license | armdhn23/praktikum-mss | R | false | false | 535 | r | set.seed(1000)
sample(1:10)
for (i in 1:5) {
print(paste("Ini perulangan ke", i))
}
#ekuivalen
print(paste("Ini perulangan ke", 1))
print(paste("Ini perulangan ke", 2))
print(paste("Ini perulangan ke", 3))
print(paste("Ini perulangan ke", 4))
print(paste("Ini perulangan ke", 5))
#slicing vektor
vektor1<- c(1,2,3,4)
vektor1[2]
vektor2 <- c(1:20)
vektor2[c(1,3)]
View(iris)
iris[1] #menambil kolom pertama dan tetap data frame
iris[,1] #mengambil kolom pertama tapi jadi vektor
str(iris[,1])
iris[1,2]
iris[1:4,2:3]
1+1
2/10
|
library(circlize)
### Name: convert_length
### Title: Convert units
### Aliases: convert_length
### ** Examples
fa = letters[1:10]
circos.par(cell.padding = c(0, 0, 0, 0), track.margin = c(0, 0))
circos.initialize(fa, xlim = cbind(rep(0, 10), runif(10, 0.5, 1.5)))
circos.track(ylim = c(0, 1), track.height = convert_length(5, "mm"))
circos.par(track.margin = c(0, convert_length(2, "mm")))
circos.track(ylim = c(0, 1), track.height = convert_length(1, "cm"))
circos.par(track.margin = c(0, convert_length(5, "mm")))
circos.track(ylim = c(0, 1), track.height = convert_length(1, "inches"))
circos.clear()
| /data/genthat_extracted_code/circlize/examples/convert_length.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 612 | r | library(circlize)
### Name: convert_length
### Title: Convert units
### Aliases: convert_length
### ** Examples
fa = letters[1:10]
circos.par(cell.padding = c(0, 0, 0, 0), track.margin = c(0, 0))
circos.initialize(fa, xlim = cbind(rep(0, 10), runif(10, 0.5, 1.5)))
circos.track(ylim = c(0, 1), track.height = convert_length(5, "mm"))
circos.par(track.margin = c(0, convert_length(2, "mm")))
circos.track(ylim = c(0, 1), track.height = convert_length(1, "cm"))
circos.par(track.margin = c(0, convert_length(5, "mm")))
circos.track(ylim = c(0, 1), track.height = convert_length(1, "inches"))
circos.clear()
|
## Trends_CompareMethods.R
#
source(file.path("code", "paths+packages.R"))
## metrics we care about
metrics <- c("annualnoflowdays", "zeroflowfirst", "peak2z_length")
## load data
gage_regions <-
readr::read_csv(file.path("results", "00_SelectGagesForAnalysis_GageRegions.csv"))
gage_trends <-
readr::read_csv(file.path("results", "00_SelectGagesForAnalysis_GageSampleTrends.csv")) %>%
subset(metric %in% metrics) %>%
dplyr::left_join(gage_regions, by = "gage_ID") %>%
subset(complete.cases(.))
## summarize
p_thres <- 0.05
gage_trends %>%
dplyr::group_by(metric) %>%
dplyr::summarize(n_finite = sum(is.finite(sen_slope)),
n_sig = sum(mk_p < p_thres, na.rm = T),
n_sig_pos = sum(sen_slope > 0 & mk_p < p_thres, na.rm = T),
n_sig_neg = sum(sen_slope <= 0 & mk_p < p_thres, na.rm = T))
## focus on annualnoflowdays (most common metric)
sigtrends_noslope <-
gage_trends$gage_ID[gage_trends$mk_p < 0.05 &
gage_trends$metric == "annualnoflowdays" &
gage_trends$sen_slope == 0]
sigtrends_yesslope <-
gage_trends$gage_ID[gage_trends$mk_p < 0.05 &
gage_trends$metric == "annualnoflowdays" &
gage_trends$sen_slope != 0]
## now: load data and choose a few case studies for different types of trends
gage_sample_annual <-
readr::read_csv(file = file.path("results", "00_SelectGagesForAnalysis_GageSampleAnnual.csv"))
## explore gages for example
gage_sample_annual %>%
subset(gage_ID == sigtrends_yesslope[43]) %>%
ggplot(aes(x = currentclimyear, y = annualnoflowdays)) +
geom_point()
# sample to a few test gages showing different types of slopes
gages_test <- c(2313230, 6177500, 7362100, 2266205, 2291580, 7315200)
gage_example <-
gage_sample_annual %>%
subset(gage_ID %in% gages_test) %>%
mutate(year = currentclimyear,
gage = factor(gage_ID, levels = gages_test)) %>%
dplyr::select(gage, year, annualnoflowdays)
sen <- function(..., weights = NULL) {
mblm::mblm(...)
}
ggplot(gage_example, aes(x = year, y = annualnoflowdays)) +
geom_point() +
facet_wrap(~gage, scales = "free_y", ncol = 2) +
stat_smooth(method = "lm", color = "blue", se = F) +
stat_smooth(method = "glm", method.args = list(family = "poisson"), color = "green", se = F) +
stat_smooth(method = sen, color = "red", se = F) +
scale_y_continuous(name = "Annual No-Flow Days") +
scale_x_continuous(name = "Year") +
labs(title = "Comparison of Slope Methods for Some Example Gages",
subtitle = "Blue = Linear, Green = Poisson, Red = Theil-Sen") +
ggsave(file.path("figures_manuscript", "Trends_CompareMethods.png"),
width = 190, height = 220, units = "mm")
## count number of gages that have no-flow in at least 50% of years
df_test <-
gage_sample_annual %>%
group_by(gage_ID) %>%
summarize(n_noflow = sum(annualnoflowdays > 0, na.rm = T),
n_total = sum(is.finite(annualnoflowdays)),
prc_noflow = n_noflow/n_total)
sum(df_test$prc_noflow > 0.5)
| /figures_manuscript/Trends_CompareMethods.R | no_license | dry-rivers-rcn/IntermittencyTrends | R | false | false | 3,078 | r | ## Trends_CompareMethods.R
#
source(file.path("code", "paths+packages.R"))
## metrics we care about
metrics <- c("annualnoflowdays", "zeroflowfirst", "peak2z_length")
## load data
gage_regions <-
readr::read_csv(file.path("results", "00_SelectGagesForAnalysis_GageRegions.csv"))
gage_trends <-
readr::read_csv(file.path("results", "00_SelectGagesForAnalysis_GageSampleTrends.csv")) %>%
subset(metric %in% metrics) %>%
dplyr::left_join(gage_regions, by = "gage_ID") %>%
subset(complete.cases(.))
## summarize
p_thres <- 0.05
gage_trends %>%
dplyr::group_by(metric) %>%
dplyr::summarize(n_finite = sum(is.finite(sen_slope)),
n_sig = sum(mk_p < p_thres, na.rm = T),
n_sig_pos = sum(sen_slope > 0 & mk_p < p_thres, na.rm = T),
n_sig_neg = sum(sen_slope <= 0 & mk_p < p_thres, na.rm = T))
## focus on annualnoflowdays (most common metric)
sigtrends_noslope <-
gage_trends$gage_ID[gage_trends$mk_p < 0.05 &
gage_trends$metric == "annualnoflowdays" &
gage_trends$sen_slope == 0]
sigtrends_yesslope <-
gage_trends$gage_ID[gage_trends$mk_p < 0.05 &
gage_trends$metric == "annualnoflowdays" &
gage_trends$sen_slope != 0]
## now: load data and choose a few case studies for different types of trends
gage_sample_annual <-
readr::read_csv(file = file.path("results", "00_SelectGagesForAnalysis_GageSampleAnnual.csv"))
## explore gages for example
gage_sample_annual %>%
subset(gage_ID == sigtrends_yesslope[43]) %>%
ggplot(aes(x = currentclimyear, y = annualnoflowdays)) +
geom_point()
# sample to a few test gages showing different types of slopes
gages_test <- c(2313230, 6177500, 7362100, 2266205, 2291580, 7315200)
gage_example <-
gage_sample_annual %>%
subset(gage_ID %in% gages_test) %>%
mutate(year = currentclimyear,
gage = factor(gage_ID, levels = gages_test)) %>%
dplyr::select(gage, year, annualnoflowdays)
sen <- function(..., weights = NULL) {
mblm::mblm(...)
}
ggplot(gage_example, aes(x = year, y = annualnoflowdays)) +
geom_point() +
facet_wrap(~gage, scales = "free_y", ncol = 2) +
stat_smooth(method = "lm", color = "blue", se = F) +
stat_smooth(method = "glm", method.args = list(family = "poisson"), color = "green", se = F) +
stat_smooth(method = sen, color = "red", se = F) +
scale_y_continuous(name = "Annual No-Flow Days") +
scale_x_continuous(name = "Year") +
labs(title = "Comparison of Slope Methods for Some Example Gages",
subtitle = "Blue = Linear, Green = Poisson, Red = Theil-Sen") +
ggsave(file.path("figures_manuscript", "Trends_CompareMethods.png"),
width = 190, height = 220, units = "mm")
## count number of gages that have no-flow in at least 50% of years
df_test <-
gage_sample_annual %>%
group_by(gage_ID) %>%
summarize(n_noflow = sum(annualnoflowdays > 0, na.rm = T),
n_total = sum(is.finite(annualnoflowdays)),
prc_noflow = n_noflow/n_total)
sum(df_test$prc_noflow > 0.5)
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%
%
% on Wed Feb 08 14:37:46 2006.
%
% Generator was the Rdoc class, which is part of the R.oo package written
% by Henrik Bengtsson, 2001-2004.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{progeny.Niche}
\alias{progeny.Niche}
\alias{Niche.progeny}
\alias{progeny.Niche}
\alias{progeny,Niche-method}
\keyword{methods}
\keyword{internal}
\title{Performs offspring, crossover, mutation, and elitism mechanism to generate the ``evolved'' niche}
\description{
Performs offspring, crossover, mutation, and elitism mechanism to generate the ``evolved'' niche.
}
\usage{\method{progeny}{Niche}(ni, immigration=NULL, ...)}
\arguments{
\item{immigration}{Chromosomes wanted to immigrate (replacing) in the niche.}
}
\details{
The basic idea to generate a progeny is a random selection biased toward
the best chromosomes (see Goldberg). We implented this idea as a weighted
probability for a chromosome to be selected using the formula:
p = scale * max(0,fitness - mean * mean(fitness))\^\ power
where scale, mean and power are the properties of the niche
(\code{offspringScaleFactor, offspringMeanFactor and offspringPowerFactor}
respectively). The default values were selected to be reasonably bias
when the variance in the fitness are both high (at early generations) and low
(in late generatios).
\code{offspring} is part of \code{progeny} method.
For related details For more information see \code{\link{Niche}}.
}
\value{
Returns nothing.
}
\examples{
cr <- Chromosome(genes=newCollection(Gene(shape1=1, shape2=100),5))
cr
ni <- Niche(chromosomes = newRandomCollection(cr, 10))
ni
ni$fitness <- 1:10/10 # tricky fitness, only for showing purposes
progeny(ni)
ni
}
\references{Goldberg, David E. 1989 \emph{Genetic Algorithms in Search, Optimization and Machine Learning}. Addison-Wesley Pub. Co. ISBN: 0201157675}
\author{Victor Trevino. Francesco Falciani Group. University of Birmingham, U.K. http://www.bip.bham.ac.uk/bioinf}
\seealso{
For more information see \code{\link{Niche}}.
\code{\link[galgo:offspring.Niche]{*offspring}()},
\code{\link[galgo:crossover.Niche]{*crossover}()}.
}
\keyword{methods}
| /man/progeny.Niche.Rd | no_license | cran/galgo | R | false | false | 2,463 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%
%
% on Wed Feb 08 14:37:46 2006.
%
% Generator was the Rdoc class, which is part of the R.oo package written
% by Henrik Bengtsson, 2001-2004.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{progeny.Niche}
\alias{progeny.Niche}
\alias{Niche.progeny}
\alias{progeny.Niche}
\alias{progeny,Niche-method}
\keyword{methods}
\keyword{internal}
\title{Performs offspring, crossover, mutation, and elitism mechanism to generate the ``evolved'' niche}
\description{
Performs offspring, crossover, mutation, and elitism mechanism to generate the ``evolved'' niche.
}
\usage{\method{progeny}{Niche}(ni, immigration=NULL, ...)}
\arguments{
\item{immigration}{Chromosomes wanted to immigrate (replacing) in the niche.}
}
\details{
The basic idea to generate a progeny is a random selection biased toward
the best chromosomes (see Goldberg). We implented this idea as a weighted
probability for a chromosome to be selected using the formula:
p = scale * max(0,fitness - mean * mean(fitness))\^\ power
where scale, mean and power are the properties of the niche
(\code{offspringScaleFactor, offspringMeanFactor and offspringPowerFactor}
respectively). The default values were selected to be reasonably bias
when the variance in the fitness are both high (at early generations) and low
(in late generatios).
\code{offspring} is part of \code{progeny} method.
For related details For more information see \code{\link{Niche}}.
}
\value{
Returns nothing.
}
\examples{
cr <- Chromosome(genes=newCollection(Gene(shape1=1, shape2=100),5))
cr
ni <- Niche(chromosomes = newRandomCollection(cr, 10))
ni
ni$fitness <- 1:10/10 # tricky fitness, only for showing purposes
progeny(ni)
ni
}
\references{Goldberg, David E. 1989 \emph{Genetic Algorithms in Search, Optimization and Machine Learning}. Addison-Wesley Pub. Co. ISBN: 0201157675}
\author{Victor Trevino. Francesco Falciani Group. University of Birmingham, U.K. http://www.bip.bham.ac.uk/bioinf}
\seealso{
For more information see \code{\link{Niche}}.
\code{\link[galgo:offspring.Niche]{*offspring}()},
\code{\link[galgo:crossover.Niche]{*crossover}()}.
}
\keyword{methods}
|
#
# "`-''-/").___..--''"`-._
# (`6_ 6 ) `-. ( ).`-.__.`) WE ARE ...
# (_Y_.)' ._ ) `._ `. ``-..-' PENN STATE!
# _ ..`--'_..-_/ /--'_.' ,'
# (il),-'' (li),' ((!.-'
#
#
#Author: Guido Cervone (cervone@polygonsu.edu) and Yanni Cao (yvc5268@polygonsu.edu)
# Geoinformatics and Earth Observation Laboratory (http://geolab.polygonsu.edu)
# Department of Geography and Institute for CyberScience
# The Pennsylvania State University
#
#
require(raster)
require(foreach)
require(doParallel)
#' Zero-pad a number to at least two digits.
#'
#' Used to build MODIS tile identifiers (e.g. "h05v10") and date patterns
#' (e.g. "2010.03"). Unlike the original, this version is vectorized and
#' always returns character ("05", "12"), which pastes identically in the
#' string contexts where it is used.
#'
#' @param x Numeric or character scalar/vector coercible to integer.
#' @return Character vector of the same length, each element zero-padded
#'   to width 2.
addZero <- function(x) {
  sprintf("%02d", as.integer(x))
}
# Download all MODIS HDF tiles for one product / month / year from the USGS
# LP DAAC data pool (e4ftl01.cr.usgs.gov), restricted to the requested
# horizontal (h) and vertical (v) tile indices.
#
# Arguments:
#   product  - MODIS product short name, e.g. "MOD15A2" (Terra products under MOLT/).
#   month    - month number (1-12); matched against the YYYY.MM directory listing.
#   year     - four-digit year.
#   h, v     - vectors of horizontal / vertical tile indices to fetch.
#   ver      - product collection/version string (default "005").
#   username, pwd - Earthdata credentials passed to wget.
#   tmp.dir  - directory where listing HTML and HDF files are written.
#
# Returns: character vector of downloaded HDF file names (from dir()) that
# exist and are larger than 100 bytes.
#
# NOTE(review): credentials are placed on the wget command line via `extra`,
# so they are visible in the process list and in the printed log line.
# NOTE(review): a fresh 8-worker cluster is created and torn down for every
# date directory; worker count is hardcoded.
modis.download = function( product, month, year, h, v, ver="005", username="", pwd="", tmp.dir="./") {
base.url = "http://e4ftl01.cr.usgs.gov/"
#base.url.2 = paste("http://",username,":",pwd,"@","e4ftl01.cr.usgs.gov/",sep="")
# Download the HTML index listing the available date directories for this
# product/version.
#
url = paste(base.url,"MOLT/",product,".",ver,sep="")
list.fname = paste(tmp.dir, "dates.html",sep="")
day.fname = paste(tmp.dir, "day.html",sep="")
# Download the directory-listing page.
#
download.file(url,list.fname)
# Read it as whitespace-separated tokens (one HTML fragment per element).
#
html=scan(list.fname,"character")
# Keep only tokens that mention the requested "YYYY.MM" date prefix.
#
#pattern = paste(month.abb[as.numeric(month)],year,sep="-")
pattern = paste(year,addZero(month),sep=".")
valid = html[ grep(pattern, html)]
# Locate the full "YYYY.MM.DD" date string inside each matching token.
info = regexpr("[[:digit:]]{4}\\.[[:digit:]]{2}\\.[[:digit:]]{2}",valid)
start = as.vector(info)
end = start + attr(info,"match.length") - 1
files = NULL
# NOTE(review): if no dates match, 1:length(valid) is 1:0 and this loop
# still runs twice with invalid indices.
for ( d in 1:length(valid) ) {
date = substring(valid[d], start[d],end[d])
print(paste("Date = ",date) )
# URL of this date's directory.
#
url.month = paste(url,"/",date,sep="")
# Download the per-day listing page.
#
download.file(url.month,day.fname,quiet=TRUE)
# Read it; `html` is reused (overwrites the date listing).
#
html=scan(day.fname,"character")
# Parallelize over the h tile indices; addZero must be exported to workers.
cl <- makeCluster(8)
registerDoParallel(cl)
foreach( lon=h, .export="addZero") %dopar% {
for ( lat in v ) {
# Match the hXXvYY HDF link for this tile in the listing tokens.
pattern = paste(".(h",addZero(lon),"v",addZero(lat),").*(hdf)<",sep="")
scene = html[grep(pattern,html)]
# Extract the file name between '>' and '<' of the anchor tag.
#
info = regexpr(">(.*)<",scene)
fname = substring(scene, info[1]+1, info[1]+attr(info,"match.length")-2)
# Make sure this tile actually exists in the listing.
#
if ( !identical(fname, character(0))) {
# Build source URL and local destination path for this scene.
#
url.scene = paste(url.month,"/",fname,sep="")
ofname = paste(tmp.dir,fname,sep="")
#files = rbind(ret, c(date, ofname))
# Skip files already on disk (allows resuming interrupted runs).
#
if (!file.exists(ofname)) {
extra = paste("--http-user=",username," --http-password=",pwd,sep="")
print(paste("Downloading",url.scene,extra))
download.file(url.scene,ofname,method="wget",quiet=TRUE,extra=extra)
}
}
}
}
stopCluster(cl)
} # End looping through the dates
# Collect everything downloaded for this product/year from disk.
# NOTE(review): "." and parentheses in this pattern are regex metacharacters,
# not literals, so the match is looser than intended.
files = dir(path=tmp.dir, pattern=paste("^",product,".A",year,".*(hdf)$",sep=""))
# Drop files that do not exist or are tiny (<= 100 bytes; failed downloads).
#
v1 = as.vector(sapply(files, file.exists))
sizes = as.vector(unlist(sapply(files, file.info)[1,]))
v2 = sizes > 100
files = files[v1&v2]
return(files)
}
# Mosaic the downloaded HDF granules day by day (and optionally reproject
# them) by delegating to ModisMosaic()/the MRT tools.
# `files` are MODIS granule names containing an "AYYYYDDD" acquisition
# token; year and Julian day are parsed from each name, so the function
# no longer depends on an undefined global `year`.
# bands_subset/delete intentionally have no usable defaults and must be
# supplied by the caller (unchanged from the original signature).
modis.mosaic = function( files,
                         MRTpath="~/local/MRT/bin",
                         pixel_size=.00250,
                         bands_subset=bands_subset,
                         proj=T,
                         delete=delete,
                         proj_type="GEO") {
  # Extract the "AYYYYDDD" acquisition token (year + Julian day-of-year).
  id = regexpr("A[0-9]{7}", files)
  temp = substring(files, id + 1, id + attr(id, "match.length") - 1)
  yr = substring(temp, 1, 4)
  jd = as.numeric(substring(temp, 5, 7))
  # Convert the Julian day to a calendar date within each file's own year.
  # (The original used an undefined global `year` as the origin.)
  dates = as.Date(jd - 1, origin = as.Date(paste(yr, "-01-01", sep="")))
  dates = as.vector(gsub("-", ".", dates))
  dHDF = data.frame(Date=dates, Name=files)
  ModisMosaic(dHDF, mosaic=T,
              MRTpath, pixel_size=pixel_size, proj=proj, proj_type=proj_type,
              bands_subset=bands_subset, delete=delete)
}
# pattern=".Lai_1km\\.tif$", filename,
# Load each raster in `files`, mask cells outside [valid[1], valid[2]],
# apply the linear calibration value*gain + offset, and return the
# per-cell mean across all rasters (NA cells excluded from the mean).
modis.calibrate.merge = function(files, offset, gain, valid ) {
  # Now merge the mosaics for each day. This takes a bit of time
  merged = list()
  for ( i in seq_along(files) ) {
    print(paste("Loading",files[i]))
    # Reuse the shared single-raster helper instead of duplicating the
    # masking/scaling logic inline (same computation as before).
    merged[[i]] = modis.rescale(raster(files[i]), offset, gain, valid)
  }
  merged.stack = stack(merged)
  res = calc(merged.stack, mean, na.rm=T)
  return(res)
}
# The following two functions have been adopted from ModisDownload.R
# Version: 3.3, 6th Oct. 2014
# Author: Babak Naimi (naimi.b@gmail.com)
#
# Dispatch helper (adapted from ModisDownload.R): for each acquisition
# date in dHDF (columns Date = "YYYY.MM.DD", Name = granule file name),
# either mosaic the day's granules with mosaicHDF() and optionally
# reproject the result with reprojectHDF(), or — when mosaicking is
# disabled or fails — reproject the individual granules one by one.
# Called for its side effects (files written by the MRT tools); it has
# no useful return value.
ModisMosaic = function(dHDF,MRTpath,mosaic=FALSE,bands_subset,delete=FALSE,proj=FALSE,UL="",LR="",resample_type='NEAREST_NEIGHBOR',proj_type='UTM', proj_params='0 0 0 0 0 0 0 0 0 0 0 0',datum='WGS84',utm_zone=NA,pixel_size) {
  #dHDF <- .getMODIS(x,h,v,dates,version)
  dHDF$Date <- as.character(dHDF$Date)
  dHDF$Name <- as.character(dHDF$Name)
  # A single row can never be mosaicked.
  if (nrow(dHDF) < 2) mosaic <- FALSE
  if (mosaic) {
    du <- unique(dHDF$Date)
    for (d in du) {
      dw <- dHDF[which(dHDF$Date == d),]
      if (nrow(dw) > 1){
        # NOTE(review): this nested sub() appears intended to turn
        # "YYYY.MM.DD" into "YYYY-MM-DD", but the argument matching is
        # convoluted (the inner result becomes the outer call's x);
        # gsub("\\.", "-", d) would be the clear equivalent — verify
        # its actual output before touching.
        date_name <- sub(sub(pattern="\\.", replacement="-", d), pattern="\\.", replacement="-", d)
        name <- paste("Mosaic_",date_name,".hdf",sep='')
        Mosaic.success <- mosaicHDF(dw$Name,name,MRTpath=MRTpath,bands_subset=bands_subset,delete=delete)
        if (Mosaic.success) {
          if (delete) for (ModisName in dw[,2]) unlink(paste(getwd(), '/', ModisName, sep=""))
          if (proj) {
            pref <- strsplit(dw[1,2],'\\.')[[1]][1]
            e <- reprojectHDF(name,filename=paste(pref,'_',date_name,'.tif',sep=''),MRTpath=MRTpath,UL=UL,LR=LR,proj_type=proj_type,proj_params=proj_params,utm_zone=utm_zone,pixel_size=pixel_size)
            if (e & delete) unlink(paste(name))
            if (!e) warning (paste("The procedure has failed to REPROJECT the mosaic image for date ",d,"!",sep=""))
          }
        } else {
          warning(paste("The procedure has failed to MOSAIC the images for date ",d,"!",sep=""))
          # Fallback: reproject each granule individually.
          if (proj) {
            warning ("Since the mosaic is failed, the individual hdf images are reprojected...")
            pref <- strsplit(dw[1,2],'\\.')[[1]]
            pref <- paste(pref[1],"_",pref[3],sep="")
            for (ModisName in dw[,2]) {
              e <- reprojectHDF(ModisName,filename=paste(pref,'_',date_name,'.tif',sep=''),MRTpath=MRTpath,UL=UL,LR=LR,bands_subset=bands_subset,proj_type=proj_type,proj_params=proj_params,utm_zone=utm_zone,pixel_size=pixel_size)
              if (e & delete) unlink(paste(ModisName))
              if (!e) warning (paste("The procedure has failed to REPROJECT the individual HDF image ",ModisName,"!",sep=""))
            }
          }
        }
      }
    }
  } else {
    # No mosaicking requested (or possible): reproject every row as-is.
    if (proj) {
      for (i in 1:nrow(dHDF)) {
        ModisName <- dHDF[i,2]
        pref <- strsplit(ModisName,'\\.')[[1]]
        pref <- paste(pref[1],"_",pref[3],sep="")
        d <- dHDF[i,1]
        date_name <- sub(sub(pattern="\\.", replacement="-", d), pattern="\\.", replacement="-", d)
        e <- reprojectHDF(ModisName,filename=paste(pref,'_',date_name,'.tif',sep=''),MRTpath=MRTpath,UL=UL,LR=LR,bands_subset=bands_subset,proj_type=proj_type,proj_params=proj_params,utm_zone=utm_zone,pixel_size=pixel_size)
        if (e & delete) unlink(paste(ModisName))
        if (!e) warning (paste("The procedure has failed to REPROJECT the individual HDF image ",ModisName,"!",sep=""))
      }
    }
  }
}
# Write an MRT mosaic parameter file (one absolute granule path per line)
# and run `mrtmosaic` over the listed HDF granules, producing `filename`
# in the current working directory. Returns TRUE on a zero exit status.
#
# TmpMosaic: name of the parameter file, written into MRTpath. The
# original read an undefined global `TmpMosaic`; it is now a defaulted
# argument, so callers that relied on such a global should pass it
# explicitly. file.path() also restores the "/" separator that
# paste(MRTpath, TmpMosaic, sep="") dropped.
mosaicHDF = function(hdfNames,filename,MRTpath,bands_subset,delete=FALSE,TmpMosaic="TmpMosaic.prm") {
  if (missing(MRTpath)) stop("MRTpath argument should be specified...")
  if (length(hdfNames) < 2) stop("mosaic cannot be called for ONE image!")
  if (missing(bands_subset)) bands_subset <- ''
  prm <- file.path(MRTpath, TmpMosaic)
  mosaicname = file(prm, open="wt")
  write(paste(getwd(),"/",hdfNames[1], sep=""), mosaicname)
  for (j in 2:length(hdfNames)) write(paste(getwd(),"/",hdfNames[j], sep=""),mosaicname,append=T)
  close(mosaicname)
  # generate mosaic:
  if (bands_subset != '') {
    e <- system(paste(MRTpath, '/mrtmosaic -i ', prm, ' -s "', bands_subset, '" -o ', getwd(), '/', filename, sep=""))
    if (e != 0) warning ("Mosaic failed! 'bands_subset' may has incorrect structure!")
  } else {
    e <- system(paste(MRTpath, '/mrtmosaic -i ', prm, ' -o ', getwd(), '/', filename, sep=""))
    if (e != 0) warning ("Mosaic failed!")
  }
  # Optionally remove the source granules after a successful mosaic.
  if (delete & e == 0) for (ModisName in hdfNames) unlink(paste(getwd(), '/', ModisName, sep=""))
  return(e == 0)
}
# Write an MRT resample parameter file (tmp.prm, created in the current
# working directory) describing one reprojection job for `hdfName`, then
# run the MRT `resample` tool on it. Returns TRUE on a zero exit status,
# FALSE otherwise.
# NOTE(review): every call reuses/overwrites tmp.prm in the working
# directory, so concurrent calls from the same directory are unsafe.
reprojectHDF = function(hdfName,filename,MRTpath,UL="",LR="",resample_type='NEAREST_NEIGHBOR',proj_type='UTM',
                         bands_subset='',proj_params='0 0 0 0 0 0 0 0 0 0 0 0',datum='WGS84',utm_zone=NA,pixel_size=1000) {
  fname = file('tmp.prm', open="wt")
  write(paste('INPUT_FILENAME = ', getwd(), '/',hdfName, sep=""), fname)
  # Optional band selection.
  if (bands_subset != '') {
    write(paste('SPECTRAL_SUBSET = ( ',bands_subset,' )',sep=''),fname,append=TRUE)
  }
  # Optional spatial subset, given as upper-left / lower-right corners in
  # output projection coordinates.
  if (UL[1] != '' & LR[1] != '') {
    write('SPATIAL_SUBSET_TYPE = OUTPUT_PROJ_COORDS', fname, append=TRUE)
    write(paste('SPATIAL_SUBSET_UL_CORNER = ( ', as.character(UL[1]),' ',as.character(UL[2]),' )',sep=''), fname, append=TRUE)
    write(paste('SPATIAL_SUBSET_LR_CORNER = ( ', as.character(LR[1]),' ',as.character(LR[2]),' )',sep=''), fname, append=TRUE)
  }
  write(paste('OUTPUT_FILENAME = ', filename, sep=""), fname, append=TRUE)
  write(paste('RESAMPLING_TYPE = ',resample_type,sep=''), fname, append=TRUE)
  write(paste('OUTPUT_PROJECTION_TYPE = ',proj_type,sep=''), fname, append=TRUE)
  write(paste('OUTPUT_PROJECTION_PARAMETERS = ( ',proj_params,' )',sep=''), fname, append=TRUE)
  write(paste('DATUM = ',datum,sep=''), fname, append=TRUE)
  # UTM needs an explicit zone; other projections ignore it.
  if (proj_type == 'UTM') write(paste('UTM_ZONE = ',utm_zone,sep=''), fname, append=TRUE)
  write(paste('OUTPUT_PIXEL_SIZE = ',as.character(pixel_size),sep=''), fname, append=TRUE)
  close(fname)
  e <- system(paste(MRTpath, '/resample -p ',getwd(),'/','tmp.prm', sep=''))
  if (e == 0) return (TRUE)
  else return(FALSE)
}
# Display a list of products that can be processed
#
# Return the MODIS land-products table shipped in ModisLP.RData (looked
# up in the current working directory).
modisProducts <- function() {
  # load() into a private environment; the rm() placed after return() in
  # the original was unreachable dead code and nothing ever leaked anyway.
  env <- new.env()
  load('ModisLP.RData', envir = env)
  env$.ModisLPxxx
}
#######################
# Linearly calibrate a raster layer: cells outside the closed range
# [valid[1], valid[2]] become NA; every remaining cell is mapped to
# value * gain + offset. Returns the modified raster object.
modis.rescale = function(temp, offset, gain, valid ) {
  v <- values(temp)
  v[v < valid[1] | v > valid[2]] <- NA
  values(temp) <- v * gain + offset
  return(temp)
}
########################
# Bulk rescale +save function
# Rescale each GeoTIFF in `files` in place: mask values outside `valid`,
# apply value*gain + offset, and overwrite the file on disk.
# Called purely for its side effects; returns NULL invisibly.
modis.bulk.rescale = function(files, offset, gain, valid ) {
  for ( i in seq_along(files) ) {
    print(paste("Loading",files[i]))
    # Delegate masking/scaling to the shared helper instead of repeating it.
    temp <- modis.rescale(raster(files[i]), offset, gain, valid)
    # `filename` spelled out (the original's `file=` only worked through
    # partial argument matching); the dead `merged` accumulator that was
    # built up but never returned has been removed.
    writeRaster(temp, filename=files[[i]], format="GTiff", overwrite=T)
  }
  invisible(NULL)
}
########################
| /Africa/old-v005/RModis_Function_Revised.R | no_license | iSDAgri/RMODIS | R | false | false | 11,921 | r |
#
# "`-''-/").___..--''"`-._
# (`6_ 6 ) `-. ( ).`-.__.`) WE ARE ...
# (_Y_.)' ._ ) `._ `. ``-..-' PENN STATE!
# _ ..`--'_..-_/ /--'_.' ,'
# (il),-'' (li),' ((!.-'
#
#
#Author: Guido Cervone (cervone@polygonsu.edu) and Yanni Cao (yvc5268@polygonsu.edu)
# Geoinformatics and Earth Observation Laboratory (http://geolab.polygonsu.edu)
# Department of Geography and Institute for CyberScience
# The Pennsylvania State University
#
#
require(raster)
require(foreach)
require(doParallel)
# Zero-pad a whole-number index to at least two characters, e.g. 7 -> "07",
# 12 -> "12"; used when assembling MODIS "hXXvYY" tile ids and "YYYY.MM"
# date patterns. Inputs are whole-number tile/month indices.
# Always returns character: the original returned numeric for x >= 10,
# which only worked because every caller pasted the result into a string.
addZero <- function( x )
{
  sprintf("%02d", as.integer(x))
}
modis.download = function( product, month, year, h, v, ver="005", username="", pwd="", tmp.dir="./") {
base.url = "http://e4ftl01.cr.usgs.gov/"
#base.url.2 = paste("http://",username,":",pwd,"@","e4ftl01.cr.usgs.gov/",sep="")
# Download list of available days
#
url = paste(base.url,"MOLT/",product,".",ver,sep="")
list.fname = paste(tmp.dir, "dates.html",sep="")
day.fname = paste(tmp.dir, "day.html",sep="")
# Download the file
#
download.file(url,list.fname)
# read it
#
html=scan(list.fname,"character")
# find out if there are any days which match our search
#
#pattern = paste(month.abb[as.numeric(month)],year,sep="-")
pattern = paste(year,addZero(month),sep=".")
valid = html[ grep(pattern, html)]
info = regexpr("[[:digit:]]{4}\\.[[:digit:]]{2}\\.[[:digit:]]{2}",valid)
start = as.vector(info)
end = start + attr(info,"match.length") - 1
files = NULL
for ( d in 1:length(valid) ) {
date = substring(valid[d], start[d],end[d])
print(paste("Date = ",date) )
# New URL
#
url.month = paste(url,"/",date,sep="")
# Download the file
#
download.file(url.month,day.fname,quiet=TRUE)
# read it
#
html=scan(day.fname,"character")
cl <- makeCluster(8)
registerDoParallel(cl)
foreach( lon=h, .export="addZero") %dopar% {
for ( lat in v ) {
pattern = paste(".(h",addZero(lon),"v",addZero(lat),").*(hdf)<",sep="")
scene = html[grep(pattern,html)]
# Just get what's in the > and < (the file name)
#
info = regexpr(">(.*)<",scene)
fname = substring(scene, info[1]+1, info[1]+attr(info,"match.length")-2)
# MAke sure that this file exists in the data
#
if ( !identical(fname, character(0))) {
# And now.... download this file
#
url.scene = paste(url.month,"/",fname,sep="")
ofname = paste(tmp.dir,fname,sep="")
#files = rbind(ret, c(date, ofname))
# Download only if it does not exist
#
if (!file.exists(ofname)) {
extra = paste("--http-user=",username," --http-password=",pwd,sep="")
print(paste("Downloading",url.scene,extra))
download.file(url.scene,ofname,method="wget",quiet=TRUE,extra=extra)
}
}
}
}
stopCluster(cl)
} # End looping through the dates
files = dir(path=tmp.dir, pattern=paste("^",product,".A",year,".*(hdf)$",sep=""))
# Remove those files that do not exist and those that are small
#
v1 = as.vector(sapply(files, file.exists))
sizes = as.vector(unlist(sapply(files, file.info)[1,]))
v2 = sizes > 100
files = files[v1&v2]
return(files)
}
modis.mosaic = function( files,
MRTpath="~/local/MRT/bin",
pixel_size=.00250,
bands_subset=bands_subset,
proj=T,
delete=delete,
proj_type="GEO") {
# Get the dates and
#
id = regexpr("A[0-9]{6}",files)
temp = substring(files,id+1,id+attr(id,"match.length"))
jd = as.numeric(substring(temp,5,7))
# Convert from Julian
#
dates = as.Date(jd-1,origin=paste(year,"-01-01",sep=""))
dates = as.vector(gsub("-",".",dates))
dHDF = data.frame(Date=dates,Name=files)
ModisMosaic(dHDF,mosaic=T,
MRTpath,pixel_size=pixel_size, proj=proj, proj_type=proj_type,bands_subset=bands_subset,delete=delete)
}
# pattern=".Lai_1km\\.tif$", filename,
modis.calibrate.merge = function(files, offset, gain, valid ) {
# Now merge the mosaics for each day. This takes a bit of time
#
merged = list()
for ( i in 1:length(files) ) {
print(paste("Loading",files[i]))
temp = raster(files[i])
values = values(temp)
values[ values<valid[1] | values>valid[2] ] = NA
values = values*gain + offset
values(temp) = values
merged[[i]] = temp
}
merged.stack = stack(merged)
res = calc(merged.stack, mean, na.rm=T)
return(res)
}
# The following two functions have been adopted from ModisDownload.R
# Version: 3.3, 6th Oct. 2014
# Author: Babak Naimi (naimi.b@gmail.com)
#
ModisMosaic = function(dHDF,MRTpath,mosaic=FALSE,bands_subset,delete=FALSE,proj=FALSE,UL="",LR="",resample_type='NEAREST_NEIGHBOR',proj_type='UTM', proj_params='0 0 0 0 0 0 0 0 0 0 0 0',datum='WGS84',utm_zone=NA,pixel_size) {
#dHDF <- .getMODIS(x,h,v,dates,version)
dHDF$Date <- as.character(dHDF$Date)
dHDF$Name <- as.character(dHDF$Name)
if (nrow(dHDF) < 2) mosaic <- FALSE
if (mosaic) {
du <- unique(dHDF$Date)
for (d in du) {
dw <- dHDF[which(dHDF$Date == d),]
if (nrow(dw) > 1){
date_name <- sub(sub(pattern="\\.", replacement="-", d), pattern="\\.", replacement="-", d)
name <- paste("Mosaic_",date_name,".hdf",sep='')
Mosaic.success <- mosaicHDF(dw$Name,name,MRTpath=MRTpath,bands_subset=bands_subset,delete=delete)
if (Mosaic.success) {
if (delete) for (ModisName in dw[,2]) unlink(paste(getwd(), '/', ModisName, sep=""))
if (proj) {
pref <- strsplit(dw[1,2],'\\.')[[1]][1]
e <- reprojectHDF(name,filename=paste(pref,'_',date_name,'.tif',sep=''),MRTpath=MRTpath,UL=UL,LR=LR,proj_type=proj_type,proj_params=proj_params,utm_zone=utm_zone,pixel_size=pixel_size)
if (e & delete) unlink(paste(name))
if (!e) warning (paste("The procedure has failed to REPROJECT the mosaic image for date ",d,"!",sep=""))
}
} else {
warning(paste("The procedure has failed to MOSAIC the images for date ",d,"!",sep=""))
if (proj) {
warning ("Since the mosaic is failed, the individual hdf images are reprojected...")
pref <- strsplit(dw[1,2],'\\.')[[1]]
pref <- paste(pref[1],"_",pref[3],sep="")
for (ModisName in dw[,2]) {
e <- reprojectHDF(ModisName,filename=paste(pref,'_',date_name,'.tif',sep=''),MRTpath=MRTpath,UL=UL,LR=LR,bands_subset=bands_subset,proj_type=proj_type,proj_params=proj_params,utm_zone=utm_zone,pixel_size=pixel_size)
if (e & delete) unlink(paste(ModisName))
if (!e) warning (paste("The procedure has failed to REPROJECT the individual HDF image ",ModisName,"!",sep=""))
}
}
}
}
}
} else {
if (proj) {
for (i in 1:nrow(dHDF)) {
ModisName <- dHDF[i,2]
pref <- strsplit(ModisName,'\\.')[[1]]
pref <- paste(pref[1],"_",pref[3],sep="")
d <- dHDF[i,1]
date_name <- sub(sub(pattern="\\.", replacement="-", d), pattern="\\.", replacement="-", d)
e <- reprojectHDF(ModisName,filename=paste(pref,'_',date_name,'.tif',sep=''),MRTpath=MRTpath,UL=UL,LR=LR,bands_subset=bands_subset,proj_type=proj_type,proj_params=proj_params,utm_zone=utm_zone,pixel_size=pixel_size)
if (e & delete) unlink(paste(ModisName))
if (!e) warning (paste("The procedure has failed to REPROJECT the individual HDF image ",ModisName,"!",sep=""))
}
}
}
}
mosaicHDF = function(hdfNames,filename,MRTpath,bands_subset,delete=FALSE) {
if (missing(MRTpath)) stop("MRTpath argument should be specified...")
if (length(hdfNames) < 2) stop("mosaic cannot be called for ONE image!")
if (missing(bands_subset)) bands_subset <- ''
if (missing(delete)) delete <- FALSE
mosaicname = file(paste(MRTpath, TmpMosaic, sep=""), open="wt")
write(paste(getwd(),"/",hdfNames[1], sep=""), mosaicname)
for (j in 2:length(hdfNames)) write(paste(getwd(),"/",hdfNames[j], sep=""),mosaicname,append=T)
close(mosaicname)
# generate mosaic:
if (bands_subset != '') {
e <- system(paste(MRTpath, '/mrtmosaic -i ', MRTpath, paste0(TmpMosaic,' -s "'),bands_subset,'" -o ',getwd(), '/',filename, sep=""))
if (e != 0) warning ("Mosaic failed! 'bands_subset' may has incorrect structure!")
} else {
e <- system(paste(MRTpath, '/mrtmosaic -i ', MRTpath, paste0(TmpMosaic,' -o '),getwd(), '/',filename, sep=""))
if (e != 0) warning ("Mosaic failed!")
}
if (delete & e == 0) for (ModisName in hdfNames) unlink(paste(getwd(), '/', ModisName, sep=""))
if (e == 0) return (TRUE)
else return (FALSE)
}
reprojectHDF = function(hdfName,filename,MRTpath,UL="",LR="",resample_type='NEAREST_NEIGHBOR',proj_type='UTM',
bands_subset='',proj_params='0 0 0 0 0 0 0 0 0 0 0 0',datum='WGS84',utm_zone=NA,pixel_size=1000) {
fname = file('tmp.prm', open="wt")
write(paste('INPUT_FILENAME = ', getwd(), '/',hdfName, sep=""), fname)
if (bands_subset != '') {
write(paste('SPECTRAL_SUBSET = ( ',bands_subset,' )',sep=''),fname,append=TRUE)
}
if (UL[1] != '' & LR[1] != '') {
write('SPATIAL_SUBSET_TYPE = OUTPUT_PROJ_COORDS', fname, append=TRUE)
write(paste('SPATIAL_SUBSET_UL_CORNER = ( ', as.character(UL[1]),' ',as.character(UL[2]),' )',sep=''), fname, append=TRUE)
write(paste('SPATIAL_SUBSET_LR_CORNER = ( ', as.character(LR[1]),' ',as.character(LR[2]),' )',sep=''), fname, append=TRUE)
}
write(paste('OUTPUT_FILENAME = ', filename, sep=""), fname, append=TRUE)
write(paste('RESAMPLING_TYPE = ',resample_type,sep=''), fname, append=TRUE)
write(paste('OUTPUT_PROJECTION_TYPE = ',proj_type,sep=''), fname, append=TRUE)
write(paste('OUTPUT_PROJECTION_PARAMETERS = ( ',proj_params,' )',sep=''), fname, append=TRUE)
write(paste('DATUM = ',datum,sep=''), fname, append=TRUE)
if (proj_type == 'UTM') write(paste('UTM_ZONE = ',utm_zone,sep=''), fname, append=TRUE)
write(paste('OUTPUT_PIXEL_SIZE = ',as.character(pixel_size),sep=''), fname, append=TRUE)
close(fname)
e <- system(paste(MRTpath, '/resample -p ',getwd(),'/','tmp.prm', sep=''))
if (e == 0) return (TRUE)
else return(FALSE)
}
# Display a list of products that can be processed
#
modisProducts <- function() {
load('ModisLP.RData')
return(.ModisLPxxx)
rm(.ModisLPxxx)
}
#######################
modis.rescale = function(temp, offset, gain, valid ) {
values = values(temp)
values[ values<valid[1] | values>valid[2] ] = NA
values = values*gain + offset
values(temp) = values
return(temp)
}
########################
# Bulk rescale +save function
modis.bulk.rescale = function(files, offset, gain, valid ) {
merged = list()
for ( i in 1:length(files) ) {
print(paste("Loading",files[i]))
temp = raster(files[i])
values = values(temp)
values[ values<valid[1] | values>valid[2] ] = NA
values = values*gain + offset
values(temp) = values
merged[[i]] = temp
writeRaster(merged[[i]], file=files[[i]],format="GTiff", overwrite=T)
}
}
########################
|
# Scan the integers 1 through 25, reporting every value of 18 or more and
# flagging the single value equal to 15.
for (n in seq_len(25)) {
  if (n >= 18) cat(n, " is 18 or greater\n")
  if (n == 15) cat(n, " is equal to 15\n")
}
| /equalcompare.R | no_license | cnguyen351/chem160module13 | R | false | false | 131 | r | for (n in 1:25) {
if (n>=18) {
cat(n," is 18 or greater\n")
}
if (n==15) {
cat(n," is equal to 15\n")
}
}
|
# Exercise 2: a basic Shiny app
# A minimal cost calculator: two numeric inputs (price, quantity) and a
# reactive text output showing price * quantity with a "$" prefix.
# Load the `shiny` package
library("shiny")
# Define a new `ui` variable. This variable should be assigned a `fluidPage()` layout
# The `fluidPage()` layout should be passed the following:
ui <- fluidPage(
  # A `titlePanel()` layout with the text "Cost Calculator"
  titlePanel("Cost Calculator"),
  # A `numericInput()` widget with the label "Price (in dollars)"
  # It should have a default value of 0 and a minimum value of 0
  # Hint: look up the function's arguments in the documentation!
  numericInput("price", label = "Price (in dollars)", value = 0, min = 0),
  # A second `numericInput()` widget with the label "Quantity"
  # It should have a default value of 1 and a minimum value of 1
  numericInput("quantity", label = "Quantity", value = 1, min = 1),
  # The word "Cost", strongly bolded
  strong("Cost"),
  # A `textOutput()` output of a calculated value labeled `cost`
  textOutput("cost")
)
# Define a `server` function (with appropriate arguments)
# This function should perform the following:
# NOTE(review): the second parameter is conventionally named `output`;
# `result` appears to work here only because shiny supplies the output
# object positionally — confirm, and prefer the conventional name.
server <- function(input, result) {
  # Assign a reactive `renderText()` function to the output's `cost` value
  # The reactive expression should return the input `price` times the `quantity`
  # So it looks nice, paste a "$" in front of it!
  result$cost <- renderText({
    return(paste0("$", input$price * input$quantity))
  })
}
# Create a new `shinyApp()` using the above ui and server
shinyApp(ui = ui, server = server)
# Load the `shiny` package
library("shiny")
# Define a new `ui` variable. This variable should be assigned a `fluidPage()` layout
# The `fluidPage()` layout should be passed the following:
ui <- fluidPage(
# A `titlePanel()` layout with the text "Cost Calculator"
titlePanel("Cost Calculator"),
# A `numericInput()` widget with the label "Price (in dollars)"
# It should have a default value of 0 and a minimum value of 0
# Hint: look up the function's arguments in the documentation!
numericInput("price", label = "Price (in dollars)", value = 0, min = 0),
# A second `numericInput()` widget with the label "Quantity"
# It should have a default value of 1 and a minimum value of 1
numericInput("quantity", label = "Quantity", value = 1, min = 1),
# The word "Cost", strongly bolded
strong("Cost"),
# A `textOutput()` output of a calculated value labeled `cost`
textOutput("cost")
)
# Define a `server` function (with appropriate arguments)
# This function should perform the following:
server <- function(input, result) {
# Assign a reactive `renderText()` function to the output's `cost` value
# The reactive expression should return the input `price` times the `quantity`
# So it looks nice, paste a "$" in front of it!
result$cost <- renderText({
return(paste0("$", input$price * input$quantity))
})
}
# Create a new `shinyApp()` using the above ui and server
shinyApp(ui = ui, server = server) |
## Load the NEI/SCC data sets only once per session; these reads are slow.
if (!exists("NEI")) {
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}
# Question: have total PM2.5 emissions in the United States decreased
# between 1999 and 2008? Sum emissions over all sources for each of the
# years 1999, 2002, 2005 and 2008, then draw a base-graphics bar chart.
aggregatedTotalByYear <- aggregate(Emissions ~ year, NEI, sum)
png('plot1.png')
barplot(
  height = aggregatedTotalByYear$Emissions,
  names.arg = aggregatedTotalByYear$year,
  xlab = "years",
  ylab = expression('total PM'[2.5]*' emission'),
  main = expression('Total PM'[2.5]*' emissions at various years')
)
dev.off()
if(!exists("NEI")){
NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("./data/Source_Classification_Code.rds")
}
# Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Using the base plotting system, make a plot showing the total PM2.5 emission from all sources
# for each of the years 1999, 2002, 2005, and 2008.
aggregatedTotalByYear <- aggregate(Emissions ~ year, NEI, sum)
png('plot1.png')
barplot(height=aggregatedTotalByYear$Emissions, names.arg=aggregatedTotalByYear$year, xlab="years", ylab=expression('total PM'[2.5]*' emission'),main=expression('Total PM'[2.5]*' emissions at various years'))
dev.off() |
# Use library() (not require()) so a missing package fails loudly at
# load time instead of require() silently returning FALSE.
library(testthat)
library(BuildingRPackage)
# make_filename() should build the bundled accident-data file name for
# the requested year.
test_that("make_filename generates the file name", {
  expect_equal(make_filename(2013), "accident_2013.csv.bz2")
})
| /tests/test.R | no_license | fdelzompo/BuildingRPackage | R | false | false | 183 | r | require(testthat)
library(BuildingRPackage)
# test make_filename
test_that("make_filename generates the file name", {
expect_equal(make_filename(2013), "accident_2013.csv.bz2")
})
|
#require(plyr)
#require(scales)
# ---------------------------------------------------------------------------------------------
# Formatting functions for ggplot graph axis
# ---------------------------------------------------------------------------------------------
#' Human Numbers: Format numbers so they're legible for humans
#' source: https://github.com/fdryan/R/blob/master/ggplot2_formatter.r
#' Use this in ggplot for labels where you might use the comma or percent functions from the
#' Scales package.
#'
#' Checks whether numbers are positive or negative.
#' Allows up to 1 significant figure
#' sapply used for element-wise application of the humanity function as a vector may include
#' numbers where billions, millions or thousands are appropriate.
#'
#' @return a character vector the same length as the input vector
#' @param x a numeric vector to format,
#' @smbl a symbol you'd like to prefix your numbers by e.g. "$"
#' @signif = the number of significant places you want the function to return
#' @examples
#' human_numbers(c(1000000 , 1500000, 10000000000))
#' human_numbers(c(1.200000e+05, -2.154660e+05, 2.387790e+05, 4.343500e+04 ,5.648675e+12), "$")
#' ggplot2 + scale_y_continuous(labels = human_numbers)
#' ggplot2 + scale_x_continuous(labels = human_numbers)
#' ggplot2 + scale_x_continuous(labels = human_gbp)
# Convert numbers to short human-readable strings ("1.5m", "2.3bn", ...),
# optionally prefixed with a symbol such as "$". Vectorised via sapply();
# NA inputs render as "-". `signif` is the number of decimal places kept
# at the chosen scale (it shadows base::signif, kept for compatibility).
human_numbers <- function(x = NULL, smbl = "", signif = 1) {
  humanity <- function(y) {
    if (!is.na(y)) {
      # Magnitude of y at each scale, rounded to `signif` decimals.
      tn <- round(abs(y) / 1e12, signif)
      b  <- round(abs(y) / 1e9,  signif)
      m  <- round(abs(y) / 1e6,  signif)
      k  <- round(abs(y) / 1e3,  signif)
      y_is_positive <- if (y >= 0) "" else "-"
      # Pick the largest scale whose rounded magnitude reaches 1.
      if (k < 1) {
        paste0(y_is_positive, smbl, round(abs(y), signif))
      } else if (m < 1) {
        paste0(y_is_positive, smbl, k, "k")
      } else if (b < 1) {
        paste0(y_is_positive, smbl, m, "m")
      } else if (tn < 1) {
        paste0(y_is_positive, smbl, b, "bn")
      } else {
        # BUG FIX: this branch called scales::comma(), but the
        # require(scales) at the top of the file is commented out, so any
        # trillion-scale input failed with "could not find function
        # comma". base::format() yields the same thousands separator.
        paste0(y_is_positive, smbl,
               format(tn, big.mark = ",", scientific = FALSE, trim = TRUE),
               "tn")
      }
    } else {
      # NA elements render as a dash.
      "-"
    }
  }
  sapply(x, humanity)
}
#' Human versions of large currency numbers - extensible via smbl
# Convenience wrappers over human_numbers() with a fixed currency prefix.
human_gbp  <- function(x) human_numbers(x, smbl = "£")
human_usd  <- function(x) human_numbers(x, smbl = "$")
human_euro <- function(x) human_numbers(x, smbl = "€")
human_num  <- function(x) human_numbers(x, smbl = "")
| /convert_to_human_readable_numbers.R | no_license | tschemic/Additional_Scripts | R | false | false | 2,490 | r | #require(plyr)
#require(scales)
# ---------------------------------------------------------------------------------------------
# Formatting functions for ggplot graph axis
# ---------------------------------------------------------------------------------------------
#' Human Numbers: Format numbers so they're legible for humans
#' source: https://github.com/fdryan/R/blob/master/ggplot2_formatter.r
#' Use this in ggplot for labels where you might use the comma or percent functions from the
#' Scales package.
#'
#' Checks whether numbers are positive or negative.
#' Allows up to 1 significant figure
#' sapply used for element-wise application of the humanity function as a vector may include
#' numbers where billions, millions or thousands are appropriate.
#'
#' @return a character vector the same length as the input vector
#' @param x a numeric vector to format,
#' @smbl a symbol you'd like to prefix your numbers by e.g. "$"
#' @signif = the number of significant places you want the function to return
#' @examples
#' human_numbers(c(1000000 , 1500000, 10000000000))
#' human_numbers(c(1.200000e+05, -2.154660e+05, 2.387790e+05, 4.343500e+04 ,5.648675e+12), "$")
#' ggplot2 + scale_y_continuous(labels = human_numbers)
#' ggplot2 + scale_x_continuous(labels = human_numbers)
#' ggplot2 + scale_x_continuous(labels = human_gbp)
# Convert numbers to short human-readable strings ("1.5m", "2.3bn", ...),
# optionally prefixed with a symbol such as "$". Vectorised via sapply();
# NA inputs render as "-". `signif` is the number of decimal places kept
# at the chosen scale (it shadows base::signif, kept for compatibility).
human_numbers <- function(x = NULL, smbl = "", signif = 1) {
  humanity <- function(y) {
    if (!is.na(y)) {
      # Magnitude of y at each scale, rounded to `signif` decimals.
      tn <- round(abs(y) / 1e12, signif)
      b  <- round(abs(y) / 1e9,  signif)
      m  <- round(abs(y) / 1e6,  signif)
      k  <- round(abs(y) / 1e3,  signif)
      y_is_positive <- if (y >= 0) "" else "-"
      # Pick the largest scale whose rounded magnitude reaches 1.
      if (k < 1) {
        paste0(y_is_positive, smbl, round(abs(y), signif))
      } else if (m < 1) {
        paste0(y_is_positive, smbl, k, "k")
      } else if (b < 1) {
        paste0(y_is_positive, smbl, m, "m")
      } else if (tn < 1) {
        paste0(y_is_positive, smbl, b, "bn")
      } else {
        # BUG FIX: this branch called scales::comma(), but the
        # require(scales) at the top of the file is commented out, so any
        # trillion-scale input failed with "could not find function
        # comma". base::format() yields the same thousands separator.
        paste0(y_is_positive, smbl,
               format(tn, big.mark = ",", scientific = FALSE, trim = TRUE),
               "tn")
      }
    } else {
      # NA elements render as a dash.
      "-"
    }
  }
  sapply(x, humanity)
}
#' Human versions of large currency numbers - extensible via smbl
human_gbp <- function(x){human_numbers(x, smbl = "£")}
human_usd <- function(x){human_numbers(x, smbl = "$")}
human_euro <- function(x){human_numbers(x, smbl = "€")}
human_num <- function(x){human_numbers(x, smbl = "")}
|
#for (i in 1:num_sample) {
# tab.select <- tab[tab[,"count"]==i, ]
# state <- unique(tab.select$state)
# stat <- data.frame(count=matrix(numeric(), nrow=length(state)), row.names=state)
# stat <- stat.count(tab.select, stat)
# stat <- stat.size(tab.select, stat)
# stat <- stat[order(-stat[, "len_mean"]),]
# barplot(stat[, 'len_mean'], main=c("Mean length (density:", i, ")"))
# axis(1, at = 1:length(state), labels=rownames(stat), las=2, cex.axis=0.8)
#}
| /HMRbase/rscripts/lib/unused.R | no_license | xjlizji/lizcodes | R | false | false | 468 | r |
#for (i in 1:num_sample) {
# tab.select <- tab[tab[,"count"]==i, ]
# state <- unique(tab.select$state)
# stat <- data.frame(count=matrix(numeric(), nrow=length(state)), row.names=state)
# stat <- stat.count(tab.select, stat)
# stat <- stat.size(tab.select, stat)
# stat <- stat[order(-stat[, "len_mean"]),]
# barplot(stat[, 'len_mean'], main=c("Mean length (density:", i, ")"))
# axis(1, at = 1:length(state), labels=rownames(stat), las=2, cex.axis=0.8)
#}
|
## ---- dependencies
# Install-on-demand loading for script use.
# CONSISTENCY FIX: the two install.packages() calls used different repos
# URLs (http vs https); normalized to https.
for (pkg in c("data.table", "stargazer")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, repos = "https://cran.rstudio.com/")
    library(pkg, character.only = TRUE)
  }
}
# First we need to get data from each robot and combine into one data set
## ---- read_data
# Read the per-robot CSVs with the given filename suffix and row-bind them.
# BUG FIX: the original wrapped these reads in `for (robot in 1:params$robots)`
# whose body never used `robot`, so the identical multi-robot read ran
# params$robots times.
read_robot_csvs <- function(suffix) {
  fnames <- paste0("turtlebot", seq_len(params$robots), suffix)
  paths  <- paste(params$data_dir, params$experiment, fnames, sep = "/")
  rbindlist(lapply(paths, fread, header = TRUE, sep = ","))
}
gazebo     <- read_robot_csvs("_gazebo_odom.csv")
discrete   <- read_robot_csvs("_discrete_filter_odom.csv")
continuous <- read_robot_csvs("_continuous_filter_odom.csv")
# External-pose counts are kept as one table per robot (not row-bound).
external_paths <- paste(params$data_dir, params$experiment,
                        paste0("turtlebot", seq_len(params$robots),
                               "_external_pose_count.csv"), sep = "/")
external_data_tables <- lapply(external_paths, fread, header = TRUE, sep = ",")
## ---- calculations
# Now calculate errors
# Ground-truth distance of each robot from the origin at every sample.
gazebo$dist_from_origin <- sqrt(gazebo$x_position ^ 2 + gazebo$y_position ^ 2)
# Discrete-filter position error relative to Gazebo ground truth.
# NOTE(review): this assumes gazebo and discrete rows are aligned
# sample-for-sample across robots -- confirm the CSVs are logged in lockstep.
discrete$x_error <- gazebo$x_position - discrete$x_position
discrete$y_error <- gazebo$y_position - discrete$y_position
discrete$horizontal_error <- sqrt(discrete$x_error ^ 2 + discrete$y_error ^ 2)
# Wrap an angle (radians) into the interval [-pi, pi].
correct_yaw <- function(yaw) {
  # Shift by full turns until the value lands in range.
  while (yaw > pi || yaw < -pi) {
    if (yaw > pi) {
      yaw <- yaw - 2 * pi
    } else {
      yaw <- yaw + 2 * pi
    }
  }
  yaw
}
vector_correct_yaw <- Vectorize(correct_yaw)
# Yaw error wrapped into [-pi, pi]; Vectorize lets the scalar correct_yaw
# run over whole columns (this supersedes the commented-out element loops
# that previously cluttered this section -- removed as dead code).
discrete$yaw_error <- vector_correct_yaw(gazebo$yaw - discrete$yaw)
continuous$yaw_error <- vector_correct_yaw(gazebo$yaw - continuous$yaw)
# Continuous-filter position error relative to Gazebo ground truth.
continuous$x_error <- gazebo$x_position - continuous$x_position
continuous$y_error <- gazebo$y_position - continuous$y_position
continuous$horizontal_error <- sqrt(continuous$x_error ^ 2 + continuous$y_error ^ 2)
# Last recorded external-pose count per robot.
# BUG FIX: length(tab) on a data.frame/data.table is the number of *columns*,
# so tab$count[length(tab)] indexed the wrong element; the last row is
# indexed with nrow(tab).
external_data_averages <- lapply(external_data_tables,
                                 function(tab) tab$count[nrow(tab)])
## ---- plot
# Quick-look diagnostic plots on the active device.
plot(gazebo$x_position, gazebo$y_position,
     main = "Ground truth visited locations of robots")
hist(gazebo$dist_from_origin,
     main = "Distance from origin vs. time")
hist(continuous$x_error,
     main = "Continuous x_error")
hist(continuous$y_error,
     main = "Continuous y_error")
hist(continuous$horizontal_error,
     main = "Continuous total distance error")
hist(discrete$x_error,
     main = "Discrete x_error")
hist(discrete$y_error,
     main = "Discrete y_error")
hist(discrete$horizontal_error,
     main = "Discrete total distance error")
## ---- summary
summary(continuous$x_error)
summary(continuous$y_error)
summary(continuous$yaw_error)
summary(continuous$horizontal_error)
summary(discrete$x_error)
summary(discrete$y_error)
summary(discrete$yaw_error)
summary(discrete$horizontal_error)
# BUG FIX: the field used everywhere else is params$robots (plural);
# `params$robot` only resolved via `$` partial matching.
if (params$robots >= 2) {
  summary(external_data_averages)
}
## ---- external_figures
figure_dir <- "/home/matt/thesis/writing/r_figures/"
# Save one filter's horizontal-error trace as a PDF in figure_dir.
save_error_plot <- function(errors, filter_name) {
  out_pdf <- paste0(figure_dir, params$experiment, "_",
                    tolower(filter_name), "_error.pdf")
  pdf(out_pdf)
  plot(errors, main = paste0(filter_name, " Filter Error"),
       sub = paste0("For ", params$experiment, " Experiment"),
       xlab = "Time (.1s)", ylab = "Horizontal Position Error (m)")
  dev.off()
}
save_error_plot(continuous$horizontal_error, "Continuous")
save_error_plot(discrete$horizontal_error, "Discrete")
if (params$experiment == "one_stationary_noiseless") {
  gazebo$horizontal_error <- sqrt(gazebo$x_position ^ 2 + gazebo$y_position ^ 2)
  pdf(paste0(figure_dir, "gazebo_odom_drift.pdf"))
  plot(gazebo$horizontal_error,
       main = "Gazebo Odometry Drift for Stationary Robot with Noiseless Odometry",
       ylab = "Distance from Origin (m)", xlab = "Time (.1s)")
  dev.off()
}
## ---- stargazer_tables
table_dir <- "/home/matt/thesis/writing/autogenerated_tables/"
# Emit a LaTeX summary table for one filter's estimates.
write_filter_table <- function(df, filter_name) {
  slug <- tolower(filter_name)
  stargazer(df,
            out = paste0(table_dir, params$experiment, "_", slug, "_summary.tex"),
            table.placement = "h",
            label = paste0("tab:", params$experiment, "_", slug, "_summary"),
            title = gsub("_", "-", paste0(filter_name, " Filter Estimate for ",
                                          params$experiment, " Experiment")),
            digits.extra = 20)
}
write_filter_table(continuous, "Continuous")
write_filter_table(discrete, "Discrete")
if (params$experiment == "one_stationary_noiseless") {
  stargazer(gazebo,
            out = paste0(table_dir, "gazebo_stationary_noiseless_summary.tex"),
            table.placement = "h",
            label = "tab:gazebo_stationary_noiseless_summary",
            title = "Ground Truth Noiseless Odometry for Stationary Robot located at Origin",
            digits.extra = 20)
}
| /analysis/experiment.R | no_license | jhub/thesis | R | false | false | 6,563 | r | ## ---- dependencies
# Install-on-demand dependency loading (repos normalized to https; the two
# original calls were inconsistent).
if (!require("data.table")) {
  install.packages("data.table", repos = "https://cran.rstudio.com/")
  library("data.table")
}
if (!require("stargazer")) {
  install.packages("stargazer", repos = "https://cran.rstudio.com/")
  library("stargazer")
}
# First we need to get data from each robot and combine into one data set
## ---- read_data
# Read the per-robot CSVs with the given filename suffix and row-bind them.
# BUG FIX: the original wrapped these reads in `for (robot in 1:params$robots)`
# whose body never used `robot`, so the identical work ran params$robots times.
read_robot_csvs <- function(suffix) {
  fnames <- paste0("turtlebot", seq_len(params$robots), suffix)
  paths  <- paste(params$data_dir, params$experiment, fnames, sep = "/")
  rbindlist(lapply(paths, fread, header = TRUE, sep = ","))
}
gazebo     <- read_robot_csvs("_gazebo_odom.csv")
discrete   <- read_robot_csvs("_discrete_filter_odom.csv")
continuous <- read_robot_csvs("_continuous_filter_odom.csv")
external_paths <- paste(params$data_dir, params$experiment,
                        paste0("turtlebot", seq_len(params$robots),
                               "_external_pose_count.csv"), sep = "/")
external_data_tables <- lapply(external_paths, fread, header = TRUE, sep = ",")
## ---- calculations
# Now calculate errors against the Gazebo ground truth (assumes the CSVs
# are logged in lockstep, sample-for-sample).
gazebo$dist_from_origin <- sqrt(gazebo$x_position ^ 2 + gazebo$y_position ^ 2)
discrete$x_error <- gazebo$x_position - discrete$x_position
discrete$y_error <- gazebo$y_position - discrete$y_position
discrete$horizontal_error <- sqrt(discrete$x_error ^ 2 + discrete$y_error ^ 2)
# Wrap an angle (radians) into [-pi, pi].
correct_yaw <- function(yaw) {
  while (yaw > pi) {
    yaw <- yaw - 2 * pi
  }
  while (yaw < -1 * pi) {
    yaw <- yaw + 2 * pi
  }
  return(yaw)
}
vector_correct_yaw <- Vectorize(correct_yaw)
discrete$yaw_error <- vector_correct_yaw(gazebo$yaw - discrete$yaw)
continuous$yaw_error <- vector_correct_yaw(gazebo$yaw - continuous$yaw)
continuous$x_error <- gazebo$x_position - continuous$x_position
continuous$y_error <- gazebo$y_position - continuous$y_position
continuous$horizontal_error <- sqrt(continuous$x_error ^ 2 + continuous$y_error ^ 2)
# Last recorded external-pose count per robot.
# BUG FIX: length(tab) is the number of *columns*; the last row is nrow(tab).
external_data_averages <- lapply(external_data_tables,
                                 function(tab) tab$count[nrow(tab)])
## ---- plot
plot(gazebo$x_position, gazebo$y_position,
     main = "Ground truth visited locations of robots")
hist(gazebo$dist_from_origin,
     main = "Distance from origin vs. time")
hist(continuous$x_error,
     main = "Continuous x_error")
hist(continuous$y_error,
     main = "Continuous y_error")
hist(continuous$horizontal_error,
     main = "Continuous total distance error")
hist(discrete$x_error,
     main = "Discrete x_error")
hist(discrete$y_error,
     main = "Discrete y_error")
hist(discrete$horizontal_error,
     main = "Discrete total distance error")
## ---- summary
summary(continuous$x_error)
summary(continuous$y_error)
summary(continuous$yaw_error)
summary(continuous$horizontal_error)
summary(discrete$x_error)
summary(discrete$y_error)
summary(discrete$yaw_error)
summary(discrete$horizontal_error)
# BUG FIX: field is params$robots (plural); params$robot only worked via
# `$` partial matching.
if (params$robots >= 2) {
  summary(external_data_averages)
}
## ---- external_figures
figure_dir <- "/home/matt/thesis/writing/r_figures/"
filename <- paste0(figure_dir, params$experiment, "_continuous_error.pdf")
pdf(filename)
plot(continuous$horizontal_error, main = "Continuous Filter Error",
     sub = paste0("For ", params$experiment, " Experiment"),
     xlab = "Time (.1s)", ylab = "Horizontal Position Error (m)")
dev.off()
filename <- paste0(figure_dir, params$experiment, "_discrete_error.pdf")
pdf(filename)
plot(discrete$horizontal_error, main = "Discrete Filter Error",
     sub = paste0("For ", params$experiment, " Experiment"),
     xlab = "Time (.1s)", ylab = "Horizontal Position Error (m)")
dev.off()
if (params$experiment == "one_stationary_noiseless") {
  gazebo$horizontal_error <- sqrt(gazebo$x_position ^ 2 + gazebo$y_position ^ 2)
  pdf(paste0(figure_dir, "gazebo_odom_drift.pdf"))
  plot(gazebo$horizontal_error,
       main = "Gazebo Odometry Drift for Stationary Robot with Noiseless Odometry",
       ylab = "Distance from Origin (m)", xlab = "Time (.1s)")
  dev.off()
}
## ---- stargazer_tables
table_dir <- "/home/matt/thesis/writing/autogenerated_tables/"
out_file <- paste0(table_dir, params$experiment, "_continuous_summary.tex")
tex_label <- paste0("tab:", params$experiment, "_continuous_summary")
stargazer(continuous,
          out = out_file,
          table.placement = "h",
          label = tex_label,
          title = gsub("_", "-", paste0("Continuous Filter Estimate for ",
                                        params$experiment, " Experiment")),
          digits.extra = 20)
out_file <- paste0(table_dir, params$experiment, "_discrete_summary.tex")
tex_label <- paste0("tab:", params$experiment, "_discrete_summary")
stargazer(discrete,
          out = out_file,
          table.placement = "h",
          label = tex_label,
          title = gsub("_", "-", paste0("Discrete Filter Estimate for ",
                                        params$experiment, " Experiment")),
          digits.extra = 20)
if (params$experiment == "one_stationary_noiseless") {
  stargazer(gazebo,
            out = paste0(table_dir, "gazebo_stationary_noiseless_summary.tex"),
            table.placement = "h",
            label = "tab:gazebo_stationary_noiseless_summary",
            title = "Ground Truth Noiseless Odometry for Stationary Robot located at Origin",
            digits.extra = 20)
}
|
#' Classification Rate Evaluation
#'
#' This function evaluates the classification rates for two sets of attribute profiles
#'
#' @param att1 a matrix or data frame of attribute profiles
#' @param att2 a matrix or data frame of attribute profiles
#'
#' @return a list with the following components:
#' \describe{
#' \item{PCA}{the proportion of correctly classified attributes (i.e., attribute level classification rate)}
#' \item{PCV}{a vector giving the proportions of correctly classified attribute vectors (i.e., vector level classification rate).
#' The first element is the proportion of vectors with at least one attribute correctly identified; the second
#' element is the proportion with at least two attributes correctly identified; and so forth. The last
#' element is the proportion of vectors whose attributes are all correctly identified.}
#' }
#' @export
#'
#' @examples
#' \dontrun{
#' N <- 2000
#' # model does not matter if item parameter is probability of success
#' Q <- sim30GDINA$simQ
#' J <- nrow(Q)
#' gs <- matrix(0.1,J,2)
#'
#' set.seed(12345)
#' sim <- simGDINA(N,Q,gs.parm = gs)
#' GDINA.est <- GDINA(sim$dat,Q)
#'
#' CR <- ClassRate(sim$attribute,personparm(GDINA.est))
#' CR
#' }
ClassRate <- function(att1, att2){
  if (any(dim(att1) != dim(att2)))
    stop("att1 and att2 must have the same dimensions.", call. = FALSE)
  # Element-wise agreement between the two profile matrices.
  comp <- as.matrix(att1) == as.matrix(att2)
  K <- ncol(att1)
  # Number of correctly recovered attributes per row (examinee).
  n.correct <- rowSums(comp)
  # PCV[k]: proportion of rows with at least k attributes recovered.
  # vapply with a preallocated result replaces the original grow-by-c() loop.
  PCV <- vapply(seq_len(K), function(k) mean(n.correct >= k), numeric(1))
  PCA <- mean(comp)
  list(PCA = PCA, PCV = PCV)
}
| /R/CR.R | no_license | momo609/GDINA | R | false | false | 1,621 | r | #' Classification Rate Evaluation
#'
#' This function evaluates the classification rates for two sets of attribute profiles
#'
#' @param att1 a matrix or data frame of attribute profiles
#' @param att2 a matrix or data frame of attribute profiles
#'
#' @return a list with the following components:
#' \describe{
#' \item{PCA}{the proportion of correctly classified attributes (i.e., attribute level classification rate)}
#' \item{PCV}{a vector giving the proportions of correctly classified attribute vectors (i.e., vector level classification rate).
#' The first element is the proportion of vectors with at least one attribute correctly identified; the second
#' element is the proportion with at least two attributes correctly identified; and so forth. The last
#' element is the proportion of vectors whose attributes are all correctly identified.}
#' }
#' @export
#'
#' @examples
#' \dontrun{
#' N <- 2000
#' # model does not matter if item parameter is probability of success
#' Q <- sim30GDINA$simQ
#' J <- nrow(Q)
#' gs <- matrix(0.1,J,2)
#'
#' set.seed(12345)
#' sim <- simGDINA(N,Q,gs.parm = gs)
#' GDINA.est <- GDINA(sim$dat,Q)
#'
#' CR <- ClassRate(sim$attribute,personparm(GDINA.est))
#' CR
#' }
ClassRate <- function(att1, att2){
  if (any(dim(att1) != dim(att2)))
    stop("att1 and att2 must have the same dimensions.", call. = FALSE)
  # Element-wise agreement between the two profile matrices.
  comp <- as.matrix(att1) == as.matrix(att2)
  K <- ncol(att1)
  # Number of correctly recovered attributes per row (examinee).
  n.correct <- rowSums(comp)
  # PCV[k]: proportion of rows with at least k attributes recovered.
  # vapply with a preallocated result replaces the original grow-by-c() loop.
  PCV <- vapply(seq_len(K), function(k) mean(n.correct >= k), numeric(1))
  PCA <- mean(comp)
  list(PCA = PCA, PCV = PCV)
}
|
###########################
# GET DATA - imports DAT files
# ends with A.melt.3, a long-format data frame of per-group row means
# (original header said "A.3", which is only an intermediate).
setwd(wd.3)                                    # read from 3-hr dir (wd.3 defined by caller)
all.fnames.3 <- dir(wd.3)                      # get filenames from wd.3
# Read every DAT file (comma separated, header row skipped) into a list of
# data frames, then row-bind into one long data frame.
dfl.all.3 <- lapply(all.fnames.3, function(x)
  read.table(x, sep = ",", fill = TRUE, skip = 1))
df.all.3 <- do.call(rbind.fill, dfl.all.3)
# Tag every row with the 3-hr bench time and name the columns.
df.all.3$benchtime <- 3
colnames(df.all.3) <- c("id", "variable", "time", "benchtime")
# Wide format: one column per id, indexed by time.
df.cast.3 <- dcast(df.all.3, time ~ id, value.var = "variable")
df.cast.3[is.na(df.cast.3)] <- -6              # replace NA with sentinel -6
df.cast.3 <- data.frame(df.cast.3[, -1],       # move time to end...
                        df.cast.3[, 1])        # makes renaming easier
names.3.simple <- str_sub(all.fnames.3, 0,
                          trim.length)         # simplify filenames (trim.length from caller)
names.3.unique <- unique(names.3.simple)
names.3.count <- count(names.3.simple)         # freq table (kept: callers may use it)
colnames(df.cast.3) <- c(names.3.simple, "time")
# For each unique simplified name, average the columns of df.cast.3 that
# share that name.
# BUG FIX: the original cumulative-offset loop used ranges such as
# `k:names.3.count[[2]][i]-1`; `:` binds before `-`, so 1 was subtracted
# from every index and the wrong column ranges were averaged. Selecting
# the matching columns by name is both correct and simpler.
A.3 <- lapply(names.3.unique, function(nm)
  rowMeans(df.cast.3[which(names.3.simple == nm)]))
A.3 <- data.frame(matrix(unlist(A.3),
                         ncol = length(names.3.unique)))  # unlist to df
# Create new time col, starting from 1 rather than 0, and pad a zero row.
time.3 <- data.frame(seq_len(nrow(A.3)))
pad.3 <- rep(0, length(names.3.unique) + 1)
A.melt.3 <- data.frame(time.3, A.3)
A.melt.3 <- rbind(pad.3, A.melt.3)
colnames(A.melt.3) <- c("time", names.3.unique)
A.melt.3 <- melt(A.melt.3, id.vars = "time")   # long format for plotting
setwd(wd)                                      # revert to wd
| /get_data3.R | no_license | tastyCanOfMalk/BCIRA | R | false | false | 2,606 | r | ###########################
# GET DATA - imports DAT files
# ends with A.melt.3, a long-format data frame of per-group row means
# (original header said "A.3", which is only an intermediate).
setwd(wd.3)                                    # read from 3-hr dir (wd.3 defined by caller)
all.fnames.3 <- dir(wd.3)                      # get filenames from wd.3
# Read every DAT file (comma separated, header row skipped) into a list of
# data frames, then row-bind into one long data frame.
dfl.all.3 <- lapply(all.fnames.3, function(x)
  read.table(x, sep = ",", fill = TRUE, skip = 1))
df.all.3 <- do.call(rbind.fill, dfl.all.3)
# Tag every row with the 3-hr bench time and name the columns.
df.all.3$benchtime <- 3
colnames(df.all.3) <- c("id", "variable", "time", "benchtime")
# Wide format: one column per id, indexed by time.
df.cast.3 <- dcast(df.all.3, time ~ id, value.var = "variable")
df.cast.3[is.na(df.cast.3)] <- -6              # replace NA with sentinel -6
df.cast.3 <- data.frame(df.cast.3[, -1],       # move time to end...
                        df.cast.3[, 1])        # makes renaming easier
names.3.simple <- str_sub(all.fnames.3, 0,
                          trim.length)         # simplify filenames (trim.length from caller)
names.3.unique <- unique(names.3.simple)
names.3.count <- count(names.3.simple)         # freq table (kept: callers may use it)
colnames(df.cast.3) <- c(names.3.simple, "time")
# For each unique simplified name, average the columns of df.cast.3 that
# share that name.
# BUG FIX: the original cumulative-offset loop used ranges such as
# `k:names.3.count[[2]][i]-1`; `:` binds before `-`, so 1 was subtracted
# from every index and the wrong column ranges were averaged. Selecting
# the matching columns by name is both correct and simpler.
A.3 <- lapply(names.3.unique, function(nm)
  rowMeans(df.cast.3[which(names.3.simple == nm)]))
A.3 <- data.frame(matrix(unlist(A.3),
                         ncol = length(names.3.unique)))  # unlist to df
# Create new time col, starting from 1 rather than 0, and pad a zero row.
time.3 <- data.frame(seq_len(nrow(A.3)))
pad.3 <- rep(0, length(names.3.unique) + 1)
A.melt.3 <- data.frame(time.3, A.3)
A.melt.3 <- rbind(pad.3, A.melt.3)
colnames(A.melt.3) <- c("time", names.3.unique)
A.melt.3 <- melt(A.melt.3, id.vars = "time")   # long format for plotting
setwd(wd)                                      # revert to wd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learner_nnet_regr_nnet.R
\name{mlr_learners_regr.nnet}
\alias{mlr_learners_regr.nnet}
\alias{LearnerRegrNnet}
\title{Regression Neural Net Learner}
\description{
Calls \link[nnet:nnet]{nnet::nnet} from package \CRANpkg{nnet}.
}
\section{Dictionary}{
This \link{Learner} can be instantiated via the
\link[mlr3misc:Dictionary]{dictionary} \link{mlr_learners} or with the associated
sugar function \code{\link[=lrn]{lrn()}}:\preformatted{mlr_learners$get("regr.nnet")
lrn("regr.nnet")
}
}
\section{Traits}{
\itemize{
\item Packages: nnet
\item Predict Types: response
\item Feature Types: numeric, factor, ordered
\item Properties:
}
}
\examples{
# stop example failing with warning if package not installed
learner = suppressWarnings(mlr3::lrn("regr.nnet"))
print(learner)
# available parameters:
learner$param_set$ids()
}
\seealso{
\link[mlr3misc:Dictionary]{Dictionary} of \link[mlr3:Learner]{Learners}:
\link[mlr3:mlr_learners]{mlr3::mlr_learners}
}
\author{
0livier1O1
}
\section{Super classes}{
\code{\link[mlr3:Learner]{mlr3::Learner}} -> \code{\link[mlr3:LearnerRegr]{mlr3::LearnerRegr}} -> \code{LearnerRegrNnet}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{LearnerRegrNnet$new()}}
\item \href{#method-clone}{\code{LearnerRegrNnet$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="format">}\href{../../mlr3/html/Learner.html#method-format}{\code{mlr3::Learner$format()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="help">}\href{../../mlr3/html/Learner.html#method-help}{\code{mlr3::Learner$help()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="predict">}\href{../../mlr3/html/Learner.html#method-predict}{\code{mlr3::Learner$predict()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="predict_newdata">}\href{../../mlr3/html/Learner.html#method-predict_newdata}{\code{mlr3::Learner$predict_newdata()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="print">}\href{../../mlr3/html/Learner.html#method-print}{\code{mlr3::Learner$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="reset">}\href{../../mlr3/html/Learner.html#method-reset}{\code{mlr3::Learner$reset()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="train">}\href{../../mlr3/html/Learner.html#method-train}{\code{mlr3::Learner$train()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Creates a new instance of this \link[R6:R6Class]{R6} class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LearnerRegrNnet$new()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LearnerRegrNnet$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/mlr_learners_regr.nnet.Rd | no_license | 0livier1O1/mlr3extralearners | R | false | true | 3,583 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learner_nnet_regr_nnet.R
\name{mlr_learners_regr.nnet}
\alias{mlr_learners_regr.nnet}
\alias{LearnerRegrNnet}
\title{Regression Neural Net Learner}
\description{
Calls \link[nnet:nnet]{nnet::nnet} from package \CRANpkg{nnet}.
}
\section{Dictionary}{
This \link{Learner} can be instantiated via the
\link[mlr3misc:Dictionary]{dictionary} \link{mlr_learners} or with the associated
sugar function \code{\link[=lrn]{lrn()}}:\preformatted{mlr_learners$get("regr.nnet")
lrn("regr.nnet")
}
}
\section{Traits}{
\itemize{
\item Packages: nnet
\item Predict Types: response
\item Feature Types: numeric, factor, ordered
\item Properties:
}
}
\examples{
# stop example failing with warning if package not installed
learner = suppressWarnings(mlr3::lrn("regr.nnet"))
print(learner)
# available parameters:
learner$param_set$ids()
}
\seealso{
\link[mlr3misc:Dictionary]{Dictionary} of \link[mlr3:Learner]{Learners}:
\link[mlr3:mlr_learners]{mlr3::mlr_learners}
}
\author{
0livier1O1
}
\section{Super classes}{
\code{\link[mlr3:Learner]{mlr3::Learner}} -> \code{\link[mlr3:LearnerRegr]{mlr3::LearnerRegr}} -> \code{LearnerRegrNnet}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{LearnerRegrNnet$new()}}
\item \href{#method-clone}{\code{LearnerRegrNnet$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="format">}\href{../../mlr3/html/Learner.html#method-format}{\code{mlr3::Learner$format()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="help">}\href{../../mlr3/html/Learner.html#method-help}{\code{mlr3::Learner$help()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="predict">}\href{../../mlr3/html/Learner.html#method-predict}{\code{mlr3::Learner$predict()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="predict_newdata">}\href{../../mlr3/html/Learner.html#method-predict_newdata}{\code{mlr3::Learner$predict_newdata()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="print">}\href{../../mlr3/html/Learner.html#method-print}{\code{mlr3::Learner$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="reset">}\href{../../mlr3/html/Learner.html#method-reset}{\code{mlr3::Learner$reset()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Learner" data-id="train">}\href{../../mlr3/html/Learner.html#method-train}{\code{mlr3::Learner$train()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Creates a new instance of this \link[R6:R6Class]{R6} class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LearnerRegrNnet$new()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LearnerRegrNnet$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
library(shiny)
library(shinySignals) # devtools::install_github("hadley/shinySignals")
library(dplyr)
library(shinydashboard)
library(bubbles) # devtools::install_github("jcheng5/bubbles")
source("bloomfilter.R")
# Shoaib: Install shinyjs
library(shinyjs)
# Shoaib: Set the credentials here
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before deploying.
# NOTE(review): "golablUserID" looks like a typo for "globalUserID" -- not
# renamed here because ui/server code elsewhere may reference this name.
golablUserID <- c("test","shiny")
globalPassword <- c("test","shiny")
# An empty prototype of the data frame we want to create
# (column names double as the CSV column names when parsing the log stream).
prototype <- data.frame(date = character(), time = character(),
  size = numeric(), r_version = character(), r_arch = character(),
  r_os = character(), package = character(), version = character(),
  country = character(), ip_id = character(), received = numeric())
# Connects to streaming log data for cran.rstudio.com and
# returns a reactive expression that serves up the cumulative
# results as a data frame
packageStream <- function(session) {
  # Open the raw log socket (non-blocking reads).
  logSock <- socketConnection("cransim.rstudio.com", 6789,
                              blocking = FALSE, open = "r")
  # Close the socket once the Shiny session ends.
  session$onSessionEnded(function() close(logSock))
  # Poll the socket roughly once a second for freshly arrived lines.
  newLines <- reactive({
    invalidateLater(1000, session)
    readLines(logSock)
  })
  # Parse any new lines into a data frame stamped with arrival time.
  reactive({
    lines <- newLines()
    if (length(lines) == 0)
      return()
    parsed <- read.csv(textConnection(lines), header = FALSE,
                       stringsAsFactors = FALSE,
                       col.names = names(prototype))
    mutate(parsed, received = as.numeric(Sys.time()))
  })
}
# Accumulates pkgStream rows over time; throws out any older than timeWindow
# (assuming the presence of a "received" field)
packageData <- function(pkgStream, timeWindow) {
  accumulate <- function(memo, value) {
    combined <- rbind(memo, value)
    cutoff <- as.numeric(Sys.time()) - timeWindow
    filter(combined, received > cutoff)
  }
  shinySignals::reducePast(pkgStream, accumulate, prototype)
}
# Count the total nrows of pkgStream
downloadCount <- function(pkgStream) {
  shinySignals::reducePast(pkgStream, function(total, df) {
    # NULL frames (no new data this tick) leave the running total unchanged.
    if (is.null(df)) total else total + nrow(df)
  }, 0)
}
# Use a bloom filter to probabilistically track the number of unique
# users we have seen; using bloom filter means we will not have a
# perfectly accurate count, but the memory usage will be bounded.
userCount <- function(pkgStream) {
  # These parameters estimate that with 5000 unique users added to
  # the filter, we'll have a 1% chance of false positive on the next
  # user to be queried.
  bloomFilter <- BloomFilter$new(5000, 0.01)
  total <- 0
  reactive({
    df <- pkgStream()
    if (!is.null(df) && nrow(df) > 0) {
      # ip_id is only unique on a per-day basis. To make them unique
      # across days, include the date. And call unique() to make sure
      # we don't double-count dupes in the current data frame.
      ids <- paste(df$date, df$ip_id) %>% unique()
      # Logical mask of IDs we haven't seen before
      newIds <- !sapply(ids, bloomFilter$has)
      # BUG FIX: add the *number of new IDs*, i.e. sum(newIds).
      # The original added length(newIds), which equals length(ids), so
      # previously-seen users were re-counted on every batch and the
      # unique-user total was inflated.
      total <<- total + sum(newIds)
      # Add the new IDs so we know for next time
      sapply(ids[newIds], bloomFilter$set)
    }
    total
  })
}
| /087-crandash/global.R | permissive | shocoder/shiny-examples | R | false | false | 3,088 | r | library(shiny)
library(shinySignals) # devtools::install_github("hadley/shinySignals")
library(dplyr)
library(shinydashboard)
library(bubbles) # devtools::install_github("jcheng5/bubbles")
source("bloomfilter.R")
# Shoaib: Install shinyjs
library(shinyjs)
# Shoaib: Set the credentials here
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before deploying.
golablUserID <- c("test","shiny")
globalPassword <- c("test","shiny")
# An empty prototype of the data frame we want to create
# (column names double as the CSV column names when parsing the log stream).
prototype <- data.frame(date = character(), time = character(),
  size = numeric(), r_version = character(), r_arch = character(),
  r_os = character(), package = character(), version = character(),
  country = character(), ip_id = character(), received = numeric())
# Connects to streaming log data for cran.rstudio.com and
# returns a reactive expression that serves up the cumulative
# results as a data frame
packageStream <- function(session) {
  # Connect to data source
  sock <- socketConnection("cransim.rstudio.com", 6789, blocking = FALSE, open = "r")
  # Clean up when session is over
  session$onSessionEnded(function() {
    close(sock)
  })
  # Returns new lines (polled roughly once a second)
  newLines <- reactive({
    invalidateLater(1000, session)
    readLines(sock)
  })
  # Parses newLines() into data frame stamped with arrival time
  reactive({
    if (length(newLines()) == 0)
      return()
    read.csv(textConnection(newLines()), header=FALSE, stringsAsFactors=FALSE,
             col.names = names(prototype)
    ) %>% mutate(received = as.numeric(Sys.time()))
  })
}
# Accumulates pkgStream rows over time; throws out any older than timeWindow
# (assuming the presence of a "received" field)
packageData <- function(pkgStream, timeWindow) {
  shinySignals::reducePast(pkgStream, function(memo, value) {
    rbind(memo, value) %>%
      filter(received > as.numeric(Sys.time()) - timeWindow)
  }, prototype)
}
# Count the total nrows of pkgStream
downloadCount <- function(pkgStream) {
  shinySignals::reducePast(pkgStream, function(memo, df) {
    if (is.null(df))
      return(memo)
    memo + nrow(df)
  }, 0)
}
# Use a bloom filter to probabilistically track the number of unique
# users we have seen; using bloom filter means we will not have a
# perfectly accurate count, but the memory usage will be bounded.
userCount <- function(pkgStream) {
  # These parameters estimate that with 5000 unique users added to
  # the filter, we'll have a 1% chance of false positive on the next
  # user to be queried.
  bloomFilter <- BloomFilter$new(5000, 0.01)
  total <- 0
  reactive({
    df <- pkgStream()
    if (!is.null(df) && nrow(df) > 0) {
      # ip_id is only unique on a per-day basis. To make them unique
      # across days, include the date. And call unique() to make sure
      # we don't double-count dupes in the current data frame.
      ids <- paste(df$date, df$ip_id) %>% unique()
      # Logical mask of IDs we haven't seen before
      newIds <- !sapply(ids, bloomFilter$has)
      # BUG FIX: add the *number of new IDs*, i.e. sum(newIds). The
      # original added length(newIds) == length(ids), re-counting
      # previously-seen users on every batch.
      total <<- total + sum(newIds)
      # Add the new IDs so we know for next time
      sapply(ids[newIds], bloomFilter$set)
    }
    total
  })
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{write.bed}
\alias{write.bed}
\title{Write effect intervals to a \code{bed} file.}
\usage{
write.bed(intervals, bedfile)
}
\arguments{
\item{intervals:}{output of \code{\link{get.effect.intervals}}}
\item{bedfile:}{output \code{bed} file}
}
\description{
Write effect intervals to a \code{bed} file.
}
\examples{
\dontrun{
fra=2
region="chr1:154206209-154214400"
res <- multiseq(x=x, g=g, minobs=1, lm.approx=FALSE, read.depth=samples$ReadDepth)
intervals <- get.effect.intervals(res, fra)
write.bed(intervals, "out.bed")
}
}
| /package/multiseq/man/write.bed.Rd | no_license | esterpantaleo/multiseq | R | false | false | 587 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{write.bed}
\alias{write.bed}
\title{Write effect intervals to a \code{bed} file.}
\usage{
write.bed(intervals, bedfile)
}
\arguments{
\item{intervals:}{output of \code{\link{get.effect.intervals}}}
\item{bedfile:}{output \code{bed} file}
}
\description{
Write effect intervals to a \code{bed} file.
}
\examples{
\dontrun{
fra=2
region="chr1:154206209-154214400"
res <- multiseq(x=x, g=g, minobs=1, lm.approx=FALSE, read.depth=samples$ReadDepth)
intervals <- get.effect.intervals(res, fra)
write.bed(intervals, "out.bed")
}
}
|
# Download and unpack the UCI household power consumption data,
# skipping the download if the archive is already present.
if (!file.exists("./hpc.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                "./hpc.zip")
}
unzip("./hpc.zip", exdir = ".")
# BUG FIX / robustness: the raw file marks missing values with "?";
# declaring na.strings = "?" lets the numeric columns parse as numeric
# (the original read them as factors and worked around it with
# as.numeric(as.character(...)) at every use).
fullhpc <- read.table("./household_power_consumption.txt",
                      header = TRUE, sep = ";", na.strings = "?",
                      stringsAsFactors = FALSE)
# Keep only 1-2 Feb 2007, comparing Date to Date rather than POSIXlt to
# character strings.
hpc <- subset(fullhpc,
              as.Date(Date, "%d/%m/%Y") %in% as.Date(c("2007-02-01", "2007-02-02")))
# Build the datetime axis once (the original recomputed strptime(paste(...))
# for every plotted series).
hpc$DateTime <- strptime(paste(as.character(hpc$Date), as.character(hpc$Time), sep = " "),
                         "%d/%m/%Y %H:%M:%S")
xstart <- as.POSIXct(strptime("2007-02-01", "%Y-%m-%d"))
xend <- as.POSIXct(strptime("2007-02-03", "%Y-%m-%d"))
xrange <- c(xstart, xend)
png(filename = "./plot3.png", width = 480, height = 480, units = "px")
# Draw order preserved from the original: sub-meter 1, then 3, then 2.
plot(hpc$DateTime, hpc$Sub_metering_1, xlim = xrange, type = "l", col = "black",
     ylab = "Energy sub metering", xlab = "")
lines(hpc$DateTime, hpc$Sub_metering_3, col = "blue")
lines(hpc$DateTime, hpc$Sub_metering_2, col = "red")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | bharathcvs/ExData_Plotting1 | R | false | false | 1,199 | r | download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","./hpc.zip")
unzip("./hpc.zip",exdir=".")
fullhpc<-read.table("./household_power_consumption.txt",header=TRUE,sep=";")
hpc<-subset(fullhpc,strptime(Date,"%d/%m/%Y") %in% c("2007-02-01","2007-02-02"))
xstart<-as.POSIXct(strptime("2007-02-01","%Y-%m-%d"))
xend<-as.POSIXct(strptime("2007-02-03","%Y-%m-%d"))
xrange<-c(xstart,xend)
png(filename="./plot3.png",width=480,height=480,units="px")
#submeter1
plot(strptime(paste(as.character(hpc$Date),as.character(hpc$Time),sep=" "),"%d/%m/%Y %H:%M:%S"),
as.numeric(as.character(hpc$Sub_metering_1)),xlim=xrange,type="l",col="black",
ylab="Energy sub metering",xlab="")
#submeter3
lines(strptime(paste(as.character(hpc$Date),as.character(hpc$Time),sep=" "),"%d/%m/%Y %H:%M:%S"),
as.numeric(as.character(hpc$Sub_metering_3)),col="blue")
#submeter2
lines(strptime(paste(as.character(hpc$Date),as.character(hpc$Time),sep=" "),"%d/%m/%Y %H:%M:%S"),
as.numeric(as.character(hpc$Sub_metering_2)),col="red")
legend("topright",lty=1,col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
# Methods for the codebook() generic: assemble a "codebook" object holding
# one codebookEntry per variable.
#
# For the container classes (data.set, data.frame, tbl_df) the `weights`
# argument is evaluated non-standardly with the container's columns in
# scope, so a weights column can be given by its unquoted name.
# `unweighted=TRUE` asks for unweighted statistics to be shown alongside
# the weighted ones.

setMethod("codebook","data.set",function(x,weights,unweighted=TRUE,...){
    weights <- eval(substitute(weights),x,parent.frame())
    cb <- lapply(x,codebookEntry,weights=weights,unweighted=unweighted)
    new("codebook",cb)
})

setMethod("codebook","item",function(x,weights,unweighted=TRUE,...){
    # Single variable: name the one-entry codebook after the call expression.
    xname <- paste(deparse(substitute(x)))
    cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
    names(cb) <- xname
    new("codebook",cb)
})

setMethod("codebook","data.frame",function(x,weights,unweighted=TRUE,...){
    weights <- eval(substitute(weights),x,parent.frame())
    cb <- lapply(x,codebookEntry,weights=weights,unweighted=unweighted)
    new("codebook",cb)
})

setMethod("codebook","tbl_df",function(x,weights,unweighted=TRUE,...){
    weights <- eval(substitute(weights),x,parent.frame())
    cb <- lapply(x,codebookEntry,weights=weights,unweighted=unweighted)
    new("codebook",cb)
})

setMethod("codebook","atomic",function(x,weights,unweighted=TRUE,...){
    xname <- paste(deparse(substitute(x)))
    cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
    names(cb) <- xname
    new("codebook",cb)
})

setMethod("codebook","factor",function(x,weights,unweighted=TRUE,...){
    xname <- paste(deparse(substitute(x)))
    cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
    names(cb) <- xname
    new("codebook",cb)
})

setMethod("codebook","ANY",function(x,weights,unweighted=TRUE,...){
    xname <- paste(deparse(substitute(x)))
    cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
    names(cb) <- xname
    new("codebook",cb)
})

# NULL has no codebook.
setMethod("codebook","NULL",function(x,weights,unweighted=TRUE,...)NULL)
# codebookEntry() for survey "item" objects: records storage mode,
# measurement level and the item's value filter in the spec, then delegates
# the statistics to the categorical or metric helper depending on the
# measurement level.
setMethod("codebookEntry","item",function(x,weights,unweighted=TRUE,...){
    annotation <- annotation(x)
    filter <- x@value.filter
    spec <- c(
        "Storage mode:"=storage.mode(x),
        "Measurement:"=measurement(x)
    )
    # Describe the value filter (if any) in human-readable form.
    if(length(filter)) spec <- c(spec,
        switch(class(filter),
            missing.values = c("Missing values:" = format(filter)),
            valid.values = c("Valid values:" = format(filter)),
            valid.range = c("Valid range:" = format(filter))
        ))
    # Categorical items get a frequency table, metric items descriptives.
    stats <- switch(measurement(x),
        nominal=,ordinal=codebookStatsCateg(x,weights=weights,unweighted=unweighted),
        interval=,ratio=codebookStatsMetric(x,weights=weights,unweighted=unweighted)
    )
    cbe <- new("codebookEntry",
        spec = spec,
        stats = stats,
        annotation = annotation
    )
    return(cbe)
})
NAtab <- function(isna,weights=NULL){
    # Two-column summary (N, Percent) of valid vs. missing (NA)
    # observations.  `isna` is a logical vector; `weights` defaults to
    # unit case weights.
    if(is.null(weights) || length(weights) == 0L)
        weights <- rep.int(1, length(isna))
    n.missing <- sum(weights[isna])
    n.total <- sum(weights)
    n.valid <- n.total - n.missing
    counts <- c(Valid = n.valid,
                "Missing (NA)" = n.missing,
                Total = n.total)
    # Percentages relative to the total; the Total row itself gets NA.
    perc <- c(100 * counts[1:2] / n.total, NA)
    cbind(N = counts, Percent = perc)
}
setMethod("codebookEntry","ANY",function(x,weights,unweighted=TRUE,...){
    # Fallback codebookEntry for arbitrary vectors: uses the "label"
    # attribute (if present) as the description, reports descriptives for
    # numeric data, and tabulates valid vs. missing values when NAs occur.
    if(length(attr(x,"label")))
        annotation <- c(description=attr(x,"label"))
    else
        annotation <- NULL
    spec <- c("Storage mode:"=storage.mode(x))
    isat <- is.atomic(x)
    if(isat)
        isna <- is.na(x)
    else
        isna <- FALSE
    if(mode(x) == "numeric"){
        descr <- Descriptives(x)
        if(length(weights) && length(descr) > 2){ # There is more than a range
            wdescr <- Descriptives(x,weights)
            if(unweighted)
                # Unweighted and weighted descriptives side by side.
                descr <- collect(Unweighted=format(descr),
                                 Weighted=format(wdescr))
            else
                descr <- as.matrix(format(wdescr))
        }
        else
            descr <- as.matrix(format(descr))
    }
    else
        descr <- NULL
    if(any(isna)){
        tab <- NAtab(isna)
        if(length(weights)){
            wtab <- NAtab(isna,weights)
            if(unweighted)
                tab <- collect(Unweighted=tab,
                               Weighted=wtab)
            else
                tab <- array(wtab,
                             dim=c(dim(tab),1),
                             dimnames=c(dimnames(tab),
                                        list(NULL)))
        }
        else
            # Add a third (panel) dimension so downstream formatting can
            # treat one- and two-panel tables uniformly.
            tab <- array(tab,
                         dim=c(dim(tab),1),
                         dimnames=c(dimnames(tab),
                                    list(NULL)))
        attr(tab,"title") <- "Valid and missing values"
    } else
        tab <- integer(0)
    stats <- list(tab=tab,
                  descr=descr)
    new("codebookEntry",
        spec = spec,
        stats = stats,
        annotation = annotation
    )
})
rwnexp <- function(mat,nms){
    # "Row-name expand": embed `mat` into a zero matrix that has one row
    # per name in `nms`, so that categories absent from `mat` appear with
    # zero counts.  Column names are preserved.
    out <- matrix(0, nrow = length(nms), ncol = ncol(mat),
                  dimnames = list(nms, colnames(mat)))
    out[rownames(mat), ] <- mat
    out
}
codebookTable_factor <- function(x,weights=NULL,...){
    # Frequency table of a factor: counts plus valid (and, when NAs are
    # present, total) percentages, one row per level.  Row labels combine
    # the level number with the quoted level label.
    if(!length(weights))
        weights <- rep(1,length(x))
    missing.mask <- is.na(x)
    level.counts <- rowsum(weights[!missing.mask], x[!missing.mask])
    # Re-expand so levels absent from the data get a zero count.
    level.counts <- rwnexp(level.counts, levels(x))
    n.missing <- sum(weights * missing.mask)
    tab <- cbind(level.counts, 100 * level.counts / sum(level.counts))
    if(any(missing.mask)){
        # Append an NA row and a "Total" percentage column.
        lev.labels <- sQuote(levels(x))
        if(nlevels(x))
            lev.labels <- paste(format(c(1:nlevels(x), NA), justify="right"),
                                format(c(lev.labels, ""), justify="left"))
        else
            lev.labels <- "NA"
        tab <- rbind(tab, c(n.missing, NA))
        all.counts <- c(level.counts, n.missing)
        tab <- cbind(tab, 100 * all.counts / sum(all.counts))
        colnames(tab) <- c("N","Valid","Total")
    }
    else {
        lev.labels <- sQuote(levels(x))
        lev.labels <- paste(format(1:nlevels(x), justify="right"),
                            format(lev.labels, justify="left"))
        colnames(tab) <- c("N","Valid")
    }
    rownames(tab) <- lev.labels
    return(tab)
}
setMethod("codebookEntry","factor",function(x,weights=NULL,unweighted=TRUE,...){
    # Codebook entry for factors: a levels-and-labels frequency table
    # (optionally weighted and unweighted side by side), plus storage mode
    # and the number of (ordered) levels in the spec.
    tab <- codebookTable_factor(x)
    if(length(weights)){
        wtab <- codebookTable_factor(x,weights=weights)
        if(unweighted)
            tab <- collect(Unweighted=tab,
                           Weighted=wtab)
        else {
            # Weighted only: keep just the weighted table, but add a third
            # (panel) dimension so formatting can treat tables uniformly.
            tab <- wtab
            tab.dn <- dimnames(tab)
            tab.d <- dim(tab)
            dim(tab) <- c(tab.d,1)
            dimnames(tab) <- c(tab.dn,list(NULL))
        }
    }
    else{
        tab.dn <- dimnames(tab)
        tab.d <- dim(tab)
        dim(tab) <- c(tab.d,1)
        dimnames(tab) <- c(tab.dn,list(NULL))
    }
    attr(tab,"title") <- "Levels and labels"
    # Use the "label" attribute (if any) as the description annotation.
    if(length(attr(x,"label")))
        annotation <- c(description=attr(x,"label"))
    else
        annotation <- NULL
    spec <- c("Storage mode:"=storage.mode(x))
    spec <- if(is.ordered(x)) c(spec,
                "Ordered factor with"=paste(nlevels(x),"levels"))
            else c(spec,
                "Factor with"=paste(nlevels(x),"levels"))
    stats <- list(tab=tab)
    new("codebookEntry",
        spec = spec,
        stats = stats,
        annotation = annotation
    )
})
setMethod("codebookEntry","character",function(x,weights=NULL,unweighted=TRUE,...){
    # Codebook entry for a plain character vector: storage mode, a quoted
    # Min/Max summary of the values, and -- when NAs are present -- a
    # valid/missing table, optionally with weighted counts next to the
    # unweighted ones.
    spec <- c("Storage mode:"=storage.mode(x))
    isna <- is.na(x)
    descr <- codebookStatsChar(x)
    descr <- descr$descr
    if(any(isna)){
        tab <- NAtab(isna)
        if(length(weights)){
            wtab <- NAtab(isna,weights)
            if(unweighted)
                # BUG FIX: `tab` was previously passed twice here (instead
                # of `wtab`), so the "Weighted" panel silently repeated the
                # unweighted counts.
                tab <- collect(Unweighted=tab,
                               Weighted=wtab)
            else
                tab <- array(wtab,
                             dim=c(dim(tab),1),
                             dimnames=c(dimnames(tab),
                                        list(NULL)))
        }
        else
            # Add a third (panel) dimension for uniform downstream handling.
            tab <- array(tab,
                         dim=c(dim(tab),1),
                         dimnames=c(dimnames(tab),
                                    list(NULL)))
        attr(tab,"title") <- "Valid and missing values"
    } else
        tab <- integer(0)
    stats <- list(tab=tab,
                  descr=descr)
    new("codebookEntry",
        spec = spec,
        stats = stats,
        annotation = NULL
    )
})
codebookTable_logical <- function(x,weights=NULL,...){
    # Frequency table of a logical vector: counts and valid percentages for
    # FALSE/TRUE, plus an NA row and total percentages when NAs are present.
    if(!length(weights))
        weights <- rep(1,length(x))
    isna <- is.na(x)
    counts <- rowsum(weights[!isna],x[!isna])
    # BUG FIX: rowsum() only returns rows for values actually present, so an
    # all-TRUE (or all-FALSE) vector used to yield a one-row table that
    # clashed with the fixed FALSE/TRUE row labels assigned below.  Expand
    # to both levels, giving the absent one a zero count (this mirrors what
    # codebookTable_factor does via rwnexp).
    counts.full <- matrix(0,nrow=2,ncol=ncol(counts),
                          dimnames=list(c("FALSE","TRUE"),colnames(counts)))
    counts.full[rownames(counts),] <- counts
    counts <- counts.full
    NAs <- sum(weights*isna)
    tab <- cbind(counts,100*counts/sum(counts))
    if(any(isna)) {
        labs <- c("FALSE","TRUE","NA")
        tab <- rbind(tab,c(NAs,NA))
        counts <- c(counts,NAs)
        tab <- cbind(tab,100*counts/sum(counts))
        colnames(tab) <- c("N","Valid","Total")
    }
    else {
        labs <- c("FALSE","TRUE")
        colnames(tab) <- c("N","Valid")
    }
    rownames(tab) <- labs
    return(tab)
}
setMethod("codebookEntry","logical",function(x,weights=NULL,unweighted=TRUE,...){
    # Codebook entry for logical vectors: a FALSE/TRUE (and NA) frequency
    # table, optionally with weighted counts next to the unweighted ones.
    tab <- codebookTable_logical(x)
    if(length(weights)){
        wtab <- codebookTable_logical(x,weights=weights)
        if(unweighted)
            tab <- collect(Unweighted=tab,
                           Weighted=wtab)
        else {
            # Weighted only: keep the weighted table, adding a third
            # (panel) dimension so formatting treats all tables uniformly.
            tab <- wtab
            tab.dn <- dimnames(tab)
            tab.d <- dim(tab)
            dim(tab) <- c(tab.d,1)
            dimnames(tab) <- c(tab.dn,list(NULL))
        }
    }
    else{
        tab.dn <- dimnames(tab)
        tab.d <- dim(tab)
        dim(tab) <- c(tab.d,1)
        dimnames(tab) <- c(tab.dn,list(NULL))
    }
    attr(tab,"title") <- "Logical values"
    # Use the "label" attribute (if any) as the description annotation.
    if(length(attr(x,"label")))
        annotation <- c(description=attr(x,"label"))
    else
        annotation <- NULL
    spec <- c("Storage mode:"=storage.mode(x))
    stats <- list(tab=tab)
    new("codebookEntry",
        spec = spec,
        stats = stats,
        annotation = annotation
    )
})
codebookStatsCateg <- function(x,weights=NULL,unweighted=TRUE,...){
    # Statistics for nominal/ordinal items: a full value/label frequency
    # table.  Unlabelled character items fall back to the Min/Max summary
    # provided by codebookStatsChar().
    vl <- labels(x)
    ic <- inherits(x,"character")
    if(length(vl) || !ic){
        tab <- codebookTable_item(x)
        tab.title <- attr(tab,"title")
        if(length(weights)){
            wtab <- codebookTable_item(x,weights)
            if(unweighted)
                # Unweighted and weighted panels side by side.
                tab <- collect(Unweighted=tab,
                               Weighted=wtab)
            else
                tab <- array(wtab,
                             dim=c(dim(tab),1),
                             dimnames=c(dimnames(tab),
                                        list(NULL)))
        }
        else
            # Add a third (panel) dimension for uniform downstream handling.
            tab <- array(tab,
                         dim=c(dim(tab),1),
                         dimnames=c(dimnames(tab),
                                    list(NULL)))
        attr(tab,"title") <- tab.title
        list(tab=tab)
    }
    else
        codebookStatsChar(x)
}
codebookStatsChar <- function(x,...){
    # Summary statistics for character data: no frequency table, just the
    # lexicographic minimum and maximum value, double-quoted, returned as a
    # one-column matrix with rows "Min" and "Max".
    rng <- range(x, na.rm=TRUE)
    descr <- as.matrix(structure(dQuote(rng), names=c("Min","Max")))
    list(tab = NULL, descr = descr)
}
codebookStatsMetric <- function(x,weights=NULL,unweighted=TRUE,...){
    # Statistics for interval/ratio items: a table of the *labelled* values
    # only (unlabelled values are summarised by the descriptives), plus the
    # first four descriptive statistics, optionally with weighted and
    # unweighted results side by side.
    #
    # BUG FIX: the `weights` default used to be `TRUE`, unlike every sibling
    # helper (which uses NULL); a call without weights would have forwarded
    # the logical TRUE to Descriptives().  The default is now NULL, matching
    # codebookStatsCateg() and the codebookEntry methods.
    if(length(labels(x))){
        tab <- codebookTable_item(x,drop.unlabelled=TRUE)
        tab.title <- attr(tab,"title")
        if(length(weights) && length(tab)){
            wtab <- codebookTable_item(x,weights=weights,
                                       drop.unlabelled=TRUE)
            if(unweighted)
                tab <- collect(Unweighted=tab,
                               Weighted=wtab)
            else
                tab <- array(wtab,
                             dim=c(dim(tab),1),
                             dimnames=c(dimnames(tab),
                                        list(NULL)))
            attr(tab,"title") <- tab.title
        }
        else if(length(tab)){
            # Add a third (panel) dimension for uniform downstream handling.
            tab <- array(tab,
                         dim=c(dim(tab),1),
                         dimnames=c(dimnames(tab),
                                    list(NULL)))
            attr(tab,"title") <- tab.title
        }
    }
    else
        tab <- NULL
    # Keep only the first four descriptives.
    descr <- Descriptives(x)[1:4]
    if(length(weights)){
        wdescr <- Descriptives(x,weights)[1:4]
        if(unweighted)
            descr <- collect(Unweighted=descr,
                             Weighted=wdescr)
        else
            descr <- as.matrix(wdescr)
    }
    else
        descr <- as.matrix(descr)
    list(
        tab=tab,
        descr=descr
    )
}
codebookTable_item <- function(x,weights=NULL,drop.unlabelled=FALSE,drop.empty=TRUE){
    # Build the frequency table for a labelled "item": one row per labelled
    # value plus pooled rows for unlabelled valid values, unlabelled missing
    # values, and NAs.  Returns a matrix with N and percentage columns, or
    # NULL if nothing remains after dropping empty rows.
    #
    # drop.unlabelled: drop the pooled "(unlab.val.)" row (used for metric
    #     items, whose unlabelled values are summarised by descriptives).
    # drop.empty: drop zero-count rows.
    is.m <- is.missing(x)
    isNA <- is.na(x)
    vl <- labels(x)
    if(!length(weights)){
        weights <- rep(1,length(x))
    }
    if(length(vl)){
        vvl <- vl@values
        lvl <- vl@.Data
        # Which labelled values count as valid under the item's value filter?
        valid <- !is.missing2(vvl,x@value.filter)
        i <- match(x@.Data,vvl,nomatch=0L)
        tab <- drop(rowsum(weights,i))
        # Group "0" collects data values without a label; those are handled
        # separately below, so remove that group from the per-label table.
        if("0" %in% names(tab)){
            ii <- match("0",names(tab))
            tab <- tab[-ii]
        }
        # Re-expand so that labelled values absent from the data appear
        # with a zero count.
        if(length(tab) < length(vvl)){
            tab0 <- numeric(length(vvl))
            ii <- as.integer(names(tab))
            tab0[ii] <- tab
            tab <- tab0
        }
        names(tab) <- as.character(vvl)
        lab <- sQuote(vl@.Data)
        tab.title <- "Values and labels"
    }
    else {
        valid <- logical(0)
        tab <- c()
        lab <- c()
        i <- logical(length(x))
        tab.title <- "Values"
    }
    # Pooled counts: unlabelled valid, unlabelled missing, and NA.
    ovld <- sum(weights*(!is.m & !i))
    omiss <- sum(weights*(is.m & !i & !isNA))
    NAs <- sum(weights*(isNA))
    if(ovld | !drop.empty){
        tab <- c(tab," "=ovld)
        lab <- c(lab,"(unlab.val.)")
        valid <- c(valid,TRUE)
    }
    if(omiss | !drop.empty){
        tab <- c(tab," "=omiss)
        lab <- c(lab,"(unlab.mss.)")
        valid <- c(valid,FALSE)
    }
    if(NAs | !drop.empty){
        tab <- c(tab,"NA"=NAs)
        lab <- c(lab,"")
        valid <- c(valid,FALSE)
    }
    # Prefix missing-value rows with an "M" marker (valid rows get blanks).
    if(any(!valid)){
        missing.marker <- "M"
        valid.marker <- paste(rep(" ",nchar(missing.marker)),collapse="")
        lab <- paste(ifelse(valid,valid.marker,missing.marker),lab)
    }
    # Optionally drop empty rows, but always keep valid categories.
    tab.nonzero <- tab>0
    tab.keep <- tab.nonzero | !drop.empty
    tab <- tab[valid | tab.keep]
    lab <- lab[valid | tab.keep]
    valid <- valid[valid | tab.keep]
    # Percentages: "Valid" relative to valid rows only, "Total" to all rows.
    if(any(!valid)){
        vperc <- rep(NA,length(tab))
        vtab <- tab[valid]
        Nvalid <- sum(vtab)
        if(Nvalid) vperc[valid] <- 100 * vtab/Nvalid
        else vperc[valid] <- 0
        tperc <- 100 * tab/sum(tab)
        tab <- cbind(N=tab,Valid=vperc,Total=tperc)
    } else {
        tperc <- 100 * tab/sum(tab)
        tab <- cbind(N=tab,Percent=tperc)
    }
    rownames(tab) <- names(tperc)
    if(drop.unlabelled){
        drp <- match("(unlab.val.)",trimws(lab),nomatch=0L)
        if(drp > 0){
            tab <- tab[-drp,,drop=FALSE]
            lab <- lab[-drp]
        }
        # If no missing categories remain, collapse Valid/Total into a
        # single "Percent" column.
        if(all(is.na(tab[,2])) && length(tab)){
            tab <- tab[,-2,drop=FALSE]
            colnames(tab) <- c("N","Percent")
        }
    }
    if(!length(tab))
        tab <- NULL
    else {
        # Combine value and label into aligned row names.
        rownames(tab) <- paste(format(rownames(tab),justify="right"),format(lab,justify="left"))
        attr(tab,"title") <- tab.title
    }
    return(tab)
}
# Flatten a codebook to a character vector of output lines: one formatted
# entry per variable, separated by horizontal rules sized to the current
# console width.
setMethod("as.character","codebook",function(x,...){
    width <- getOption("width")
    toprule <- paste(rep("=",width),collapse="")
    midrule <- paste(rep("-",width),collapse="")
    out <- mapply(format,x=x@.Data,name=names(x),toprule=toprule,midrule=midrule)
    unlist(out)
})

# Print method: format all entries and write them to the console.
setMethod("show","codebook",function(object){
    width <- getOption("width")
    toprule <- paste(rep("=",width),collapse="")
    midrule <- paste(rep("-",width),collapse="")
    out <- mapply(format,x=object@.Data,name=names(object),toprule=toprule,midrule=midrule)
    out <- unlist(out)
    writeLines(out)
})
Write.codebook <- function(x,file=stdout(),...){
    # Write the formatted codebook to a file or connection (default:
    # standard output), one entry per variable separated by rules.
    line.width <- getOption("width")
    rule.top <- paste(rep("=",line.width),collapse="")
    rule.mid <- paste(rep("-",line.width),collapse="")
    lines <- mapply(format, x=x@.Data, name=names(x),
                    toprule=rule.top, midrule=rule.mid)
    writeLines(unlist(lines), con=file)
}
# Render a single codebookEntry as a character vector of text lines:
# top rule, title (name plus description), optional wording, mid rule,
# the spec key/value lines, the statistics table and/or descriptives,
# and any remaining annotations.
format.codebookEntry <- function(x,name="",width=getOption("width"),
                                 toprule=paste(rep("=",width),collapse=""),
                                 midrule=paste(rep("-",width),collapse=""),
                                 ...
                                 ){
    annot <- x@annotation
    description <- annot["description"]
    wording <- annot["wording"]
    # "description" and "wording" are rendered specially; all other
    # annotations are appended verbatim at the end.
    if(length(annot)) annot <- annot[names(annot) %nin% c("description","wording")]
    title <- strwrap(if(length(description) && !is.na(description))
                         paste(name[1],sQuote(description))
                     else name[1] ,width=width,prefix="   ")
    wording <- if(length(wording) && !is.na(wording))
                   strwrap(dQuote(wording),width=width,prefix="   ")
               else NULL
    spec <- paste("  ",names(x@spec),x@spec)
    tab <- unclass(x@stats$tab)
    descr <- unclass(x@stats$descr)
    if(length(tab)){
        # The table is a 3-d array: rows x columns x panels (panels being
        # Unweighted/Weighted).  Format each panel and glue the row labels,
        # with the table title in the first slot, onto the left.
        tab.title <- attr(tab,"title")
        tab.d <- dim(tab)
        tab.dn <- dimnames(tab)
        tab <- apply(tab,3,format_cb_table)
        rn <- rownames(tab)
        rn[1] <- tab.title
        rn <- format(rn)
        if(tab.d[3]>1){
            # Multiple panels: add a panel-header row above the table.
            tab <- cbind(" "=rn,tab)
            tab <- rbind(colnames(tab),"",tab)
            tab <- apply(tab,2,format,justify="right")
        }
        else {
            tab <- cbind(rn,tab)
        }
        tab <- paste("   ",apply(tab,1,paste,collapse="  "))
    }
    if(!is.matrix(descr)) descr <- NULL
    if(length(descr)){
        # Format descriptives with aligned "name:" row labels; add a header
        # row when there is more than one column (Unweighted/Weighted).
        descr.rn <- format(paste(rownames(descr),":",sep=""),justify="right")
        if(is.numeric(descr[]))
            descr[] <- formatC(descr[],format="f",digits=3)
        descr[] <- gsub("NA","",descr[])
        # NOTE: a leftover browser() debug call was removed here; `descr`
        # is guaranteed to be a matrix at this point by the check above,
        # so ncol(descr) is always defined.
        if(ncol(descr) > 1){
            descr.rn <- c("","",descr.rn)
            descr <- rbind(colnames(descr),"",descr)
        }
        descr <- cbind(descr.rn,descr)
        descr <- apply(descr,2,format,justify="right")
        descr <- paste("   ",apply(descr,1,paste,collapse="  "))
    }
    # Combine table and descriptives (table first, separated by one blank).
    if(length(tab) && length(descr)){
        statstab <- format(c(tab,"",descr),justify="left")
    }
    else if(length(tab)){
        statstab <- tab
    }
    else if(length(descr)){
        statstab <- descr
    }
    else
        statstab <- NULL
    # Remaining annotations: "name:" header line followed by wrapped text.
    annot.out <- character()
    if(length(annot)){
        for(i in seq_len(length(annot))){
            annot.i <- annot[i]
            nm.i <- trimws(names(annot.i))
            annot.i <- strwrap(annot.i,width=getOption("width")-8-4)
            annot.i <- c(paste("   ",annot.i),"")
            if(nzchar(nm.i)){
                annot.i <- c(
                    paste("   ",nm.i,":",sep=""),
                    annot.i
                )
            }
            annot.out <- c(annot.out,annot.i)
        }
    }
    c(
        toprule,
        "",
        title,
        if(length(wording)) c(
            "",
            wording
        ),
        "",
        midrule,
        "",
        spec,
        "",
        statstab,
        "",
        if(length(annot.out)) annot.out
    )
}
#setMethod("format","codebookEntry",format.codebookEntry)
format_cb_table <- function(tab){
    # Format one panel of a codebook table as right-justified text lines:
    # integer (or, for non-integral counts, fixed-point) values in the
    # first column and one-decimal percentages in the remaining column(s),
    # topped by a column-header row and a blank spacer row.
    headers <- colnames(tab)
    fmt.count <- if(all(trunc(tab[,1])==tab[,1]))
                     function(v) formatC(v, format="d")
                 else
                     function(v) formatC(v, format="f", digits=1)
    fmt.pct <- function(v) formatC(v, format="f", digits=1)
    cols <- list(fmt.count(tab[,1,drop=FALSE]), fmt.pct(tab[,2]))
    if(ncol(tab) > 2)
        cols <- c(cols, list(fmt.pct(tab[,3])))
    out <- do.call(cbind, cols)
    # Blank out NA cells (e.g. the Valid percentage of the NA row).
    out[out=="NA"] <- ""
    out <- rbind(" "=headers, "", out)
    out <- format(out, justify="right")
    apply(out, 1, paste, collapse="  ")
}
# Subscripting methods for "codebook" objects: each returns a smaller
# codebook containing the selected entries, or NULL when nothing matches.

setMethod("[",signature(x="codebook",i="atomic",j="missing",drop="ANY"),
          function(x,i,j,...,drop=TRUE){
              # Translate character indices to positions first.
              if(is.character(i))
                  i <- match(i,names(x))
              cb <- x@.Data[i]
              if(!length(cb)) return(NULL)
              if(is.numeric(i) || is.logical(i))
                  names(cb) <- names(x)[i]
              else return(NULL) # NOTE(review): seemingly unreachable, since character i was converted above
              new("codebook",cb)
          })

setMethod("$",signature(x="codebook"),
          function(x,name){
              # `cb$foo` selects the single entry named "foo".
              i <- match(name,names(x))
              cb <- x@.Data[i]
              if(!length(cb)) return(NULL)
              names(cb) <- name
              new("codebook",cb)
          })

setMethod("[[",signature(x="codebook"),
          function(x,i,...){
              if(is.character(i))
                  i <- match(i,names(x))
              cb <- x@.Data[i]
              if(!length(cb)) return(NULL)
              if(is.numeric(i) || is.logical(i))
                  names(cb) <- names(x)[i]
              new("codebook",cb)
          })
| /R/codebook-methods.R | no_license | cran/memisc | R | false | false | 19,841 | r | setMethod("codebook","data.set",function(x,weights,unweighted=TRUE,...){
weights <- eval(substitute(weights),x,parent.frame())
cb <- lapply(x,codebookEntry,weights=weights,unweighted=unweighted)
new("codebook",cb)
})
setMethod("codebook","item",function(x,weights,unweighted=TRUE,...){
xname <- paste(deparse(substitute(x)))
cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
names(cb) <- xname
new("codebook",cb)
})
setMethod("codebook","data.frame",function(x,weights,unweighted=TRUE,...){
weights <- eval(substitute(weights),x,parent.frame())
cb <- lapply(x,codebookEntry,weights=weights,unweighted=unweighted)
new("codebook",cb)
})
setMethod("codebook","tbl_df",function(x,weights,unweighted=TRUE,...){
weights <- eval(substitute(weights),x,parent.frame())
cb <- lapply(x,codebookEntry,weights=weights,unweighted=unweighted)
new("codebook",cb)
})
setMethod("codebook","atomic",function(x,weights,unweighted=TRUE,...){
xname <- paste(deparse(substitute(x)))
cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
names(cb) <- xname
new("codebook",cb)
})
setMethod("codebook","factor",function(x,weights,unweighted=TRUE,...){
xname <- paste(deparse(substitute(x)))
cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
names(cb) <- xname
new("codebook",cb)
})
setMethod("codebook","ANY",function(x,weights,unweighted=TRUE,...){
xname <- paste(deparse(substitute(x)))
cb <- list(codebookEntry(x,weights=weights,unweighted=unweighted))
names(cb) <- xname
new("codebook",cb)
})
setMethod("codebook","NULL",function(x,weights,unweighted=TRUE,...)NULL)
setMethod("codebookEntry","item",function(x,weights,unweighted=TRUE,...){
annotation <- annotation(x)
filter <- x@value.filter
spec <- c(
"Storage mode:"=storage.mode(x),
"Measurement:"=measurement(x)
)
if(length(filter)) spec <- c(spec,
switch(class(filter),
missing.values = c("Missing values:" = format(filter)),
valid.values = c("Valid values:" = format(filter)),
valid.range = c("Valid range:" = format(filter))
))
stats <- switch(measurement(x),
nominal=,ordinal=codebookStatsCateg(x,weights=weights,unweighted=unweighted),
interval=,ratio=codebookStatsMetric(x,weights=weights,unweighted=unweighted)
)
cbe <- new("codebookEntry",
spec = spec,
stats = stats,
annotation = annotation
)
return(cbe)
})
NAtab <- function(isna,weights=NULL){
if(!length(weights))
weights <- rep(1,length(isna))
counts <- c(
Valid = sum(weights*!isna),
"Missing (NA)" = sum(weights*isna),
Total = sum(weights)
)
perc <- 100*counts/counts[3]
perc[3] <- NA
cbind(N=counts,
Percent=perc)
}
setMethod("codebookEntry","ANY",function(x,weights,unweighted=TRUE,...){
if(length(attr(x,"label")))
annotation <- c(description=attr(x,"label"))
else
annotation <- NULL
spec <- c("Storage mode:"=storage.mode(x))
isat <- is.atomic(x)
if(isat)
isna <- is.na(x)
else
isna <- FALSE
if(mode(x) == "numeric"){
descr <- Descriptives(x)
if(length(weights) && length(descr) > 2){ # There is more than a range
wdescr <- Descriptives(x,weights)
if(unweighted)
descr <- collect(Unweighted=format(descr),
Weighted=format(wdescr))
else
descr <- as.matrix(format(wdescr))
}
else
descr <- as.matrix(format(descr))
}
else
descr <- NULL
if(any(isna)){
tab <- NAtab(isna)
if(length(weights)){
wtab <- NAtab(isna,weights)
if(unweighted)
tab <- collect(Unweighted=tab,
Weighted=wtab)
else
tab <- array(wtab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
}
else
tab <- array(tab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
attr(tab,"title") <- "Valid and missing values"
} else
tab <- integer(0)
stats <- list(tab=tab,
descr=descr)
new("codebookEntry",
spec = spec,
stats = stats,
annotation = annotation
)
})
rwnexp <- function(mat,nms){
res <- array(0,c(length(nms),ncol(mat)),
dimnames=list(nms,colnames(mat)))
rn <- rownames(mat)
res[rn,] <- mat
return(res)
}
codebookTable_factor <- function(x,weights=NULL,...){
if(!length(weights))
weights <- rep(1,length(x))
isna <- is.na(x)
counts <- rowsum(weights[!isna],x[!isna])
lev <- levels(x)
counts <- rwnexp(counts,lev)
NAs <- sum(weights*isna)
tab <- cbind(counts,100*counts/sum(counts))
if(any(isna)) {
labs <- sQuote(levels(x))
if(nlevels(x))
labs <- paste(format(c(1:nlevels(x),NA),justify="right"),
format(c(labs,""),justify="left")
)
else
labs <- "NA"
tab <- rbind(tab,c(NAs,NA))
counts <- c(counts,NAs)
tab <- cbind(tab,100*counts/sum(counts))
colnames(tab) <- c("N","Valid","Total")
}
else {
labs <- sQuote(levels(x))
labs <- paste(format(1:nlevels(x),justify="right"),
format(labs,justify="left")
)
colnames(tab) <- c("N","Valid")
}
rownames(tab) <- labs
return(tab)
}
setMethod("codebookEntry","factor",function(x,weights=NULL,unweighted=TRUE,...){
tab <- codebookTable_factor(x)
if(length(weights)){
wtab <- codebookTable_factor(x,weights=weights)
if(unweighted)
tab <- collect(Unweighted=tab,
Weighted=wtab)
else {
tab <- wtab
tab.dn <- dimnames(tab)
tab.d <- dim(tab)
dim(tab) <- c(tab.d,1)
dimnames(tab) <- c(tab.dn,list(NULL))
}
}
else{
tab.dn <- dimnames(tab)
tab.d <- dim(tab)
dim(tab) <- c(tab.d,1)
dimnames(tab) <- c(tab.dn,list(NULL))
}
attr(tab,"title") <- "Levels and labels"
if(length(attr(x,"label")))
annotation <- c(description=attr(x,"label"))
else
annotation <- NULL
spec <- c("Storage mode:"=storage.mode(x))
spec <- if(is.ordered(x)) c(spec,
"Ordered factor with"=paste(nlevels(x),"levels"))
else c(spec,
"Factor with"=paste(nlevels(x),"levels"))
stats <- list(tab=tab)
new("codebookEntry",
spec = spec,
stats = stats,
annotation = annotation
)
})
setMethod("codebookEntry","character",function(x,weights=NULL,unweighted=TRUE,...){
spec <- c("Storage mode:"=storage.mode(x))
isna <- is.na(x)
descr <- codebookStatsChar(x)
descr <- descr$descr
if(any(isna)){
tab <- NAtab(isna)
if(length(weights)){
wtab <- NAtab(isna,weights)
if(unweighted)
tab <- collect(Unweighted=tab,
Weighted=tab)
else
tab <- array(wtab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
}
else
tab <- array(tab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
attr(tab,"title") <- "Valid and missing values"
} else
tab <- integer(0)
stats <- list(tab=tab,
descr=descr)
new("codebookEntry",
spec = spec,
stats = stats,
annotation = NULL
)
})
codebookTable_logical <- function(x,weights=NULL,...){
if(!length(weights))
weights <- rep(1,length(x))
isna <- is.na(x)
counts <- rowsum(weights[!isna],x[!isna])
NAs <- sum(weights*isna)
tab <- cbind(counts,100*counts/sum(counts))
if(any(isna)) {
labs <- c("FALSE","TRUE","NA")
tab <- rbind(tab,c(NAs,NA))
counts <- c(counts,NAs)
tab <- cbind(tab,100*counts/sum(counts))
colnames(tab) <- c("N","Valid","Total")
}
else {
labs <- c("FALSE","TRUE")
colnames(tab) <- c("N","Valid")
}
rownames(tab) <- labs
return(tab)
}
setMethod("codebookEntry","logical",function(x,weights=NULL,unweighted=TRUE,...){
tab <- codebookTable_logical(x)
if(length(weights)){
wtab <- codebookTable_logical(x,weights=weights)
if(unweighted)
tab <- collect(Unweighted=tab,
Weighted=wtab)
else {
tab <- wtab
tab.dn <- dimnames(tab)
tab.d <- dim(tab)
dim(tab) <- c(tab.d,1)
dimnames(tab) <- c(tab.dn,list(NULL))
}
}
else{
tab.dn <- dimnames(tab)
tab.d <- dim(tab)
dim(tab) <- c(tab.d,1)
dimnames(tab) <- c(tab.dn,list(NULL))
}
attr(tab,"title") <- "Logical values"
if(length(attr(x,"label")))
annotation <- c(description=attr(x,"label"))
else
annotation <- NULL
spec <- c("Storage mode:"=storage.mode(x))
stats <- list(tab=tab)
new("codebookEntry",
spec = spec,
stats = stats,
annotation = annotation
)
})
codebookStatsCateg <- function(x,weights=NULL,unweighted=TRUE,...){
vl <- labels(x)
ic <- inherits(x,"character")
if(length(vl) || !ic){
tab <- codebookTable_item(x)
tab.title <- attr(tab,"title")
if(length(weights)){
wtab <- codebookTable_item(x,weights)
if(unweighted)
tab <- collect(Unweighted=tab,
Weighted=wtab)
else
tab <- array(wtab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
}
else
tab <- array(tab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
attr(tab,"title") <- tab.title
list(tab=tab)
}
else
codebookStatsChar(x)
}
codebookStatsChar <- function(x,...){
descr <- structure(dQuote(range(x,na.rm=TRUE)),
names=c("Min","Max"))
descr <- as.matrix(descr)
list(
tab = NULL,
descr = descr
)
}
codebookStatsMetric <- function(x,weights=TRUE,unweighted=TRUE,...){
if(length(labels(x))){
tab <- codebookTable_item(x,drop.unlabelled=TRUE)
tab.title <- attr(tab,"title")
if(length(weights) && length(tab)){
wtab <- codebookTable_item(x,weights=weights,
drop.unlabelled=TRUE)
if(unweighted)
tab <- collect(Unweighted=tab,
Weighted=wtab)
else
tab <- array(wtab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
attr(tab,"title") <- tab.title
}
else if(length(tab)){
tab <- array(tab,
dim=c(dim(tab),1),
dimnames=c(dimnames(tab),
list(NULL)))
attr(tab,"title") <- tab.title
}
}
else
tab <- NULL
descr <- Descriptives(x)[1:4]
if(length(weights)){
wdescr <- Descriptives(x,weights)[1:4]
if(unweighted)
descr <- collect(Unweighted=descr,
Weighted=wdescr)
else
descr <- as.matrix(wdescr)
}
else
descr <- as.matrix(descr)
list(
tab=tab,
descr=descr
)
}
codebookTable_item <- function(x,weights=NULL,drop.unlabelled=FALSE,drop.empty=TRUE){
is.m <- is.missing(x)
isNA <- is.na(x)
vl <- labels(x)
if(!length(weights)){
weights <- rep(1,length(x))
}
if(length(vl)){
vvl <- vl@values
lvl <- vl@.Data
valid <- !is.missing2(vvl,x@value.filter)
i <- match(x@.Data,vvl,nomatch=0L)
tab <- drop(rowsum(weights,i))
if("0" %in% names(tab)){
ii <- match("0",names(tab))
tab <- tab[-ii]
}
if(length(tab) < length(vvl)){
tab0 <- numeric(length(vvl))
ii <- as.integer(names(tab))
tab0[ii] <- tab
tab <- tab0
}
names(tab) <- as.character(vvl)
lab <- sQuote(vl@.Data)
tab.title <- "Values and labels"
}
else {
valid <- logical(0)
tab <- c()
lab <- c()
i <- logical(length(x))
tab.title <- "Values"
}
ovld <- sum(weights*(!is.m & !i))
omiss <- sum(weights*(is.m & !i & !isNA))
NAs <- sum(weights*(isNA))
if(ovld | !drop.empty){
tab <- c(tab," "=ovld)
lab <- c(lab,"(unlab.val.)")
valid <- c(valid,TRUE)
}
if(omiss | !drop.empty){
tab <- c(tab," "=omiss)
lab <- c(lab,"(unlab.mss.)")
valid <- c(valid,FALSE)
}
if(NAs | !drop.empty){
tab <- c(tab,"NA"=NAs)
lab <- c(lab,"")
valid <- c(valid,FALSE)
}
if(any(!valid)){
missing.marker <- "M"
valid.marker <- paste(rep(" ",nchar(missing.marker)),collapse="")
lab <- paste(ifelse(valid,valid.marker,missing.marker),lab)
}
tab.nonzero <- tab>0
tab.keep <- tab.nonzero | !drop.empty
tab <- tab[valid | tab.keep]
lab <- lab[valid | tab.keep]
valid <- valid[valid | tab.keep]
if(any(!valid)){
vperc <- rep(NA,length(tab))
vtab <- tab[valid]
Nvalid <- sum(vtab)
if(Nvalid) vperc[valid] <- 100 * vtab/Nvalid
else vperc[valid] <- 0
tperc <- 100 * tab/sum(tab)
tab <- cbind(N=tab,Valid=vperc,Total=tperc)
} else {
tperc <- 100 * tab/sum(tab)
tab <- cbind(N=tab,Percent=tperc)
}
rownames(tab) <- names(tperc)
if(drop.unlabelled){
drp <- match("(unlab.val.)",trimws(lab),nomatch=0L)
if(drp > 0){
tab <- tab[-drp,,drop=FALSE]
lab <- lab[-drp]
}
if(all(is.na(tab[,2])) && length(tab)){
tab <- tab[,-2,drop=FALSE]
colnames(tab) <- c("N","Percent")
}
}
if(!length(tab))
tab <- NULL
else {
rownames(tab) <- paste(format(rownames(tab),justify="right"),format(lab,justify="left"))
attr(tab,"title") <- tab.title
}
return(tab)
}
setMethod("as.character","codebook",function(x,...){
width <- getOption("width")
toprule <- paste(rep("=",width),collapse="")
midrule <- paste(rep("-",width),collapse="")
out <- mapply(format,x=x@.Data,name=names(x),toprule=toprule,midrule=midrule)
unlist(out)
})
setMethod("show","codebook",function(object){
width <- getOption("width")
toprule <- paste(rep("=",width),collapse="")
midrule <- paste(rep("-",width),collapse="")
out <- mapply(format,x=object@.Data,name=names(object),toprule=toprule,midrule=midrule)
out <- unlist(out)
writeLines(out)
})
Write.codebook <- function(x,file=stdout(),...){
width <- getOption("width")
toprule <- paste(rep("=",width),collapse="")
midrule <- paste(rep("-",width),collapse="")
out <- mapply(format,x=x@.Data,name=names(x),toprule=toprule,midrule=midrule)
out <- unlist(out)
writeLines(out,con=file)
}
format.codebookEntry <- function(x,name="",width=getOption("width"),
toprule=paste(rep("=",width),collapse=""),
midrule=paste(rep("-",width),collapse=""),
...
){
annot <- x@annotation
description <- annot["description"]
wording <- annot["wording"]
if(length(annot)) annot <- annot[names(annot) %nin% c("description","wording")]
title <- strwrap(if(length(description) && !is.na(description))
paste(name[1],sQuote(description))
else name[1] ,width=width,prefix=" ")
wording <- if(length(wording) && !is.na(wording))
strwrap(dQuote(wording),width=width,prefix=" ")
else NULL
spec <- paste(" ",names(x@spec),x@spec)
tab <- unclass(x@stats$tab)
descr <- unclass(x@stats$descr)
if(length(tab)){
tab.title <- attr(tab,"title")
tab.d <- dim(tab)
tab.dn <- dimnames(tab)
tab <- apply(tab,3,format_cb_table)
#tab <- lapply(tab,unlist)
#tab <- do.call(cbind,tab)
rn <- rownames(tab)
rn[1] <- tab.title
rn <- format(rn)
if(tab.d[3]>1){
tab <- cbind(" "=rn,tab)
tab <- rbind(colnames(tab),"",tab)
tab <- apply(tab,2,format,justify="right")
}
else {
tab <- cbind(rn,tab)
}
tab <- paste(" ",apply(tab,1,paste,collapse=" "))
}
if(!is.matrix(descr)) descr <- NULL
if(length(descr)){
descr.rn <- format(paste(rownames(descr),":",sep=""),justify="right")
if(is.numeric(descr[]))
descr[] <- formatC(descr[],format="f",digits=3)
descr[] <- gsub("NA","",descr[])
if(!length(ncol(descr))) browser()
if(ncol(descr) > 1){
descr.rn <- c("","",descr.rn)
descr <- rbind(colnames(descr),"",descr)
}
descr <- cbind(descr.rn,descr)
descr <- apply(descr,2,format,justify="right")
descr <- paste(" ",apply(descr,1,paste,collapse=" "))
}
if(length(tab) && length(descr)){
statstab <- format(c(tab,"",descr),justify="left")
}
else if(length(tab)){
statstab <- tab
}
else if(length(descr)){
statstab <- descr
}
else
statstab <- NULL
annot.out <- character()
if(length(annot)){
for(i in seq_len(length(annot))){
annot.i <- annot[i]
nm.i <- trimws(names(annot.i))
annot.i <- strwrap(annot.i,width=getOption("width")-8-4)
annot.i <- c(paste(" ",annot.i),"")
if(nzchar(nm.i)){
annot.i <- c(
paste(" ",nm.i,":",sep=""),
annot.i
)
}
annot.out <- c(annot.out,annot.i)
}
}
c(
toprule,
"",
title,
if(length(wording)) c(
"",
wording
),
"",
midrule,
"",
spec,
"",
statstab,
"",
if(length(annot.out)) annot.out
)
}
#setMethod("format","codebookEntry",format.codebookEntry)
# Format one frequency/statistics table for codebook display.
#
# `tab` is a numeric matrix whose first column holds counts (or other
# statistics) and whose remaining columns hold percentages; only the
# first three columns are rendered.  The first column is printed as an
# integer when all of its non-missing values are whole numbers,
# otherwise with one decimal; percentage columns always get one decimal.
# Returns a character vector (one line per row), headed by the column
# names and a blank separator line, right-justified to a common width.
format_cb_table <- function(tab){
  cn <- colnames(tab)
  # na.rm = TRUE fixes a crash in the original: with an NA in the first
  # column, all(trunc(x) == x) returned NA and if(NA) raised an error.
  # NA cells are blanked out below anyway.
  first_is_whole <- all(trunc(tab[, 1]) == tab[, 1], na.rm = TRUE)
  fmt_first <- if (first_is_whole)
    formatC(tab[, 1, drop = FALSE], format = "d")
  else
    formatC(tab[, 1, drop = FALSE], format = "f", digits = 1)
  # Remaining (at most two) columns always use one decimal place.
  fmt_rest <- lapply(2:min(ncol(tab), 3L), function(j)
    formatC(tab[, j], format = "f", digits = 1))
  tab <- do.call(cbind, c(list(fmt_first), fmt_rest))
  tab[tab == "NA"] <- ""            # blank out missing cells
  tab <- rbind(" " = cn, "", tab)   # header row + empty separator row
  tab <- format(tab, justify = "right")
  apply(tab, 1, paste, collapse = " ")
}
# Subset a codebook with `[`: character indices are first translated to
# positions via the entry names, the selected entries keep their names,
# and the selection is wrapped in a fresh "codebook" object.  An empty
# selection — or an index that is neither numeric nor logical after
# translation — yields NULL.  `j`, `...` and `drop` are accepted for
# signature compatibility but ignored.
setMethod("[", signature(x = "codebook", i = "atomic", j = "missing", drop = "ANY"),
  function(x, i, j, ..., drop = TRUE) {
    if (is.character(i)) {
      i <- match(i, names(x))
    }
    entries <- x@.Data[i]
    if (length(entries) == 0L) {
      return(NULL)
    }
    if (!(is.numeric(i) || is.logical(i))) {
      return(NULL)
    }
    names(entries) <- names(x)[i]
    new("codebook", entries)
  })
# `$` extraction on a codebook: select the single entry whose name
# matches `name` and return it as a one-element "codebook".  Note that
# an unknown name still yields a one-element codebook (holding NULL),
# because match() returns NA rather than an empty index.
setMethod("$", signature(x = "codebook"),
  function(x, name) {
    pos <- match(name, names(x))
    entry <- x@.Data[pos]
    if (length(entry) == 0L) {
      return(NULL)
    }
    names(entry) <- name
    new("codebook", entry)
  })
# `[[` extraction on a codebook: like `[`, but an index that is neither
# numeric nor logical (after character-to-position translation) returns
# the selected entries without reassigning names instead of yielding
# NULL.  The result is always wrapped as a "codebook".
setMethod("[[", signature(x = "codebook"),
  function(x, i, ...) {
    if (is.character(i)) {
      i <- match(i, names(x))
    }
    picked <- x@.Data[i]
    if (length(picked) == 0L) {
      return(NULL)
    }
    if (is.numeric(i) || is.logical(i)) {
      names(picked) <- names(x)[i]
    }
    new("codebook", picked)
  })
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/servevolleyR.R
\docType{package}
\name{servevolleyR}
\alias{servevolleyR}
\alias{servevolleyR-package}
\title{servevolleyR}
\description{
Package for simulating tennis games, tiebreaks, sets, and matches
}
\author{
Thomas Heslop
}
| /man/servevolleyR.Rd | no_license | durtal/servevolleyR | R | false | false | 318 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/servevolleyR.R
\docType{package}
\name{servevolleyR}
\alias{servevolleyR}
\alias{servevolleyR-package}
\title{servevolleyR}
\description{
Package for simulating tennis games, tiebreaks, sets, and matches
}
\author{
Thomas Heslop
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Met_MatH.R
\docType{methods}
\name{plot-MatH}
\alias{plot-MatH}
\alias{plot,MatH-method}
\title{Method plot for a matrix of histograms}
\usage{
\S4method{plot}{MatH}(x, y = "missing", type = "HISTO", border = "black", angL = 330)
}
\arguments{
\item{x}{a \code{distributionH} object}
\item{y}{not used in this implementation}
\item{type}{(optional) a string describing the type of plot, default="HISTO".\cr
Other allowed types are \cr
"DENS"=a density approximation, \cr
"BOXPLOT"=a boxplot}
\item{border}{(optional) a string the color of the border of the plot, default="black".}
\item{angL}{(optional) angle of labels of rows (DEFAULT=330).}
}
\description{
An overloading plot function for a \code{MatH} object. The method returns a graphical representation
of the matrix of histograms.
}
\examples{
plot(BLOOD) # plots BLOOD dataset
\dontrun{
plot(BLOOD, type = "HISTO", border = "blue") # plots a matrix of histograms
plot(BLOOD, type = "DENS", border = "blue") # plots a matrix of densities
plot(BLOOD, type = "BOXPLOT") # plots boxplots
}
}
| /man/plot-MatH.Rd | no_license | cran/HistDAWass | R | false | true | 1,170 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Met_MatH.R
\docType{methods}
\name{plot-MatH}
\alias{plot-MatH}
\alias{plot,MatH-method}
\title{Method plot for a matrix of histograms}
\usage{
\S4method{plot}{MatH}(x, y = "missing", type = "HISTO", border = "black", angL = 330)
}
\arguments{
\item{x}{a \code{distributionH} object}
\item{y}{not used in this implementation}
\item{type}{(optional) a string describing the type of plot, default="HISTO".\cr
Other allowed types are \cr
"DENS"=a density approximation, \cr
"BOXPLOT"=a boxplot}
\item{border}{(optional) a string the color of the border of the plot, default="black".}
\item{angL}{(optional) angle of labels of rows (DEFAULT=330).}
}
\description{
An overloading plot function for a \code{MatH} object. The method returns a graphical representation
of the matrix of histograms.
}
\examples{
plot(BLOOD) # plots BLOOD dataset
\dontrun{
plot(BLOOD, type = "HISTO", border = "blue") # plots a matrix of histograms
plot(BLOOD, type = "DENS", border = "blue") # plots a matrix of densities
plot(BLOOD, type = "BOXPLOT") # plots a boxplots
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MT.R
\name{MT}
\alias{MT}
\title{Function to generate a unit space for the Mahalanobis-Taguchi (MT) method}
\usage{
MT(unit_space_data, includes_transformed_data = FALSE, ...)
}
\arguments{
\item{unit_space_data}{Matrix with n rows (samples) and p columns (variables).
Data to generate the unit space. All data should be
continuous values and should not have missing values.}
\item{includes_transformed_data}{If \code{TRUE}, then the transformed data
are included in a return object.}
\item{...}{Passed to \code{\link[base]{solve}} for computing the inverse of
the correlation matrix.}
}
\value{
\code{MT} returns an object of S3 \link[base]{class} "MT". An object
of class "MT" is a list containing the following components:
\item{A}{p x p (q x q) matrix. Inversed correlation matrix of
\code{unit_space_data} (the transformed data).}
\item{calc_A}{\code{function(x) solve(cor(x), ...)}.}
\item{transforms_data}{Function to be generated from
\code{\link{generates_normalization_function}} based
on \code{unit_space_data}.}
\item{distance}{Vector with length n. Distances from the unit space to each
sample.}
\item{n}{The number of samples.}
\item{q}{The number of variables after the data transformation. q is equal
to p.}
\item{x}{If \code{includes_transformed_data} is \code{TRUE}, then the
transformed data are included.}
}
\description{
\code{MT} generates a unit space for the Mahalanobis-Taguchi (MT) method. In
\code{\link{general_MT}}, the inversed correlation matrix is used for A and
the data are normalized based on \code{unit_space_data}.
}
\examples{
# 40 data for versicolor in the iris dataset
iris_versicolor <- iris[61:100, -5]
unit_space_MT <- MT(unit_space_data = iris_versicolor,
includes_transformed_data = TRUE)
# The following tol is a parameter passed to solve function.
unit_space_MT <- MT(unit_space_data = iris_versicolor,
includes_transformed_data = TRUE,
tol = 1e-9)
(unit_space_MT$distance)
}
\references{
Taguchi, G. (1995). Pattern Recognition and Quality Engineering (1).
\emph{Journal of Quality Engineering Society, 3}(2), 2-5. (In Japanese)
Taguchi, G., Wu, Y., & Chodhury, S. (2000).
\emph{Mahalanobis-Taguchi System.} McGraw-Hill Professional.
Taguchi, G., & Jugulum, R. (2002). \emph{The Mahalanobis-Taguchi strategy:
A pattern technology system.} John Wiley & Sons.
Woodall, W. H., Koudelik, R., Tsui, K. L., Kim, S. B., Stoumbos, Z. G., &
Carvounis, C. P. (2003). A review and analysis of the Mahalanobis-Taguchi
system. \emph{Technometrics, 45}(1), 1-15.
}
\seealso{
\code{\link[base]{solve}}, \code{\link{general_MT}},
\code{\link{generates_normalization_function}}, and
\code{\link{diagnosis.MT}}
}
| /man/MT.Rd | no_license | cran/MTSYS | R | false | true | 2,963 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MT.R
\name{MT}
\alias{MT}
\title{Function to generate a unit space for the Mahalanobis-Taguchi (MT) method}
\usage{
MT(unit_space_data, includes_transformed_data = FALSE, ...)
}
\arguments{
\item{unit_space_data}{Matrix with n rows (samples) and p columns (variables).
Data to generate the unit space. All data should be
continuous values and should not have missing values.}
\item{includes_transformed_data}{If \code{TRUE}, then the transformed data
are included in a return object.}
\item{...}{Passed to \code{\link[base]{solve}} for computing the inverse of
the correlation matrix.}
}
\value{
\code{MT} returns an object of S3 \link[base]{class} "MT". An object
of class "MT" is a list containing the following components:
\item{A}{p x p (q x q) matrix. Inversed correlation matrix of
\code{unit_space_data} (the transformed data).}
\item{calc_A}{\code{function(x) solve(cor(x), ...)}.}
\item{transforms_data}{Function to be generated from
\code{\link{generates_normalization_function}} based
on \code{unit_space_data}.}
\item{distance}{Vector with length n. Distances from the unit space to each
sample.}
\item{n}{The number of samples.}
\item{q}{The number of variables after the data transformation. q is equal
to p.}
\item{x}{If \code{includes_transformed_data} is \code{TRUE}, then the
transformed data are included.}
}
\description{
\code{MT} generates a unit space for the Mahalanobis-Taguchi (MT) method. In
\code{\link{general_MT}}, the inversed correlation matrix is used for A and
the data are normalized based on \code{unit_space_data}.
}
\examples{
# 40 data for versicolor in the iris dataset
iris_versicolor <- iris[61:100, -5]
unit_space_MT <- MT(unit_space_data = iris_versicolor,
includes_transformed_data = TRUE)
# The following tol is a parameter passed to solve function.
unit_space_MT <- MT(unit_space_data = iris_versicolor,
includes_transformed_data = TRUE,
tol = 1e-9)
(unit_space_MT$distance)
}
\references{
Taguchi, G. (1995). Pattern Recognition and Quality Engineering (1).
\emph{Journal of Quality Engineering Society, 3}(2), 2-5. (In Japanese)
Taguchi, G., Wu, Y., & Chodhury, S. (2000).
\emph{Mahalanobis-Taguchi System.} McGraw-Hill Professional.
Taguchi, G., & Jugulum, R. (2002). \emph{The Mahalanobis-Taguchi strategy:
A pattern technology system.} John Wiley & Sons.
Woodall, W. H., Koudelik, R., Tsui, K. L., Kim, S. B., Stoumbos, Z. G., &
Carvounis, C. P. (2003). A review and analysis of the Mahalanobis-Taguchi
system. \emph{Technometrics, 45}(1), 1-15.
}
\seealso{
\code{\link[base]{solve}}, \code{\link{general_MT}},
\code{\link{generates_normalization_function}}, and
\code{\link{diagnosis.MT}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_functions.R
\name{realtime.update}
\alias{realtime.update}
\title{Overwrites the Realtime API data model associated with this file with the provided JSON data model.}
\usage{
realtime.update(fileId, baseRevision = NULL)
}
\arguments{
\item{fileId}{The ID of the file that the Realtime API data model is associated with}
\item{baseRevision}{The revision of the model to diff the uploaded model against}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/drive
\item https://www.googleapis.com/auth/drive.file
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/drive, https://www.googleapis.com/auth/drive.file))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/drive/}{Google Documentation}
}
| /googledrivev2.auto/man/realtime.update.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 1,065 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_functions.R
\name{realtime.update}
\alias{realtime.update}
\title{Overwrites the Realtime API data model associated with this file with the provided JSON data model.}
\usage{
realtime.update(fileId, baseRevision = NULL)
}
\arguments{
\item{fileId}{The ID of the file that the Realtime API data model is associated with}
\item{baseRevision}{The revision of the model to diff the uploaded model against}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/drive
\item https://www.googleapis.com/auth/drive.file
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/drive, https://www.googleapis.com/auth/drive.file))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/drive/}{Google Documentation}
}
|
# Shiny framework, dashboard widgets, and the data-wrangling / charting
# packages used throughout the app.
library(shiny)
library(shinydashboard)
library(data.table)
library(DT)
library(ggplot2)
library(shinycssloaders)
library(shinydashboardPlus)
library(shinyWidgets)
library(leaflet)
library(rjson)
library(htmltools)
library(leaflet.minicharts)
library(echarts4r)
library(echarts4r.maps)
library(sparkline)
library(shinyBS)
# Project helpers and UI components.  Path.R presumably defines the
# DATA_PATH / COMPONENT_PATH / PAGE_PATH constants used below, and
# Functions.R the helpers getFinalAndDiff() / convertUnit2Ja() — confirm
# in those files.  Order matters: Path.R must come first.
source(file = 'Settings/Path.R', local = T, encoding = "UTF-8")
source(file = 'Utils/Functions.R', local = T, encoding = 'UTF-8')
source(file = 'Utils/ConfirmedPyramidData.R', local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, 'Notification.R'), local = T, encoding = 'UTF-8')
source(file = paste0(PAGE_PATH, 'Main/Utils/ValueBox.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/NewsList.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/clusterTabButton.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/SymptomsProgression.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/ComfirmedPyramid.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/Tendency.ui.R'), local = T, encoding = 'UTF-8')
# ====
# Data loading
# ====
# Daily new confirmed cases (wide table: one column per prefecture plus
# extra categories such as cruise ship / charter flight, per the column
# sums taken further below); NA means "no report" and is treated as 0.
byDate <- fread(paste0(DATA_PATH, 'byDate.csv'), header = T)
byDate[is.na(byDate)] <- 0
# Convert the first column (yyyymmdd integers) to Date objects.
# NOTE(review): this assumes the first column is named 'date' so that the
# assignment replaces it rather than appending a list column — confirm.
byDate$date <- lapply(byDate[, 1], function(x){as.Date(as.character(x), format = '%Y%m%d')})
# Map display data
mapData <- fread(paste0(DATA_PATH, 'result.map.csv'), header = T)
# Death counts (same wide layout as byDate)
death <- fread(paste0(DATA_PATH, 'death.csv'))
death[is.na(death)] <- 0
# Movement/activity history of confirmed cases
activity <- rjson::fromJSON(file = paste0(DATA_PATH, 'caseMap.json'), unexpected.escape = 'error')
# Latitude/longitude data
position <- fread(paste0(DATA_PATH, 'position.csv'))
# Per-prefecture summary from the Ministry of Health (MHLW)
detailByRegion <- fread(paste0(DATA_PATH, 'detailByRegion.csv'))
# PCR test counts per prefecture; 'N/A' cells become NA here and are
# filled right after this block by carrying values forward per prefecture.
provincePCR <- fread(paste0(DATA_PATH, 'provincePCR.csv'), header = T, na.strings = 'N/A')
provincePCR$date <- as.Date(provincePCR$日付)
setorderv(provincePCR, c('県名', 'date'))
# provincePCR[is.na(検査数), 検査数 := shift(検査数), by = .(県名, 日付)]
# Fill missing 検査数 (test counts) per prefecture: carry the last
# observation forward within each prefecture (the table is already
# ordered by 県名, date above) and use 0 where a prefecture has no
# earlier value.  This replaces a row-by-row loop that performed very
# slow per-row data.table assignments, evaluated 2:nrow(...) (which is
# c(2, 1) when nrow < 2), never filled an NA in the very first row, and
# propagated NA forward when a group started with missing values.
provincePCR[, 検査数 := nafill(nafill(検査数, type = "locf"), type = "const", fill = 0), by = 県名]
# Drop the aggregate and foreign comparison series so that only the 47
# prefectures remain ('全国(厚労省)' = MHLW nationwide total; the others
# are Italy, Lombardy and South Korea reference rows).
provincePCR <- provincePCR[!(県名 %in% c('全国(厚労省)', 'イタリア', 'ロンバルディア', '韓国'))]
# For each prefecture keep the row holding its maximum test count.
maxCheckNumberData <- provincePCR[provincePCR[, .I[which.max(検査数)], by = 県名]$V1]
# NOTE(review): order() returns a sorting permutation, not ranks; if
# `rank` is meant to be "position when sorted by test count", this should
# probably be frank()/rank() — confirm against where `rank` is consumed.
maxCheckNumberData[, rank := order(検査数, decreasing = T)]
# App statistics (disabled)
# statics <- fromJSON(file = 'https://stg.covid-2019.live/ncov-static/stats.json',
# unexpected.escape = 'error')
# Domestic daily report; dates are yyyymmdd integers, and remaining NAs
# are filled in place by carrying the last observation forward.
domesticDailyReport <- fread(paste0(DATA_PATH, 'domesticDailyReport.csv'))
domesticDailyReport$date <- as.Date(as.character(domesticDailyReport$date), '%Y%m%d')
setnafill(domesticDailyReport, type = 'locf')
# Charter-flight daily report
flightDailyReport <- fread(paste0(DATA_PATH, 'flightDailyReport.csv'))
flightDailyReport$date <- as.Date(as.character(flightDailyReport$date), '%Y%m%d')
setnafill(flightDailyReport, type = 'locf')
# Airport quarantine daily report
airportDailyReport <- fread(paste0(DATA_PATH, 'airportDailyReport.csv'))
airportDailyReport$date <- as.Date(as.character(airportDailyReport$date), '%Y%m%d')
setnafill(airportDailyReport, type = 'locf')
# Cruise-ship daily report
shipDailyReport <- fread(paste0(DATA_PATH, 'shipDailyReport.csv'))
shipDailyReport$date <- as.Date(as.character(shipDailyReport$date), '%Y%m%d')
setnafill(shipDailyReport, type = 'locf')
# Combined daily report
dailyReport <- fread(paste0(DATA_PATH, 'resultDailyReport.csv'))
dailyReport$date <- as.Date(as.character(dailyReport$date), '%Y%m%d')
setnafill(dailyReport, type = 'locf')
# Call-center consultation counts
callCenterDailyReport <- fread(paste0(DATA_PATH, 'callCenter.csv'))
callCenterDailyReport$date <- as.Date(as.character(callCenterDailyReport$date), '%Y%m%d')
# UI wording table; langCode selects the column to display.
lang <- fread(paste0(DATA_PATH, 'lang.csv'))
langCode <- 'ja'
# TODO language switching feature
# languageSet <- c('ja', 'cn')
# names(languageSet) <- c(lang[[langCode]][25], lang[[langCode]][26])
# ==== Cumulative totals ====
# PCR test counts.  getFinalAndDiff() appears to return the latest value
# (`$final`) and the change versus the previous day (`$diff`), given how
# its result is used below — confirm in Utils/Functions.R.
PCR_WITHIN <- getFinalAndDiff(domesticDailyReport$pcr)
PCR_SHIP <- getFinalAndDiff(shipDailyReport$pcr)
PCR_FLIGHT <- getFinalAndDiff(flightDailyReport$pcr)
PCR_AIRPORT <- getFinalAndDiff(airportDailyReport$pcr)
# Confirmed cases
TOTAL_DOMESITC <- sum(byDate[, c(2:48)]) # domestic PCR-positive cases (47 prefectures; cruise ship excluded)
TOTAL_OFFICER <- sum(byDate$検疫職員) # PCR-positive quarantine officers (cruise-ship related)
TOTAL_FLIGHT <- sum(byDate$チャーター便) # PCR-positive charter-flight returnees
TOTAL_WITHIN <- TOTAL_DOMESITC + TOTAL_OFFICER + TOTAL_FLIGHT # PCR-positive cases within Japan (domestic definition)
TOTAL_SHIP <- sum(byDate$クルーズ船) # PCR-positive cruise-ship cases
TOTAL_JAPAN <- TOTAL_WITHIN + TOTAL_SHIP # PCR-positive cases on Japanese territory
# Pie-chart input: confirmed cases by origin (labels come from lang.csv).
CONFIRMED_PIE_DATA <- data.table(category = c(lang[[langCode]][4], # domestic cases
                                              lang[[langCode]][35], # cruise ship
                                              lang[[langCode]][36] # charter flight
                                              ),
                                 value = c(TOTAL_DOMESITC + TOTAL_OFFICER, TOTAL_SHIP, TOTAL_FLIGHT))
# Discharges (recovered patients)
DISCHARGE_WITHIN <- getFinalAndDiff(domesticDailyReport$discharge)
DISCHARGE_FLIGHT <- getFinalAndDiff(flightDailyReport$discharge)
DISCHARGE_SHIP <- getFinalAndDiff(shipDailyReport$discharge)
DISCHARGE_AIRPORT <- getFinalAndDiff(airportDailyReport$discharge)
# Pie-chart input: discharges by origin, with day-over-day differences.
CURED_PIE_DATA <- data.table(
  category = c(
    lang[[langCode]][4], # domestic cases
    lang[[langCode]][36], # charter flight (asymptomatic)
    lang[[langCode]][35], # cruise ship
    '空港検疫'
  ),
  value = c(
    DISCHARGE_WITHIN$final,
    DISCHARGE_FLIGHT$final,
    DISCHARGE_SHIP$final,
    DISCHARGE_AIRPORT$final
  ),
  diff = c(
    DISCHARGE_WITHIN$diff,
    DISCHARGE_FLIGHT$diff,
    DISCHARGE_SHIP$diff,
    DISCHARGE_AIRPORT$diff
  )
)
DISCHARGE_TOTAL <- sum(CURED_PIE_DATA$value)
DISCHARGE_TOTAL_NO_SHIP <- DISCHARGE_TOTAL - DISCHARGE_SHIP$final
DISCHARGE_DIFF <- sum(CURED_PIE_DATA$diff)
DISCHARGE_DIFF_NO_SHIP <- DISCHARGE_DIFF - DISCHARGE_SHIP$diff
# Deaths
DEATH_DOMESITC <- sum(death[, c(2:48)]) # domestic deaths (47 prefectures; cruise ship excluded)
DEATH_OFFICER <- sum(death[]$検疫職員) # deaths among quarantine officers (cruise-ship related)
DEATH_FLIGHT <- sum(death$チャーター便) # charter-flight deaths
DEATH_WITHIN <- DEATH_DOMESITC + DEATH_OFFICER + DEATH_FLIGHT # deaths within Japan (domestic definition)
DEATH_SHIP <- sum(death$クルーズ船) # cruise-ship deaths
DEATH_JAPAN <- DEATH_WITHIN + DEATH_SHIP # deaths on Japanese territory
# Pie-chart input: deaths by origin.
DEATH_PIE_DATA <- data.table(category = c(lang[[langCode]][4], # domestic cases
                                          lang[[langCode]][35], # cruise ship
                                          lang[[langCode]][36] # charter flight
                                          ),
                             value = c(DEATH_DOMESITC + DEATH_OFFICER, DEATH_SHIP, DEATH_FLIGHT))
# ==== Today's data ====
# Confirmed: the last row of byDate holds today's increments.
byDateToday <- byDate[nrow(byDate), ]
todayConfirmed <- unlist(as.list(byDateToday[, 2:ncol(byDateToday)]))
HAS_TODAY_CONFIRMED <- todayConfirmed[todayConfirmed > 0] # categories with new cases today
# NOTE(review): indexes `death` by nrow(byDate); this assumes both tables
# always have the same number of rows — confirm.
deathToday <- death[nrow(byDate), ]
todayDeath <- unlist(as.list(deathToday[, 2:ncol(deathToday)]))
HAS_TODAY_DEATH <- todayDeath[todayDeath > 0] # categories with new deaths today
# ==== Day-over-day increments ====
# Confirmed
TOTAL_DOMESITC_DIFF <- sum(byDateToday[, c(2:48)]) # new domestic PCR-positive cases (cruise ship excluded)
TOTAL_OFFICER_DIFF <- sum(byDateToday[]$検疫職員) # new PCR-positive quarantine officers
TOTAL_FLIGHT_DIFF <- sum(byDateToday$チャーター便) # new PCR-positive charter-flight cases
TOTAL_WITHIN_DIFF <- TOTAL_DOMESITC_DIFF + TOTAL_OFFICER_DIFF + TOTAL_FLIGHT_DIFF # new PCR-positive cases within Japan
TOTAL_SHIP_DIFF <- sum(byDateToday$クルーズ船) # new PCR-positive cruise-ship cases
TOTAL_JAPAN_DIFF <- TOTAL_WITHIN_DIFF + TOTAL_SHIP_DIFF # new PCR-positive cases on Japanese territory
# Deaths
DEATH_DOMESITC_DIFF <- sum(deathToday[, c(2:48)]) # new domestic deaths (cruise ship excluded)
DEATH_OFFICER_DIFF <- sum(deathToday[]$検疫職員) # new deaths among quarantine officers
DEATH_FLIGHT_DIFF <- sum(deathToday$チャーター便) # new charter-flight deaths
DEATH_WITHIN_DIFF <- DEATH_DOMESITC_DIFF + DEATH_OFFICER_DIFF + DEATH_FLIGHT_DIFF # new deaths within Japan
DEATH_SHIP_DIFF <- sum(deathToday$クルーズ船) # new cruise-ship deaths
DEATH_JAPAN_DIFF <- DEATH_WITHIN_DIFF +DEATH_SHIP_DIFF # new deaths on Japanese territory
# Entries shown in the region selector, labelled as "name (total)".
regionName <- colSums(byDate[, 2:ncol(byDate)])
regionNamePref <- regionName[1:47] # the 47 prefectures come first
# Prefectures with no confirmed cases yet
regionZero <- names(regionNamePref[regionNamePref == 0])
regionNamePref <- sort(regionNamePref[regionNamePref > 0], decreasing = T)
regionNamePrefName <- paste0(names(regionNamePref), ' (', regionNamePref, ')')
# Remaining columns are the non-prefecture categories (ship, flight, ...).
regionNameOther <- regionName[48:length(regionName)]
regionNameOtherName <- paste0(names(regionNameOther), ' (', regionNameOther, ')')
regionName <- c('都道府県', names(regionNameOther), names(regionNamePref))
defaultSelectedRegionName <- regionName[1:3]
names(regionName) <- c(paste0('都道府県合計', ' (', TOTAL_DOMESITC, ')'),
                       regionNameOtherName,
                       regionNamePrefName)
regionName <- as.list(regionName)
# MHLW press releases
news <- fread(paste0(DATA_PATH, 'mhlw_houdou.csv'))
# Prefecture master data (id / Japanese name) for the selector widget.
provinceCode <- fread(paste0(DATA_PATH, 'prefectures.csv'))
provinceSelector <- provinceCode$id
names(provinceSelector) <- provinceCode$`name-ja`
# Case-level detail data (kenmo-nezumi source); '未' marks an unknown
# prefecture and is excluded from the selector options.
positiveDetail <- fread(paste0(DATA_PATH, 'positiveDetail.csv'))
selectProvinceOption <- unique(positiveDetail$都道府県)
selectProvinceOption <- selectProvinceOption[selectProvinceOption != '未']
# Case detail table: columns 1-2 numeric, columns 5, 6 and 9-11 factors.
detail <- fread(paste0(DATA_PATH, 'detail.csv'),
                colClasses = list(
                  numeric = c(1, 2),
                  factor = c(5, 6, 9:11)
                )
                )
detailColName <- colnames(detail)
detail[, comfirmedDay := as.Date(as.character(detail$comfirmedDay), format = "%Y%m%d")]
detail[, link := as.integer(detail$link)]
# Join each case to its press release and turn the link column into an
# HTML anchor for display; then restore the original column set/order.
detailMerged <- merge(detail, news, by.x = 'link', by.y = 'id')
detailMerged[, link := paste0("<a href='", detailMerged$link.y, "'>", detailMerged$title, "</a>")]
detail <- detailMerged[, detailColName, with = F][order(id)]
# Summary of case details by gender and age band
detailSummary <- detail[, .(count = .N), by = .(gender, age)]
# Symptom progression table
processData <- fread(input = paste0(DATA_PATH, 'resultProcessData.csv'))
# ====
# Constants
# ====
# "Updated N <unit> ago" label for the real-time case data, derived from
# the modification time of byDate.csv and rendered with convertUnit2Ja()
# (presumably a Japanese time-unit formatter — confirm in Functions.R).
UPDATE_DATETIME <- file.info(paste0(DATA_PATH, 'byDate.csv'))$mtime
latestUpdateDuration <- difftime(Sys.time(), UPDATE_DATETIME)
LATEST_UPDATE <- paste0(
  round(latestUpdateDuration[[1]], 0),
  convertUnit2Ja(latestUpdateDuration)
)
# Same label for the PCR / MHLW daily-report data.
UPDATE_DATETIME_DOMESTIC_DAILY_REPORT <- file.info(paste0(DATA_PATH, 'domesticDailyReport.csv'))$mtime
latestUpdateDomesticDailyReportDuration <- difftime(Sys.time(), UPDATE_DATETIME_DOMESTIC_DAILY_REPORT)
LATEST_UPDATE_DOMESTIC_DAILY_REPORT <- paste0(
  round(latestUpdateDomesticDailyReportDuration[[1]], 0),
  convertUnit2Ja(latestUpdateDomesticDailyReportDuration)
)
RECOVERED_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, 'recovered.csv'))$mtime
DEATH_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, 'death.csv'))$mtime
UPDATE_DATE <- as.Date(UPDATE_DATETIME)
DEATH_UPDATE_DATE <- as.Date(DEATH_FILE_UPDATE_DATETIME)
# TODO naming this vector inline did not work for some reason, so the
# names are attached separately here (original author's note).
showOption <- c('showShip', 'showFlight')
names(showOption) <- c(lang[[langCode]][35], lang[[langCode]][36])
# Pre-built "share on Twitter" intent URL containing today's totals.
twitterUrl <- paste0('https://twitter.com/intent/tweet?text=新型コロナウイルス感染速報:国内の感染確認',
                     TOTAL_JAPAN,
                     '人(クルーズ船含む)、',
                     byDate$date[nrow(byDate)],
                     'の現時点で新たに',
                     TOTAL_JAPAN_DIFF,
                     '人が確認されました。&hashtags=',
                     '新型コロナウイルス,新型コロナウイルス速報',
                     '&url=https://covid-2019.live/')
# Color palette used across plots and value boxes (AdminLTE-style
# light / middle / dark variants per hue).
lightRed <- '#F56954'
middleRed <- '#DD4B39'
darkRed <- '#B03C2D'
lightYellow <- '#F8BF76'
middleYellow <- '#F39C11'
darkYellow <- '#DB8B0A'
lightGreen <- '#00A65A'
middleGreen <- '#01A65A'
darkGreen <- '#088448'
superDarkGreen <- '#046938'
lightNavy <- '#5A6E82'
middelNavy <- '#001F3F'
darkNavy <- '#001934'
lightGrey <- '#F5F5F5'
lightBlue <- '#7BD6F5'
middleBlue <- '#00C0EF'
darkBlue <- '#00A7D0'
# Loading-spinner color for shinycssloaders widgets
options(spinner.color = middleRed)
# Mutable reactive state created once at app start.  The NULL slots
# (SIGNATE details, Hokkaido / Aomori / Kanagawa datasets) are presumably
# filled lazily by the server code — confirm in server.R.
GLOBAL_VALUE <- reactiveValues(
  signateDetail = NULL,
  signateLink = NULL,
  signatePlace = fread(file = paste0(DATA_PATH, 'resultSignatePlace.csv')),
  Academic = list(
    onSet2ConfirmedMap = NULL
  ),
  hokkaidoData = NULL,
  hokkaidoDataUpdateTime = NULL,
  hokkaidoPatients = NULL,
  Aomori = list(
    summary = NULL,
    patient = NULL,
    callCenter = NULL,
    contact = NULL,
    updateTime = NULL
  ),
  Kanagawa = list(
    summary = NULL,
    updateTime = NULL
  )
)
| /global.R | permissive | tokyoshare/2019-ncov-japan | R | false | false | 14,036 | r | library(shiny)
library(shinydashboard)
library(data.table)
library(DT)
library(ggplot2)
library(shinycssloaders)
library(shinydashboardPlus)
library(shinyWidgets)
library(leaflet)
library(rjson)
library(htmltools)
library(leaflet.minicharts)
library(echarts4r)
library(echarts4r.maps)
library(sparkline)
library(shinyBS)
source(file = 'Settings/Path.R', local = T, encoding = "UTF-8")
source(file = 'Utils/Functions.R', local = T, encoding = 'UTF-8')
source(file = 'Utils/ConfirmedPyramidData.R', local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, 'Notification.R'), local = T, encoding = 'UTF-8')
source(file = paste0(PAGE_PATH, 'Main/Utils/ValueBox.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/NewsList.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/clusterTabButton.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/SymptomsProgression.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/ComfirmedPyramid.ui.R'), local = T, encoding = 'UTF-8')
source(file = paste0(COMPONENT_PATH, '/Main/Tendency.ui.R'), local = T, encoding = 'UTF-8')
# ====
# データの読み込み
# ====
byDate <- fread(paste0(DATA_PATH, 'byDate.csv'), header = T)
byDate[is.na(byDate)] <- 0
byDate$date <- lapply(byDate[, 1], function(x){as.Date(as.character(x), format = '%Y%m%d')})
# マップ用データ読み込み
mapData <- fread(paste0(DATA_PATH, 'result.map.csv'), header = T)
# 死亡データ
death <- fread(paste0(DATA_PATH, 'death.csv'))
death[is.na(death)] <- 0
# 行動歴データ
activity <- rjson::fromJSON(file = paste0(DATA_PATH, 'caseMap.json'), unexpected.escape = 'error')
# 経度緯度データ
position <- fread(paste0(DATA_PATH, 'position.csv'))
# 厚労省の都道府県まとめデータ
detailByRegion <- fread(paste0(DATA_PATH, 'detailByRegion.csv'))
# 各都道府県のPCR検査数
provincePCR <- fread(paste0(DATA_PATH, 'provincePCR.csv'), header = T, na.strings = 'N/A')
provincePCR$date <- as.Date(provincePCR$日付)
setorderv(provincePCR, c('県名', 'date'))
# provincePCR[is.na(検査数), 検査数 := shift(検査数), by = .(県名, 日付)]
# Fill missing 検査数 (test counts) per prefecture: carry the last
# observation forward within each prefecture (the table is already
# ordered by 県名, date above) and use 0 where a prefecture has no
# earlier value.  This replaces a row-by-row loop that performed very
# slow per-row data.table assignments, evaluated 2:nrow(...) (which is
# c(2, 1) when nrow < 2), never filled an NA in the very first row, and
# propagated NA forward when a group started with missing values.
provincePCR[, 検査数 := nafill(nafill(検査数, type = "locf"), type = "const", fill = 0), by = 県名]
provincePCR <- provincePCR[!(県名 %in% c('全国(厚労省)', 'イタリア', 'ロンバルディア', '韓国'))]
maxCheckNumberData <- provincePCR[provincePCR[, .I[which.max(検査数)], by = 県名]$V1]
maxCheckNumberData[, rank := order(検査数, decreasing = T)]
# アプリ情報
# statics <- fromJSON(file = 'https://stg.covid-2019.live/ncov-static/stats.json',
# unexpected.escape = 'error')
# 国内の日報
domesticDailyReport <- fread(paste0(DATA_PATH, 'domesticDailyReport.csv'))
domesticDailyReport$date <- as.Date(as.character(domesticDailyReport$date), '%Y%m%d')
setnafill(domesticDailyReport, type = 'locf')
# チャーター便の日報
flightDailyReport <- fread(paste0(DATA_PATH, 'flightDailyReport.csv'))
flightDailyReport$date <- as.Date(as.character(flightDailyReport$date), '%Y%m%d')
setnafill(flightDailyReport, type = 'locf')
# 空港検疫の日報
airportDailyReport <- fread(paste0(DATA_PATH, 'airportDailyReport.csv'))
airportDailyReport$date <- as.Date(as.character(airportDailyReport$date), '%Y%m%d')
setnafill(airportDailyReport, type = 'locf')
# クルーズ船の日報
shipDailyReport <- fread(paste0(DATA_PATH, 'shipDailyReport.csv'))
shipDailyReport$date <- as.Date(as.character(shipDailyReport$date), '%Y%m%d')
setnafill(shipDailyReport, type = 'locf')
# 日報まとめ
dailyReport <- fread(paste0(DATA_PATH, 'resultDailyReport.csv'))
dailyReport$date <- as.Date(as.character(dailyReport$date), '%Y%m%d')
setnafill(dailyReport, type = 'locf')
# コールセンター
callCenterDailyReport <- fread(paste0(DATA_PATH, 'callCenter.csv'))
callCenterDailyReport$date <- as.Date(as.character(callCenterDailyReport$date), '%Y%m%d')
# 文言データ
lang <- fread(paste0(DATA_PATH, 'lang.csv'))
langCode <- 'ja'
# TODO 言語切り替え機能
# languageSet <- c('ja', 'cn')
# names(languageSet) <- c(lang[[langCode]][25], lang[[langCode]][26])
# ====総数基礎集計====
# PCR
PCR_WITHIN <- getFinalAndDiff(domesticDailyReport$pcr)
PCR_SHIP <- getFinalAndDiff(shipDailyReport$pcr)
PCR_FLIGHT <- getFinalAndDiff(flightDailyReport$pcr)
PCR_AIRPORT <- getFinalAndDiff(airportDailyReport$pcr)
# 確認
TOTAL_DOMESITC <- sum(byDate[, c(2:48)]) # 日本国内事例のPCR陽性数(クルーズ船関連者除く)
TOTAL_OFFICER <- sum(byDate$検疫職員) # クルーズ船関連の職員のPCR陽性数
TOTAL_FLIGHT <- sum(byDate$チャーター便) # チャーター便のPCR陽性数
TOTAL_WITHIN <- TOTAL_DOMESITC + TOTAL_OFFICER + TOTAL_FLIGHT # 日本国内事例のPCR陽性数
TOTAL_SHIP <- sum(byDate$クルーズ船) # クルーズ船のPCR陽性数
TOTAL_JAPAN <- TOTAL_WITHIN + TOTAL_SHIP # 日本領土内のPCR陽性数
CONFIRMED_PIE_DATA <- data.table(category = c(lang[[langCode]][4], # 国内事例
lang[[langCode]][35], # クルーズ船
lang[[langCode]][36] # チャーター便
),
value = c(TOTAL_DOMESITC + TOTAL_OFFICER, TOTAL_SHIP, TOTAL_FLIGHT))
# 退院
DISCHARGE_WITHIN <- getFinalAndDiff(domesticDailyReport$discharge)
DISCHARGE_FLIGHT <- getFinalAndDiff(flightDailyReport$discharge)
DISCHARGE_SHIP <- getFinalAndDiff(shipDailyReport$discharge)
DISCHARGE_AIRPORT <- getFinalAndDiff(airportDailyReport$discharge)
CURED_PIE_DATA <- data.table(
category = c(
lang[[langCode]][4], # 国内事例
lang[[langCode]][36], # チャーター便 (無症状)
lang[[langCode]][35], # クルーズ船
'空港検疫'
),
value = c(
DISCHARGE_WITHIN$final,
DISCHARGE_FLIGHT$final,
DISCHARGE_SHIP$final,
DISCHARGE_AIRPORT$final
),
diff = c(
DISCHARGE_WITHIN$diff,
DISCHARGE_FLIGHT$diff,
DISCHARGE_SHIP$diff,
DISCHARGE_AIRPORT$diff
)
)
DISCHARGE_TOTAL <- sum(CURED_PIE_DATA$value)
DISCHARGE_TOTAL_NO_SHIP <- DISCHARGE_TOTAL - DISCHARGE_SHIP$final
DISCHARGE_DIFF <- sum(CURED_PIE_DATA$diff)
DISCHARGE_DIFF_NO_SHIP <- DISCHARGE_DIFF - DISCHARGE_SHIP$diff
# 死亡
DEATH_DOMESITC <- sum(death[, c(2:48)]) # 日本国内事例の死亡数(クルーズ船関連者除く)
DEATH_OFFICER <- sum(death[]$検疫職員) # クルーズ船関連の職員の死亡数
DEATH_FLIGHT <- sum(death$チャーター便) # チャーター便の死亡数
DEATH_WITHIN <- DEATH_DOMESITC + DEATH_OFFICER + DEATH_FLIGHT # 日本国内事例の死亡数
DEATH_SHIP <- sum(death$クルーズ船) # クルーズ船の死亡数
DEATH_JAPAN <- DEATH_WITHIN + DEATH_SHIP # 日本領土内の死亡数
DEATH_PIE_DATA <- data.table(category = c(lang[[langCode]][4], # 国内事例
lang[[langCode]][35], # クルーズ船
lang[[langCode]][36] # チャーター便
),
value = c(DEATH_DOMESITC + DEATH_OFFICER, DEATH_SHIP, DEATH_FLIGHT))
# ====本日のデータ====
# 確認
byDateToday <- byDate[nrow(byDate), ] # 本日の差分データセット
todayConfirmed <- unlist(as.list(byDateToday[, 2:ncol(byDateToday)]))
HAS_TODAY_CONFIRMED <- todayConfirmed[todayConfirmed > 0] # 本日変化がある都道府県分類
deathToday <- death[nrow(byDate), ] # 本日の差分データセット
todayDeath <- unlist(as.list(deathToday[, 2:ncol(deathToday)]))
HAS_TODAY_DEATH <- todayDeath[todayDeath > 0] # 本日変化がある都道府県分類
# ====前日比べの基礎集計(差分)====
# 確認
TOTAL_DOMESITC_DIFF <- sum(byDateToday[, c(2:48)]) # 日本国内事例のPCR陽性数(クルーズ船関連者除く)
TOTAL_OFFICER_DIFF <- sum(byDateToday[]$検疫職員) # クルーズ船関連の職員のPCR陽性数
TOTAL_FLIGHT_DIFF <- sum(byDateToday$チャーター便) # チャーター便のPCR陽性数
TOTAL_WITHIN_DIFF <- TOTAL_DOMESITC_DIFF + TOTAL_OFFICER_DIFF + TOTAL_FLIGHT_DIFF # 日本国内事例のPCR陽性数
TOTAL_SHIP_DIFF <- sum(byDateToday$クルーズ船) # クルーズ船のPCR陽性数
TOTAL_JAPAN_DIFF <- TOTAL_WITHIN_DIFF + TOTAL_SHIP_DIFF # 日本領土内のPCR陽性数
# 死亡
DEATH_DOMESITC_DIFF <- sum(deathToday[, c(2:48)]) # 日本国内事例のPCR陽性数(クルーズ船関連者除く)
DEATH_OFFICER_DIFF <- sum(deathToday[]$検疫職員) # クルーズ船関連の職員のPCR陽性数
DEATH_FLIGHT_DIFF <- sum(deathToday$チャーター便) # チャーター便のPCR陽性数
DEATH_WITHIN_DIFF <- DEATH_DOMESITC_DIFF + DEATH_OFFICER_DIFF + DEATH_FLIGHT_DIFF # 日本国内事例のPCR陽性数
DEATH_SHIP_DIFF <- sum(deathToday$クルーズ船) # クルーズ船のPCR陽性数
DEATH_JAPAN_DIFF <- DEATH_WITHIN_DIFF +DEATH_SHIP_DIFF # 日本領土内のPCR陽性数
# 地域選択に表示する項目名
regionName <- colSums(byDate[, 2:ncol(byDate)])
regionNamePref <- regionName[1:47]
# 感染者確認されていない地域
regionZero <- names(regionNamePref[regionNamePref == 0])
regionNamePref <- sort(regionNamePref[regionNamePref > 0], decreasing = T)
regionNamePrefName <- paste0(names(regionNamePref), ' (', regionNamePref, ')')
regionNameOther <- regionName[48:length(regionName)]
regionNameOtherName <- paste0(names(regionNameOther), ' (', regionNameOther, ')')
regionName <- c('都道府県', names(regionNameOther), names(regionNamePref))
defaultSelectedRegionName <- regionName[1:3]
names(regionName) <- c(paste0('都道府県合計', ' (', TOTAL_DOMESITC, ')'),
regionNameOtherName,
regionNamePrefName)
regionName <- as.list(regionName)
# MHLW press releases and prefecture master data
news <- fread(paste0(DATA_PATH, 'mhlw_houdou.csv'))
provinceCode <- fread(paste0(DATA_PATH, 'prefectures.csv'))
provinceSelector <- provinceCode$id
names(provinceSelector) <- provinceCode$`name-ja`
# Detailed positive-case data (presumably the "Kenmonezumi" source — TODO confirm)
positiveDetail <- fread(paste0(DATA_PATH, 'positiveDetail.csv'))
selectProvinceOption <- unique(positiveDetail$都道府県)
selectProvinceOption <- selectProvinceOption[selectProvinceOption != '未'] # drop the "unknown" prefecture entry
# Detailed case data
detail <- fread(paste0(DATA_PATH, 'detail.csv'),
                colClasses = list(
                  numeric = c(1, 2),
                  factor = c(5, 6, 9:11)
                )
)
detailColName <- colnames(detail)
# Parse the confirmation date (stored as a yyyymmdd number) and the link id
detail[, comfirmedDay := as.Date(as.character(detail$comfirmedDay), format = "%Y%m%d")]
detail[, link := as.integer(detail$link)]
# Replace the numeric link id with an HTML anchor to the matching news item
detailMerged <- merge(detail, news, by.x = 'link', by.y = 'id')
detailMerged[, link := paste0("<a href='", detailMerged$link.y, "'>", detailMerged$title, "</a>")]
detail <- detailMerged[, detailColName, with = F][order(id)]
# Summary of the detailed data: case counts by gender and age bracket
detailSummary <- detail[, .(count = .N), by = .(gender, age)]
# Load the symptom-progression table
processData <- fread(input = paste0(DATA_PATH, 'resultProcessData.csv'))
# ====
# Constants
# ====
# Last-modified time of the real-time infection-count data
UPDATE_DATETIME <- file.info(paste0(DATA_PATH, 'byDate.csv'))$mtime
latestUpdateDuration <- difftime(Sys.time(), UPDATE_DATETIME)
# Human-readable "N <units> ago" label; convertUnit2Ja (defined elsewhere)
# renders the difftime unit in Japanese
LATEST_UPDATE <- paste0(
  round(latestUpdateDuration[[1]], 0),
  convertUnit2Ja(latestUpdateDuration)
)
# Update time of the PCR data (MHLW daily report)
UPDATE_DATETIME_DOMESTIC_DAILY_REPORT <- file.info(paste0(DATA_PATH, 'domesticDailyReport.csv'))$mtime
latestUpdateDomesticDailyReportDuration <- difftime(Sys.time(), UPDATE_DATETIME_DOMESTIC_DAILY_REPORT)
LATEST_UPDATE_DOMESTIC_DAILY_REPORT <- paste0(
  round(latestUpdateDomesticDailyReportDuration[[1]], 0),
  convertUnit2Ja(latestUpdateDomesticDailyReportDuration)
)
# File timestamps for the recovered/death datasets
RECOVERED_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, 'recovered.csv'))$mtime
DEATH_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, 'death.csv'))$mtime
UPDATE_DATE <- as.Date(UPDATE_DATETIME)
DEATH_UPDATE_DATE <- as.Date(DEATH_FILE_UPDATE_DATETIME)
# TODO: naming the vector inline did not work for some reason, so it is done here.
# Checkbox options with localised labels from the lang table.
showOption <- c('showShip', 'showFlight')
names(showOption) <- c(lang[[langCode]][35], lang[[langCode]][36])
# Pre-built share-on-Twitter URL containing today's totals
# (TOTAL_JAPAN is defined earlier in this file)
twitterUrl <- paste0('https://twitter.com/intent/tweet?text=新型コロナウイルス感染速報:国内の感染確認',
                     TOTAL_JAPAN,
                     '人(クルーズ船含む)、',
                     byDate$date[nrow(byDate)],
                     'の現時点で新たに',
                     TOTAL_JAPAN_DIFF,
                     '人が確認されました。&hashtags=',
                     '新型コロナウイルス,新型コロナウイルス速報',
                     '&url=https://covid-2019.live/')
# Colour palette used across the dashboard (light/middle/dark shades per hue)
lightRed <- '#F56954'
middleRed <- '#DD4B39'
darkRed <- '#B03C2D'
lightYellow <- '#F8BF76'
middleYellow <- '#F39C11'
darkYellow <- '#DB8B0A'
lightGreen <- '#00A65A'
middleGreen <- '#01A65A'
darkGreen <- '#088448'
superDarkGreen <- '#046938'
lightNavy <- '#5A6E82'
middelNavy <- '#001F3F'
darkNavy <- '#001934'
lightGrey <- '#F5F5F5'
lightBlue <- '#7BD6F5'
middleBlue <- '#00C0EF'
darkBlue <- '#00A7D0'
# Loading-spinner colour (shinycssloaders option)
options(spinner.color = middleRed)
# Shared reactive state for the Shiny app; NULL slots are filled lazily by the
# server when the corresponding data is first fetched.
GLOBAL_VALUE <- reactiveValues(
  signateDetail = NULL,
  signateLink = NULL,
  signatePlace = fread(file = paste0(DATA_PATH, 'resultSignatePlace.csv')),
  Academic = list(
    onSet2ConfirmedMap = NULL
  ),
  hokkaidoData = NULL,
  hokkaidoDataUpdateTime = NULL,
  hokkaidoPatients = NULL,
  Aomori = list(
    summary = NULL,
    patient = NULL,
    callCenter = NULL,
    contact = NULL,
    updateTime = NULL
  ),
  Kanagawa = list(
    summary = NULL,
    updateTime = NULL
  )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_training_jobs}
\alias{sagemaker_list_training_jobs}
\title{Lists training jobs}
\usage{
sagemaker_list_training_jobs(
NextToken = NULL,
MaxResults = NULL,
CreationTimeAfter = NULL,
CreationTimeBefore = NULL,
LastModifiedTimeAfter = NULL,
LastModifiedTimeBefore = NULL,
NameContains = NULL,
StatusEquals = NULL,
SortBy = NULL,
SortOrder = NULL,
WarmPoolStatusEquals = NULL
)
}
\arguments{
\item{NextToken}{If the result of the previous
\code{\link[=sagemaker_list_training_jobs]{list_training_jobs}} request was
truncated, the response includes a \code{NextToken}. To retrieve the next set
of training jobs, use the token in the next request.}
\item{MaxResults}{The maximum number of training jobs to return in the response.}
\item{CreationTimeAfter}{A filter that returns only training jobs created after the specified
time (timestamp).}
\item{CreationTimeBefore}{A filter that returns only training jobs created before the specified
time (timestamp).}
\item{LastModifiedTimeAfter}{A filter that returns only training jobs modified after the specified
time (timestamp).}
\item{LastModifiedTimeBefore}{A filter that returns only training jobs modified before the specified
time (timestamp).}
\item{NameContains}{A string in the training job name. This filter returns only training
jobs whose name contains the specified string.}
\item{StatusEquals}{A filter that retrieves only training jobs with a specific status.}
\item{SortBy}{The field to sort results by. The default is \code{CreationTime}.}
\item{SortOrder}{The sort order for results. The default is \code{Ascending}.}
\item{WarmPoolStatusEquals}{A filter that retrieves only training jobs with a specific warm pool
status.}
}
\description{
Lists training jobs.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_list_training_jobs/} for full documentation.
}
\keyword{internal}
| /cran/paws.machine.learning/man/sagemaker_list_training_jobs.Rd | permissive | paws-r/paws | R | false | true | 1,991 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_training_jobs}
\alias{sagemaker_list_training_jobs}
\title{Lists training jobs}
\usage{
sagemaker_list_training_jobs(
NextToken = NULL,
MaxResults = NULL,
CreationTimeAfter = NULL,
CreationTimeBefore = NULL,
LastModifiedTimeAfter = NULL,
LastModifiedTimeBefore = NULL,
NameContains = NULL,
StatusEquals = NULL,
SortBy = NULL,
SortOrder = NULL,
WarmPoolStatusEquals = NULL
)
}
\arguments{
\item{NextToken}{If the result of the previous
\code{\link[=sagemaker_list_training_jobs]{list_training_jobs}} request was
truncated, the response includes a \code{NextToken}. To retrieve the next set
of training jobs, use the token in the next request.}
\item{MaxResults}{The maximum number of training jobs to return in the response.}
\item{CreationTimeAfter}{A filter that returns only training jobs created after the specified
time (timestamp).}
\item{CreationTimeBefore}{A filter that returns only training jobs created before the specified
time (timestamp).}
\item{LastModifiedTimeAfter}{A filter that returns only training jobs modified after the specified
time (timestamp).}
\item{LastModifiedTimeBefore}{A filter that returns only training jobs modified before the specified
time (timestamp).}
\item{NameContains}{A string in the training job name. This filter returns only training
jobs whose name contains the specified string.}
\item{StatusEquals}{A filter that retrieves only training jobs with a specific status.}
\item{SortBy}{The field to sort results by. The default is \code{CreationTime}.}
\item{SortOrder}{The sort order for results. The default is \code{Ascending}.}
\item{WarmPoolStatusEquals}{A filter that retrieves only training jobs with a specific warm pool
status.}
}
\description{
Lists training jobs.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_list_training_jobs/} for full documentation.
}
\keyword{internal}
|
### Information ###
# It is important to filter out MOTUs that have a low read count, and are likely contamination.
# Samples with low read count should also be removed
# Non-prey will also be removed (such as host or parasitic nematodes for example)
# This script filters the Gillet and Zeale datasets separately, and then merges them into a single phyloseq dataset for downstream analysis
### Set up ###
library(phyloseq)
library(ggplot2)
library(dplyr)
library(vegan)
library(stringr)
library(knitr)
library(hilldiv)
library(kableExtra)
### 1) Load and filter MOTUs and Samples in Gillet dataset ###
load("../gillet_dataset/sumaclust98/phyloseq_object_clust_iden98_taxa_assigned_no_singletons.RData")
# Give MOTUs stable, readable IDs (gMOTU_1 ... gMOTU_n)
taxa_names(gillet.phylo) <- paste("gMOTU", seq(nrow(tax_table(gillet.phylo))), sep="_")
# remove samples with less than 1000 reads
gillet.phylo <- prune_samples(sample_sums(gillet.phylo) > 1000, gillet.phylo)
gillet.phylo
# Remove the 'sample.' prefix that obitools adds to sample names.
# fixed() makes the match literal: the original bare "sample." pattern is a
# regex in which '.' matches any character.
chck <- sample_names(gillet.phylo)
chck <- str_remove(chck, fixed("sample."))
sample_names(gillet.phylo) <- chck
### Examine the blanks (negative controls)
# MOTUs with any reads in either negative control
n <- which(otu_table(gillet.phylo)[,"G_NEG"] > 0)
m <- which(otu_table(gillet.phylo)[,"G_Neg"] > 0)
l <- unique(c(n,m))
# Keep the real MOTU identifiers before the display renumbering below discards
# them; they are needed to actually remove taxa from the phyloseq object.
blnk.motus <- taxa_names(gillet.phylo)[l]
blnk.df <- as.data.frame(as.matrix(tax_table(gillet.phylo))[l,4:7])
blnk.df$total.reads <- taxa_sums(gillet.phylo)[l]
blnk.df$Neg1.reads <- otu_table(gillet.phylo)[l, "G_NEG"]
# Percentage of each MOTU's total reads found in each blank
# (seq_len is safe for an empty table, unlike 1:nrow)
for (i in seq_len(nrow(blnk.df))) {blnk.df$Neg1.prop[i] <- (blnk.df$Neg1.reads[i] / blnk.df$total.reads[i]) * 100 }
blnk.df$Neg2.reads <- otu_table(gillet.phylo)[l, "G_Neg"]
for (i in seq_len(nrow(blnk.df))) {blnk.df$Neg2.prop[i] <- (blnk.df$Neg2.reads[i] / blnk.df$total.reads[i]) * 100 }
blnk.df$perc <- apply(blnk.df[,c("Neg1.prop","Neg2.prop")], 1, sum)
rownames(blnk.df) <- seq_len(nrow(blnk.df)) # renumber rows for display only
kable(blnk.df, caption = "MOTUs identified in the blanks")
### Remove taxa of which the blanks hold over 2% of the total reads for that MOTU
tab.nam <- blnk.df[,"perc"] > 2 # 2 is for 2%
tab.df <- blnk.df[tab.nam,]
# BUG FIX: the original took removeTaxa from rownames(tab.df), which had just
# been renumbered to "1","2",... — these never match taxa_names ("gMOTU_#"),
# so no contaminant taxa were actually removed. Use the saved MOTU IDs.
removeTaxa <- blnk.motus[tab.nam] # Lists the MOTUs to remove
phy.obj <- subset_taxa(gillet.phylo, !(taxa_names(gillet.phylo) %in% removeTaxa))
phy.obj
### Visualise vertebrate amplification in samples
# Load colour palette (one colour per order shown in the barplots)
pal.o = c("#f0a3ff",
          "#0075dc",
          "#993f00",
          "#4c005c",
          "#191919",
          "#005c31",
          "#2bce48",
          "#ffcc99",
          "#808080",
          "#94ffb5",
          "#8f7c00",
          "#9dcc00",
          "#c20088",
          "blue",
          "#ffa405",
          "#ffa8bb",
          "#426600",
          "#ff0010",
          "#5ef1f2",
          "#00998f",
          "#e0ff66",
          "indianred",
          "#003380",
          "green",
          "khaki4",
          "darkred",
          "coral4",
          "violetred2",
          "#0075dc",
          "#993f00")
# Agglomerate to order level and collapse everything that is not bat
# (Chiroptera) or shrew (Eulipotyphla) into "Other".
# (The original also assigned samples.phylo <- gillet.phylo first; that value
# was immediately overwritten, so the dead assignment is removed.)
samples.phylo <- tax_glom(gillet.phylo, taxrank = "order")
n <- grep("Chiroptera", tax_table(samples.phylo)[,"order"])
m <- grep("Eulipotyphla", tax_table(samples.phylo)[,"order"])
tax_table(samples.phylo)[-c(n, m), 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"order"])
mm.oth <- tax_glom(samples.phylo, taxrank = "order")
mm.oth
# Convert to relative abundance (percent of reads per sample)
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
ra.samples.bar <- phyloseq::plot_bar(mm.oth.ra) # extracts information needed for barplots
ra.samples.bar.data <- ra.samples.bar$data
p1.2 <- ggplot(ra.samples.bar.data, aes(x= Sample, y=Abundance, fill = order))
p1.2 + geom_bar(stat="identity", color="black") +
  scale_fill_manual(values = pal.o) +
  facet_wrap(~ mammal, scale = "free_x") +
  theme_classic() +
  theme(legend.position = "right") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
### Check range of vertebrate amplification in samples
# Collapse taxonomy to Chordata vs "Other" to quantify host/vertebrate
# read carry-over per sample (blanks excluded).
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK")
n <- grep("Chordata", tax_table(samples.phylo)[,"phylum"])
tax_table(samples.phylo)[-n, 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"phylum"])
mm.oth <- tax_glom(samples.phylo, taxrank = "phylum")
mm.oth
# Relative abundance (percent of reads per sample); row 1 is Chordata below
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
tax_table(mm.oth.ra)[,1:2]
otu_table(mm.oth.ra)
# Range of Vertebrate amplification across all samples
range(otu_table(mm.oth.ra)[1,])
# Range of Vertebrate amplification across GWTS
gwts.prop <- subset_samples(mm.oth.ra, mammal == "GWTS")
range(otu_table(gwts.prop)[1,])
# Range of Vertebrate amplification across Pygmies
pyg.prop <- subset_samples(mm.oth.ra, mammal == "Pygmy")
range(otu_table(pyg.prop)[1,])
# Range of Vertebrate amplification across Bats
bat.prop <- subset_samples(mm.oth.ra, mammal == "Bat")
range(otu_table(bat.prop)[1,])
#### Filtering ####
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK") # remove blanks
# Remove non-prey classes (host mammals, unassigned, non-dietary organisms)
diet.prey <- subset_taxa(samples.phylo, !(class %in% c("Mammalia",
                                                       "none",
                                                       "Actinopteri",
                                                       "Bdelloidea",
                                                       "Udeonychophora", # velvet worms
                                                       "Merostomata", # horse shoe crabs
                                                       "Gammaproteobacteria", # bacteria
                                                       "Magnoliopsida", # plants
                                                       "Monogononta", # rotifers
                                                       "Dothideomycetes", # fungi
                                                       "Trebouxiophyceae", # green algae
                                                       "Chondrichthyes", # Cartilaginous fish
                                                       "Mucoromycetes", # fungi
                                                       "Phylum_Endomyxa", # micro things
                                                       "Eutardigrada", # tartigrades!!
                                                       "Elardia", # Amoebas
                                                       "Cephalopoda", # Cephalopods
                                                       "Amphibia", # Amphibians
                                                       "Aves", # Birds
                                                       "Chromadorea", # roundworms
                                                       "Hexanauplia", # parasitic crustaceans
                                                       "Kingdom_Metazoa",
                                                       "Kingdom_",
                                                       "Phylum_Discosea", # amoebas
                                                       "Branchiopoda", # marine crustaceans
                                                       "Phylum_Nematoda")))
# remove samples with less than 1000 reads
sampl.filt <- prune_samples(sample_sums(diet.prey) > 1000, diet.prey)
otu.tab <- as.data.frame(otu_table(sampl.filt))
# hilldiv::copy_filt zeroes out within-sample counts below the threshold
new.otu.tab <- copy_filt(otu.tab, 0.0001) # Remove MOTUs with less than 0.01% reads in each sample
new.otu.tab <- as.matrix(new.otu.tab)
otu_table(sampl.filt) <- otu_table(new.otu.tab, taxa_are_rows = TRUE)
sampl.filt
# Remove any remaining taxa with less than 5 reads in total from dataset
final.diet <- prune_taxa(taxa_sums(sampl.filt) > 4, sampl.filt)
final.diet
# Check range of read depth of samples
range(sample_sums(final.diet))
# Check average read depth
mean(sample_sums(final.diet))
# Check range of total reads per taxa
range(taxa_sums(final.diet))
# hill_div packages assessment of read depth per sample, according to a shannon diversity equivilent
depth_cov(new.otu.tab,
          qvalue = 1)
## Rarefaction analysis
# Per-host rarefaction curves; rarecurve expects samples as rows, so the
# otu tables are transposed.
Bat_G <- prune_samples(sample_data(final.diet)$mammal == "Bat", final.diet)
df1 <- as.data.frame(t(as.matrix(otu_table(Bat_G))))
gwts_G <- prune_samples(sample_data(final.diet)$mammal == "GWTS", final.diet)
df2 <- as.data.frame(t(as.matrix(otu_table(gwts_G))))
pyg_G <- prune_samples(sample_data(final.diet)$mammal == "Pygmy", final.diet)
df3 <- as.data.frame(t(as.matrix(otu_table(pyg_G))))
set.seed(57)
r1 <- rarecurve(df1[,])
r2 <- rarecurve(df2[,])
r3 <- rarecurve(df3[,])
out <- r1 # change to the rarefaction curve to plot (r1, r2 or r3 - see above)
par(mar = c(4.5, 4.5, 1, 1)) # bottom, left, top, right
plot(c(1, 15000), c(1, 120), xlab = "Reads",
     ylab = "MOTUs", type = "n", cex.axis = 2, cex.lab = 2, las = 1)
#abline(v = 1000)
#abline(v = 5000)
# Overlay one black curve per sample
for (i in seq_along(out)) {
  N <- attr(out[[i]], "Subsample")
  lines(N, out[[i]], col = "black")
}
par(mar=c(5.1, 4.1, 4.1, 2.1)) # back to default plot parameters
# final phyloseq object for gillet dataset
final.diet.g <- final.diet
### Identify how many MOTUs are identified to Order, Family, Genus and Species level
# Work on a plain data.frame copy of the taxonomy table. pident (BLAST percent
# identity) arrives as factor/character, so coerce via character -> numeric.
df1 <- as.data.frame(as.matrix(tax_table(final.diet.g)))
df <- df1
df$pident <- as.character(df$pident)
df$pident <- as.numeric(df$pident)
###################
## Species
## MOTUs counted as species-level IDs: >= 98% identity and a name that is not a
## higher-rank placeholder (Genus_/Family_/... prefix) or an open-nomenclature
## qualifier (sp./nr./cf.).
## BUG FIX: the original greps used the misspelling "Kindgom_", which can never
## match the "Kingdom_" placeholder labels this pipeline uses (see the class
## filter above), so such entries were miscounted as identified.
n <- which(df[,"pident"] > 97.9999)
gn <- grep("Genus_", df[n,"species"])
fm <- grep("Family_", df[n,"species"])
or <- grep("Order_", df[n,"species"])
cl <- grep("Class_", df[n,"species"])
ph <- grep("Phylum_", df[n,"species"])
kg <- grep("Kingdom_", df[n,"species"])
nn <- grep("none", df[n,"species"])
s.p <- grep("_sp._", df[n,"species"])
n.r <- grep("_nr._", df[n,"species"])
c.f <- grep("_cf._", df[n,"species"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f))
sp.y <- length(df[n,"species"]) - x
# NOTE(review): if no pattern matched, n[-integer(0)] selects *nothing*
# (empty negative-index footgun); harmless only while placeholders are present.
chck <- df[n[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f)],"species"]
un.sp.y <- length(unique(chck))
###################
## Genus
# Drop the MOTUs already counted at species level, then apply the 95% threshold.
df <- df[-n,]
n <- which(df[,"pident"] > 94.9999)
gn <- grep("Genus_", df[n,"genus"])
fm <- grep("Family_", df[n,"genus"])
or <- grep("Order_", df[n,"genus"])
cl <- grep("Class_", df[n,"genus"])
ph <- grep("Phylum_", df[n,"genus"])
kg <- grep("Kingdom_", df[n,"genus"])
nn <- grep("none", df[n,"genus"])
s.p <- grep("_sp._", df[n,"genus"])
n.r <- grep("_nr._", df[n,"genus"])
c.f <- grep("_cf._", df[n,"genus"])
g.g <- grep("_gen._", df[n,"genus"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f,g.g))
gn.y <- length(df[n,"genus"]) - x
# Unique genus names are counted over the full table (df1), not the subset.
gn <- grep("Genus_", df1[,"genus"])
fm <- grep("Family_", df1[,"genus"])
or <- grep("Order_", df1[,"genus"])
cl <- grep("Class_", df1[,"genus"])
ph <- grep("Phylum_", df1[,"genus"])
kg <- grep("Kingdom_", df1[,"genus"])
nn <- grep("none", df1[,"genus"])
s.p <- grep("_sp._", df1[,"genus"])
n.r <- grep("_nr._", df1[,"genus"])
c.f <- grep("_cf._", df1[,"genus"])
g.g <- grep("_gen._", df1[,"genus"])
# NOTE(review): g.g is computed but not excluded below — confirm whether
# "_gen._" entries should also be dropped from the unique-name count.
chck <- df1[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f),"genus"]
un.gn.y <- length(unique(chck))
###################
## Family (93% identity threshold)
df <- df[-n,]
n <- which(df[,"pident"] > 92.999)
gn <- grep("Genus_", df[n,"family"])
fm <- grep("Family_", df[n,"family"])
or <- grep("Order_", df[n,"family"])
cl <- grep("Class_", df[n,"family"])
ph <- grep("Phylum_", df[n,"family"])
kg <- grep("Kingdom_", df[n,"family"])
nn <- grep("none", df[n,"family"])
s.p <- grep("_sp._", df[n,"family"])
n.r <- grep("_nr._", df[n,"family"])
c.f <- grep("_cf._", df[n,"family"])
g.g <- grep("_gen._", df[n,"family"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f,g.g))
fm.y <- length(df[n,"family"]) - x
gn <- grep("Genus_", df1[,"family"])
fm <- grep("Family_", df1[,"family"])
or <- grep("Order_", df1[,"family"])
cl <- grep("Class_", df1[,"family"])
ph <- grep("Phylum_", df1[,"family"])
kg <- grep("Kingdom_", df1[,"family"])
nn <- grep("none", df1[,"family"])
s.p <- grep("_sp._", df1[,"family"])
n.r <- grep("_nr._", df1[,"family"])
c.f <- grep("_cf._", df1[,"family"])
g.g <- grep("_gen._", df1[,"family"])
chck <- df1[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f),"family"]
un.fm.y <- length(unique(chck))
###################
## Order (90% identity threshold)
df <- df[-n,]
n <- which(df[,"pident"] > 89.9999)
gn <- grep("Genus_", df[n,"order"])
fm <- grep("Family_", df[n,"order"])
or <- grep("Order_", df[n,"order"])
cl <- grep("Class_", df[n,"order"])
ph <- grep("Phylum_", df[n,"order"])
kg <- grep("Kingdom_", df[n,"order"])
nn <- grep("none", df[n,"order"])
s.p <- grep("_sp._", df[n,"order"])
n.r <- grep("_nr._", df[n,"order"])
c.f <- grep("_cf._", df[n,"order"])
g.g <- grep("_gen._", df[n,"order"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f,g.g))
or.y <- length(df[n,"order"]) - x
gn <- grep("Genus_", df1[,"order"])
fm <- grep("Family_", df1[,"order"])
or <- grep("Order_", df1[,"order"])
cl <- grep("Class_", df1[,"order"])
ph <- grep("Phylum_", df1[,"order"])
kg <- grep("Kingdom_", df1[,"order"])
nn <- grep("none", df1[,"order"])
s.p <- grep("_sp._", df1[,"order"])
n.r <- grep("_nr._", df1[,"order"])
c.f <- grep("_cf._", df1[,"order"])
g.g <- grep("_gen._", df1[,"order"])
chck <- df1[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f),"order"]
un.or.y <- length(unique(chck))
df <- df1 # restore the unfiltered taxonomy table
# Save results to table to compare to Zeale primers later.
# Note: assigning a character first element coerces the whole row to character;
# numeric columns are re-coerced before plotting further below.
tabx <- data.frame(primer = NA,
                   order = NA, unique.orders = NA,
                   family = NA, unique.families = NA,
                   genus = NA, unique.genus = NA,
                   species = NA, unique.species = NA,
                   total.taxa = NA,
                   total.reads = NA)
tabx[1,] <- c("Gillet", or.y, un.or.y, fm.y, un.fm.y,
              gn.y, un.gn.y, sp.y, un.sp.y,
              ntaxa(final.diet.g), sum(sample_sums(final.diet.g)))
### 2) Load and filter MOTUs and Samples in Zeale dataset ###
load("../zeale_dataset/sumaclust98/phyloseq_object_zeale_clust_iden98_taxa_assigned_no_singletons.RData")
# Give MOTUs stable, readable IDs (zMOTU_1 ... zMOTU_n)
taxa_names(zeale.phylo) <- paste("zMOTU", seq(nrow(tax_table(zeale.phylo))), sep="_")
zeale.phylo
# Remove the 'sample.' prefix that obitools adds to sample names.
# fixed() makes the match literal: the original bare "sample." pattern is a
# regex in which '.' matches any character.
chck <- sample_names(zeale.phylo)
chck <- str_remove(chck, fixed("sample."))
sample_names(zeale.phylo) <- chck
# Examine the blanks (negative controls)
# MOTUs with any reads in either negative control
n <- which(otu_table(zeale.phylo)[,"Z_NEG"] > 0)
m <- which(otu_table(zeale.phylo)[,"Z_Neg"] > 0)
l <- unique(c(n,m))
# Keep the real MOTU identifiers before the display renumbering below discards
# them; they are needed to actually remove taxa from the phyloseq object.
blnk.motus <- taxa_names(zeale.phylo)[l]
blnk.df <- as.data.frame(as.matrix(tax_table(zeale.phylo))[l,4:7])
blnk.df$total.reads <- taxa_sums(zeale.phylo)[l]
blnk.df$Neg1.reads <- otu_table(zeale.phylo)[l, "Z_NEG"]
# Percentage of each MOTU's total reads found in each blank
# (seq_len is safe for an empty table, unlike 1:nrow)
for (i in seq_len(nrow(blnk.df))) {blnk.df$Neg1.prop[i] <- (blnk.df$Neg1.reads[i] / blnk.df$total.reads[i]) * 100 }
blnk.df$Neg2.reads <- otu_table(zeale.phylo)[l, "Z_Neg"]
for (i in seq_len(nrow(blnk.df))) {blnk.df$Neg2.prop[i] <- (blnk.df$Neg2.reads[i] / blnk.df$total.reads[i]) * 100 }
blnk.df$perc <- apply(blnk.df[,c("Neg1.prop","Neg2.prop")], 1, sum)
rownames(blnk.df) <- seq_len(nrow(blnk.df)) # renumber rows for display only
kable(blnk.df, caption = "MOTUs identified in the blanks")
# Remove taxa of which the blanks hold over 2% of the total reads for that MOTU
tab.nam <- blnk.df[,"perc"] > 2 # 2 is for 2%
tab.df <- blnk.df[tab.nam,]
# BUG FIX: the original took removeTaxa from rownames(tab.df), which had just
# been renumbered to "1","2",... — these never match taxa_names ("zMOTU_#"),
# so no contaminant taxa were actually removed. Use the saved MOTU IDs.
removeTaxa <- blnk.motus[tab.nam] # Lists the MOTUs to remove
phy.obj <- subset_taxa(zeale.phylo, !(taxa_names(zeale.phylo) %in% removeTaxa))
phy.obj
## Visualise any vertebrate amplification
# Agglomerate to order level and collapse everything that is not bat
# (Chiroptera) or shrew (Eulipotyphla) into "Other".
# (The original also assigned samples.phylo <- zeale.phylo first; that value
# was immediately overwritten, so the dead assignment is removed.)
samples.phylo <- tax_glom(zeale.phylo, taxrank = "order")
n <- grep("Chiroptera", tax_table(samples.phylo)[,"order"])
m <- grep("Eulipotyphla", tax_table(samples.phylo)[,"order"])
tax_table(samples.phylo)[-c(n, m), 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"order"])
mm.oth <- tax_glom(samples.phylo, taxrank = "order")
mm.oth
# Convert to relative abundance (percent of reads per sample)
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
ra.samples.bar <- phyloseq::plot_bar(mm.oth.ra) # extracts information needed for barplots
ra.samples.bar.data <- ra.samples.bar$data
p1.2 <- ggplot(ra.samples.bar.data, aes(x= Sample, y=Abundance, fill = order))
p1.2 + geom_bar(stat="identity", color="black") +
  scale_fill_manual(values = pal.o) +
  facet_wrap(~ mammal, scale = "free_x") +
  theme_classic() +
  theme(legend.position = "right") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Check range of vertebrate amplification in samples
# Collapse taxonomy to Chordata vs "Other" to quantify host/vertebrate
# read carry-over per sample (blanks excluded).
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK")
n <- grep("Chordata", tax_table(samples.phylo)[,"phylum"])
tax_table(samples.phylo)[-n, 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"phylum"])
mm.oth <- tax_glom(samples.phylo, taxrank = "phylum")
mm.oth
# Relative abundance (percent of reads per sample); row 1 is Chordata below
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
tax_table(mm.oth.ra)[,1:2]
otu_table(mm.oth.ra)
# Range of Vertebrate amplification across all samples
range(otu_table(mm.oth.ra)[1,])
# Range of Vertebrate amplification across GWTS
gwts.prop <- subset_samples(mm.oth.ra, mammal == "GWTS")
range(otu_table(gwts.prop)[1,])
# Range of Vertebrate amplification across Pygmies
pyg.prop <- subset_samples(mm.oth.ra, mammal == "Pygmy")
range(otu_table(pyg.prop)[1,])
# Range of Vertebrate amplification across Bats
bat.prop <- subset_samples(mm.oth.ra, mammal == "Bat")
range(otu_table(bat.prop)[1,])
#### Filtering ####
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK") # Remove negative controls
# Remove non-prey classes (host mammals, unassigned, non-dietary organisms);
# the exclusion list mirrors the Gillet section above.
diet.prey <- subset_taxa(samples.phylo, !(class %in% c("Mammalia",
                                                       "none",
                                                       "Actinopteri",
                                                       "Bdelloidea",
                                                       "Udeonychophora", # velvet worms
                                                       "Merostomata", # horse shoe crabs
                                                       "Gammaproteobacteria", # bacteria
                                                       "Magnoliopsida", # plants
                                                       "Monogononta", # rotifers
                                                       "Dothideomycetes", # fungi
                                                       "Trebouxiophyceae", # green algae
                                                       "Chondrichthyes", # Cartilaginous fish
                                                       "Mucoromycetes", # fungi
                                                       "Phylum_Endomyxa", # micro things
                                                       "Eutardigrada", # tartigrades!!
                                                       "Elardia", # Amoebas
                                                       "Cephalopoda", # Cephalopods
                                                       "Amphibia", # Amphibians
                                                       "Aves", # Birds
                                                       "Chromadorea", # roundworms
                                                       "Hexanauplia", # parasitic crustaceans
                                                       "Kingdom_Metazoa",
                                                       "Kingdom_",
                                                       "Phylum_Discosea", # amoebas
                                                       "Branchiopoda", # marine crustaceans
                                                       "Phylum_Nematoda")))
# Remove samples with less than 1000 reads
sampl.filt <- prune_samples(sample_sums(diet.prey) > 1000, diet.prey)
otu.tab <- as.data.frame(otu_table(sampl.filt))
# hilldiv::copy_filt zeroes out within-sample counts below the threshold
new.otu.tab <- copy_filt(otu.tab, 0.0001) # Remove MOTUs with less than 0.01% reads in each sample
new.otu.tab <- as.matrix(new.otu.tab)
otu_table(sampl.filt) <- otu_table(new.otu.tab, taxa_are_rows = TRUE)
sampl.filt
# Remove any remaining taxa with less than 5 reads in total from dataset
final.diet <- prune_taxa(taxa_sums(sampl.filt) > 4, sampl.filt)
final.diet
# Check range of read depth of samples
range(sample_sums(final.diet))
# Check average read depth
mean(sample_sums(final.diet))
# Check range of total reads per taxa
range(taxa_sums(final.diet))
# hill_div packages assessment of read depth per sample, according to a shannon diversity equivilent
depth_cov(new.otu.tab,
          qvalue = 1)
## Rarefaction analysis
# Per-host rarefaction curves; rarecurve expects samples as rows, so the
# otu tables are transposed.
Bat_Z <- prune_samples(sample_data(final.diet)$mammal == "Bat", final.diet)
df1 <- as.data.frame(t(as.matrix(otu_table(Bat_Z))))
gwts_Z <- prune_samples(sample_data(final.diet)$mammal == "GWTS", final.diet)
df2 <- as.data.frame(t(as.matrix(otu_table(gwts_Z))))
pyg_Z <- prune_samples(sample_data(final.diet)$mammal == "Pygmy", final.diet)
df3 <- as.data.frame(t(as.matrix(otu_table(pyg_Z))))
set.seed(57)
r1 <- rarecurve(df1[,])
r2 <- rarecurve(df2[,])
r3 <- rarecurve(df3[,])
out <- r1 # change to the rarefaction curve to plot (r1, r2 or r3 - see above)
par(mar = c(4.5, 4.5, 1, 1)) # bottom, left, top, right
plot(c(1, 15000), c(1, 120), xlab = "Reads",
     ylab = "MOTUs", type = "n", cex.axis = 2, cex.lab = 2, las = 1)
#abline(v = 1000)
#abline(v = 5000)
# Overlay one black curve per sample
for (i in seq_along(out)) {
  N <- attr(out[[i]], "Subsample")
  lines(N, out[[i]], col = "black")
}
par(mar=c(5.1, 4.1, 4.1, 2.1)) # back to default plot parameters
# Save Zeale dataset phyloseq object
final.diet.z <- final.diet
### Identify how many MOTUs are identified to Order, Family, Genus and Species level
# Same procedure as for the Gillet dataset above: plain data.frame copy of the
# taxonomy table, with pident coerced character -> numeric.
df1 <- as.data.frame(as.matrix(tax_table(final.diet.z)))
df <- df1
df$pident <- as.character(df$pident)
df$pident <- as.numeric(df$pident)
###################
## Species
## MOTUs counted as species-level IDs: >= 98% identity and a name that is not a
## higher-rank placeholder (Genus_/Family_/... prefix) or an open-nomenclature
## qualifier (sp./nr./cf.). Mirrors the Gillet section above.
## BUG FIX: the original greps used the misspelling "Kindgom_", which can never
## match the "Kingdom_" placeholder labels this pipeline uses (see the class
## filter above), so such entries were miscounted as identified.
n <- which(df[,"pident"] > 97.9999)
gn <- grep("Genus_", df[n,"species"])
fm <- grep("Family_", df[n,"species"])
or <- grep("Order_", df[n,"species"])
cl <- grep("Class_", df[n,"species"])
ph <- grep("Phylum_", df[n,"species"])
kg <- grep("Kingdom_", df[n,"species"])
nn <- grep("none", df[n,"species"])
s.p <- grep("_sp._", df[n,"species"])
n.r <- grep("_nr._", df[n,"species"])
c.f <- grep("_cf._", df[n,"species"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f))
sp.y <- length(df[n,"species"]) - x
# NOTE(review): if no pattern matched, n[-integer(0)] selects *nothing*
# (empty negative-index footgun); harmless only while placeholders are present.
chck <- df[n[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f)],"species"]
un.sp.y <- length(unique(chck))
###################
## Genus
# Drop the MOTUs already counted at species level, then apply the 95% threshold.
df <- df[-n,]
n <- which(df[,"pident"] > 94.9999)
gn <- grep("Genus_", df[n,"genus"])
fm <- grep("Family_", df[n,"genus"])
or <- grep("Order_", df[n,"genus"])
cl <- grep("Class_", df[n,"genus"])
ph <- grep("Phylum_", df[n,"genus"])
kg <- grep("Kingdom_", df[n,"genus"])
nn <- grep("none", df[n,"genus"])
s.p <- grep("_sp._", df[n,"genus"])
n.r <- grep("_nr._", df[n,"genus"])
c.f <- grep("_cf._", df[n,"genus"])
g.g <- grep("_gen._", df[n,"genus"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f,g.g))
gn.y <- length(df[n,"genus"]) - x
# Unique genus names are counted over the full table (df1), not the subset.
gn <- grep("Genus_", df1[,"genus"])
fm <- grep("Family_", df1[,"genus"])
or <- grep("Order_", df1[,"genus"])
cl <- grep("Class_", df1[,"genus"])
ph <- grep("Phylum_", df1[,"genus"])
kg <- grep("Kingdom_", df1[,"genus"])
nn <- grep("none", df1[,"genus"])
s.p <- grep("_sp._", df1[,"genus"])
n.r <- grep("_nr._", df1[,"genus"])
c.f <- grep("_cf._", df1[,"genus"])
g.g <- grep("_gen._", df1[,"genus"])
# NOTE(review): g.g is computed but not excluded below — confirm whether
# "_gen._" entries should also be dropped from the unique-name count.
chck <- df1[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f),"genus"]
un.gn.y <- length(unique(chck))
###################
## Family (93% identity threshold)
df <- df[-n,]
n <- which(df[,"pident"] > 92.999)
gn <- grep("Genus_", df[n,"family"])
fm <- grep("Family_", df[n,"family"])
or <- grep("Order_", df[n,"family"])
cl <- grep("Class_", df[n,"family"])
ph <- grep("Phylum_", df[n,"family"])
kg <- grep("Kingdom_", df[n,"family"])
nn <- grep("none", df[n,"family"])
s.p <- grep("_sp._", df[n,"family"])
n.r <- grep("_nr._", df[n,"family"])
c.f <- grep("_cf._", df[n,"family"])
g.g <- grep("_gen._", df[n,"family"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f,g.g))
fm.y <- length(df[n,"family"]) - x
gn <- grep("Genus_", df1[,"family"])
fm <- grep("Family_", df1[,"family"])
or <- grep("Order_", df1[,"family"])
cl <- grep("Class_", df1[,"family"])
ph <- grep("Phylum_", df1[,"family"])
kg <- grep("Kingdom_", df1[,"family"])
nn <- grep("none", df1[,"family"])
s.p <- grep("_sp._", df1[,"family"])
n.r <- grep("_nr._", df1[,"family"])
c.f <- grep("_cf._", df1[,"family"])
g.g <- grep("_gen._", df1[,"family"])
chck <- df1[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f),"family"]
un.fm.y <- length(unique(chck))
###################
## Order (90% identity threshold)
df <- df[-n,]
n <- which(df[,"pident"] > 89.9999)
gn <- grep("Genus_", df[n,"order"])
fm <- grep("Family_", df[n,"order"])
or <- grep("Order_", df[n,"order"])
cl <- grep("Class_", df[n,"order"])
ph <- grep("Phylum_", df[n,"order"])
kg <- grep("Kingdom_", df[n,"order"])
nn <- grep("none", df[n,"order"])
s.p <- grep("_sp._", df[n,"order"])
n.r <- grep("_nr._", df[n,"order"])
c.f <- grep("_cf._", df[n,"order"])
g.g <- grep("_gen._", df[n,"order"])
x <- length(c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f,g.g))
or.y <- length(df[n,"order"]) - x
gn <- grep("Genus_", df1[,"order"])
fm <- grep("Family_", df1[,"order"])
or <- grep("Order_", df1[,"order"])
cl <- grep("Class_", df1[,"order"])
ph <- grep("Phylum_", df1[,"order"])
kg <- grep("Kingdom_", df1[,"order"])
nn <- grep("none", df1[,"order"])
s.p <- grep("_sp._", df1[,"order"])
n.r <- grep("_nr._", df1[,"order"])
c.f <- grep("_cf._", df1[,"order"])
g.g <- grep("_gen._", df1[,"order"])
chck <- df1[-c(gn,fm,or,cl,ph,kg,nn,s.p,n.r,c.f),"order"]
un.or.y <- length(unique(chck))
df <- df1 # restore the unfiltered taxonomy table
#un.gn.y <- length(unique(df1[,"genus"]))
#un.fm.y <- length(unique(df1[,"family"]))
#un.or.y <- length(unique(df1[,"order"]))
# Append the Zeale summary row next to the Gillet row and view the table
tabx[2,] <- c("Zeale", or.y, un.or.y, fm.y, un.fm.y,
              gn.y, un.gn.y, sp.y, un.sp.y,
              ntaxa(final.diet.z), sum(sample_sums(final.diet.z)))
# View table
landscape(knitr::kable(tabx))
## Visualise the differences between taxa identification between primers ##
library(reshape2)
tabx
# Reshape the summary table to long format for a grouped barplot
df <- melt(data = tabx, id.vars = "primer",
           measure.vars = c("unique.orders",
                            "unique.families",
                            "unique.genus",
                            "unique.species"))
# tabx rows were coerced to character when built; restore numeric values
df$value <- as.numeric(df$value)
df$primer <- factor(df$primer, levels = c("Zeale", "Gillet"))
p <- ggplot(df, aes(x = variable, y=value)) +
  geom_bar(aes(fill = primer),
           stat = "identity",
           color = "black",
           position = position_dodge()) +
  theme_classic() +
  scale_fill_manual(values = c("darkblue", "pink")) +
  scale_x_discrete(labels=c("unique.orders" = "Identified\nOrders",
                            "unique.families" = "Identified\nFamilies",
                            "unique.genus" = "Identified\nGenera",
                            "unique.species" = "Identified\nSpecies")) +
  theme(axis.text.x = element_text(size = 12, angle = 0, hjust = 0.5,
                                   face = "bold"),
        axis.text.y = element_text(size = 12, face = "bold")) +
  labs(y = "Number Identified") +
  theme(legend.position = c(0.15,0.8)) +
  theme(legend.title = element_blank(),
        legend.text = element_text(size = 20)) +
  #theme(legend.position = "bottom") +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(size = 15, face = "bold"))
p
### 3) Merge The Gillet and Zeale Primer Datasets ###
# Print both filtered phyloseq objects for a final check before merging
final.diet.g
final.diet.z
# Record which primer set each sample came from
sample_data(final.diet.g)$primer <- rep("Gillet", nsamples(final.diet.g))
sample_data(final.diet.z)$primer <- rep("Zeale", nsamples(final.diet.z))
mrg <- merge_phyloseq(final.diet.g, final.diet.z)
# Replace the merged metadata with the master sample sheet
# (assumes column 1 of samplesheet.csv is the sample id -- TODO confirm)
new.df <- read.csv("./samplesheet.csv")
rownames(new.df) <- new.df$id
new.df <- new.df[,-1]
sample_data(mrg) <- new.df
# Convenience grouping factor: host species x primer, e.g. "Bat_Zeale"
sample_data(mrg)$mammal_primer <- paste(sample_data(mrg)$mammal,
                                        sample_data(mrg)$primer, sep="_")
save(mrg, file = "./merged_primer_dataset.RData")
| /Step_4__Filter_and_Merge.R | no_license | ShrewlockHolmes/Browett_and_Curran_et_al_2021_Mam_Biol | R | false | false | 27,393 | r | ### Information ###
# It is important to filter out MOTUs that have a low read count, and are likely contamination.
# Samples with low read count should also be removed
# Non-prey will also be removed (such as host or parasitic nematodes for example)
# This script filters the Gillet and Zeale datasets separately, and then merges them into a single phyloseq dataset for downstream analysis
### Set up ###
library(phyloseq)
library(ggplot2)
library(dplyr)
library(vegan)
library(stringr)
library(knitr)
library(hilldiv)
library(kableExtra)
### 1) Load and filter MOTUs and Samples in Gillet dataset ###
# Loads `gillet.phylo` (phyloseq object from the clustering step), renames
# MOTUs, drops low-depth samples, inspects the negative controls and removes
# MOTUs for which the blanks hold > 2 % of that MOTU's total reads.
# Produces: `phy.obj` (blank-screened phyloseq object) and `blnk.df`
# (per-MOTU read counts/proportions in the two blanks).
load("../gillet_dataset/sumaclust98/phyloseq_object_clust_iden98_taxa_assigned_no_singletons.RData")
taxa_names(gillet.phylo) <- paste("gMOTU", seq(nrow(tax_table(gillet.phylo))), sep="_")
# remove samples with less than 1000 reads
gillet.phylo <- prune_samples(sample_sums(gillet.phylo) > 1000, gillet.phylo)
gillet.phylo
# Remove 'sample.' part of sample names that obitools annoyingly adds.
# fixed() so the "." is matched literally rather than as a regex wildcard.
chck <- sample_names(gillet.phylo)
chck <- str_remove(chck, fixed("sample."))
sample_names(gillet.phylo) <- chck
### Examine the blanks?
# MOTUs with any reads in either negative control
n <- which(otu_table(gillet.phylo)[,"G_NEG"] > 0)
m <- which(otu_table(gillet.phylo)[,"G_Neg"] > 0)
l <- unique(c(n,m))
blnk.df <- as.data.frame(as.matrix(tax_table(gillet.phylo))[l,4:7])
blnk.df$total.reads <- taxa_sums(gillet.phylo)[l]
# Reads and % of each MOTU's total reads found in each blank
# (vectorised; replaces the original per-row loops)
blnk.df$Neg1.reads <- as.numeric(otu_table(gillet.phylo)[l, "G_NEG"])
blnk.df$Neg1.prop <- 100 * blnk.df$Neg1.reads / blnk.df$total.reads
blnk.df$Neg2.reads <- as.numeric(otu_table(gillet.phylo)[l, "G_Neg"])
blnk.df$Neg2.prop <- 100 * blnk.df$Neg2.reads / blnk.df$total.reads
blnk.df$perc <- blnk.df$Neg1.prop + blnk.df$Neg2.prop
# Keep the real MOTU names BEFORE the cosmetic renumbering below; they are
# needed to actually remove taxa from the phyloseq object.
motu.ids <- rownames(blnk.df)
rownames(blnk.df) <- seq_len(nrow(blnk.df))
kable(blnk.df, caption = "MOTUs identified in the blanks")
### Remove taxa of which the blanks hold over 2% of the total reads for that MOTU
tab.nam <- blnk.df[,"perc"] > 2 # 2 is for 2%
tab.df <- blnk.df[tab.nam,]
# BUGFIX: the original used rownames(tab.df), i.e. the renumbered "1","2",...
# labels, which never match taxa_names() ("gMOTU_i"), so no MOTU was removed.
removeTaxa <- motu.ids[tab.nam] # Lists the MOTUs to remove
phy.obj <- subset_taxa(gillet.phylo, !(taxa_names(gillet.phylo) %in% removeTaxa))
phy.obj
### Visualise vertebrate amplification in samples
# Load colour palette
# 30-colour categorical palette for the order-level barplot
pal.o = c("#f0a3ff",
          "#0075dc",
          "#993f00",
          "#4c005c",
          "#191919",
          "#005c31",
          "#2bce48",
          "#ffcc99",
          "#808080",
          "#94ffb5",
          "#8f7c00",
          "#9dcc00",
          "#c20088",
          "blue",
          "#ffa405",
          "#ffa8bb",
          "#426600",
          "#ff0010",
          "#5ef1f2",
          "#00998f",
          "#e0ff66",
          "indianred",
          "#003380",
          "green",
          "khaki4",
          "darkred",
          "coral4",
          "violetred2",
          "#0075dc",
          "#993f00")
# NOTE(review): this first assignment is immediately overwritten on the next
# line and has no effect
samples.phylo <- gillet.phylo
samples.phylo <- tax_glom(gillet.phylo, taxrank = "order")
# Keep host orders (bats, shrews) labelled; collapse everything else to "Other"
n <- grep("Chiroptera", tax_table(samples.phylo)[,"order"])
m <- grep("Eulipotyphla", tax_table(samples.phylo)[,"order"])
tax_table(samples.phylo)[-c(n, m), 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"order"])
mm.oth <- tax_glom(samples.phylo, taxrank = "order")
mm.oth
# Convert counts to per-sample relative abundance (%)
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
ra.samples.bar <- phyloseq::plot_bar(mm.oth.ra) # extracts information needed for barplots
ra.samples.bar.data <- ra.samples.bar$data
# Stacked barplot of relative abundance per sample, faceted by host species
p1.2 <- ggplot(ra.samples.bar.data, aes(x= Sample, y=Abundance, fill = order))
p1.2 + geom_bar(stat="identity", color="black") +
  scale_fill_manual(values = pal.o) +
  facet_wrap(~ mammal, scale = "free_x") +
  theme_classic() +
  theme(legend.position = "right") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
### Check range of vertebrate amplification in samples
# Collapse taxonomy to Chordata vs "Other" at phylum level, then report the
# per-sample % of reads that are vertebrate, overall and per host species
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK")
n <- grep("Chordata", tax_table(samples.phylo)[,"phylum"])
tax_table(samples.phylo)[-n, 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"phylum"])
mm.oth <- tax_glom(samples.phylo, taxrank = "phylum")
mm.oth
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
tax_table(mm.oth.ra)[,1:2]
otu_table(mm.oth.ra)
# Range of Vertebrate amplification across all samples
# NOTE(review): row 1 is assumed to be the Chordata bin after tax_glom --
# confirm against the tax_table printed above
range(otu_table(mm.oth.ra)[1,])
# Range of Vertebrate amplification across GWTS
gwts.prop <- subset_samples(mm.oth.ra, mammal == "GWTS")
range(otu_table(gwts.prop)[1,])
# Range of Vertebrate amplification across Pygmies
pyg.prop <- subset_samples(mm.oth.ra, mammal == "Pygmy")
range(otu_table(pyg.prop)[1,])
# Range of Vertebrate amplification across Bats
bat.prop <- subset_samples(mm.oth.ra, mammal == "Bat")
range(otu_table(bat.prop)[1,])
#### Filtering ####
# Remove blanks, non-prey taxa, low-depth samples and low-abundance MOTUs;
# result is `final.diet` (the filtered Gillet dataset).
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK") # remove blanks
# Remove non-prey
diet.prey <- subset_taxa(samples.phylo, !(class %in% c("Mammalia",
                                                       "none",
                                                       "Actinopteri",
                                                       "Bdelloidea",
                                                       "Udeonychophora", # velvet worms
                                                       "Merostomata", # horse shoe crabs
                                                       "Gammaproteobacteria", # bacteria
                                                       "Magnoliopsida", # plants
                                                       "Monogononta", # rotifers
                                                       "Dothideomycetes", # fungi
                                                       "Trebouxiophyceae", # green algae
                                                       "Chondrichthyes", # Cartilaginous fish
                                                       "Mucoromycetes", # fungi
                                                       "Phylum_Endomyxa", # micro things
                                                       "Eutardigrada", # tartigrades!!
                                                       "Elardia", # Amoebas
                                                       "Cephalopoda", # Cephalopods
                                                       "Amphibia", # Amphibians
                                                       "Aves", # Birds
                                                       "Chromadorea", # roundworms
                                                       "Hexanauplia", # parasitic crustaceans
                                                       "Kingdom_Metazoa",
                                                       "Kingdom_",
                                                       "Phylum_Discosea", # amoebas
                                                       "Branchiopoda", # marine crustaceans
                                                       "Phylum_Nematoda")))
# remove samples with less than 1000 reads
sampl.filt <- prune_samples(sample_sums(diet.prey) > 1000, diet.prey)
otu.tab <- as.data.frame(otu_table(sampl.filt))
# copy_filt() is from the hilldiv package (loaded above)
new.otu.tab <- copy_filt(otu.tab, 0.0001) # Remove MOTUs with less than 0.01% reads in each sample
new.otu.tab <- as.matrix(new.otu.tab)
otu_table(sampl.filt) <- otu_table(new.otu.tab, taxa_are_rows = TRUE)
sampl.filt
# Remove any remaining taxa with less than 5 reads in total from dataset
final.diet <- prune_taxa(taxa_sums(sampl.filt) > 4, sampl.filt)
final.diet
# Check range of read depth of samples
range(sample_sums(final.diet))
# Check average read depth
mean(sample_sums(final.diet))
# Check range of total reads per taxa
range(taxa_sums(final.diet))
# hill_div packages assessment of read depth per sample, according to a shannon diversity equivilent
depth_cov(new.otu.tab,
          qvalue = 1)
## Rarefaction analysis
# One MOTU table per host species (samples as rows for vegan::rarecurve)
Bat_G <- prune_samples(sample_data(final.diet)$mammal == "Bat", final.diet)
df1 <- as.data.frame(t(as.matrix(otu_table(Bat_G))))
gwts_G <- prune_samples(sample_data(final.diet)$mammal == "GWTS", final.diet)
df2 <- as.data.frame(t(as.matrix(otu_table(gwts_G))))
pyg_G <- prune_samples(sample_data(final.diet)$mammal == "Pygmy", final.diet)
df3 <- as.data.frame(t(as.matrix(otu_table(pyg_G))))
# Seed for reproducible rarefaction subsampling
set.seed(57)
r1 <- rarecurve(df1[,])
r2 <- rarecurve(df2[,])
r3 <- rarecurve(df3[,])
out <- r1 # change to the rarefaction curve to plot (r1, r2 or r3 - see above)
par(mar = c(4.5, 4.5, 1, 1)) # bottom, left, top, right
# Empty canvas; each sample's curve is drawn onto it below
plot(c(1, 15000), c(1, 120), xlab = "Reads",
     ylab = "MOTUs", type = "n", cex.axis = 2, cex.lab = 2, las = 1)
#abline(v = 1000)
#abline(v = 5000)
for (i in seq_along(out)) {
  N <- attr(out[[i]], "Subsample")
  lines(N, out[[i]], col = "black")
}
par(mar=c(5.1, 4.1, 4.1, 2.1)) # back to default plot parameters
# final phyloseq object for gillet dataset
final.diet.g <- final.diet
### Identify how many MOTUs are identified to Order, Family, Genus and Species level
#
# MOTUs are assigned to the deepest rank supported by their BLAST identity
# (pident): >98% -> species, >95% -> genus, >93% -> family, >90% -> order.
# At each rank we count labels that are cleanly identified, i.e. NOT a
# placeholder ("Genus_...", ..., "none") and NOT open nomenclature
# ("_sp._", "_nr._", "_cf._", "_gen._").
#
# Fixes relative to the original inline version:
#  * "Kindgom_" typo corrected to "Kingdom_" (the prefix used elsewhere in
#    this script), so kingdom placeholders are actually screened out;
#  * "_gen._" matches are now excluded from the unique-name counts (they were
#    computed but accidentally left out of the exclusion index);
#  * overlapping pattern matches are de-duplicated so a label matching two
#    patterns is not subtracted twice;
#  * empty match sets no longer wipe the table (x[-integer(0)] selects
#    nothing in R, which would silently zero the counts).

# Indices of labels that are placeholders or open nomenclature.
# Patterns are regular expressions, as in the original grep() calls.
bad_label_idx <- function(labs, include.gen = TRUE) {
  pats <- c("Genus_", "Family_", "Order_", "Class_", "Phylum_", "Kingdom_",
            "none", "_sp._", "_nr._", "_cf._")
  if (include.gen) pats <- c(pats, "_gen._")
  unique(unlist(lapply(pats, grep, x = labs)))
}
# Number of labels cleanly identified at this rank
n_resolved <- function(labs, include.gen = TRUE) {
  length(labs) - length(bad_label_idx(labs, include.gen))
}
# Number of unique cleanly identified names
n_unique_resolved <- function(labs, include.gen = TRUE) {
  bad <- bad_label_idx(labs, include.gen)
  if (length(bad) > 0) labs <- labs[-bad] # guard against empty negative index
  length(unique(labs))
}

df1 <- as.data.frame(as.matrix(tax_table(final.diet.g)))
df <- df1
df$pident <- as.numeric(as.character(df$pident)) # factor-safe conversion

## Species level: pident > 98 %
n <- which(df[,"pident"] > 97.9999)
# species labels are never screened for "_gen._" (as in the original)
sp.y <- n_resolved(df[n,"species"], include.gen = FALSE)
# unique species among the confidently assigned MOTUs only
un.sp.y <- n_unique_resolved(df[n,"species"], include.gen = FALSE)

## Genus level: pident > 95 % among MOTUs not already resolved to species
if (length(n) > 0) df <- df[-n,]
n <- which(df[,"pident"] > 94.9999)
gn.y <- n_resolved(df[n,"genus"])
# unique genus names are counted over the FULL table (df1), as originally
un.gn.y <- n_unique_resolved(df1[,"genus"])

## Family level: pident > 93 %
if (length(n) > 0) df <- df[-n,]
n <- which(df[,"pident"] > 92.999)
fm.y <- n_resolved(df[n,"family"])
un.fm.y <- n_unique_resolved(df1[,"family"])

## Order level: pident > 90 %
if (length(n) > 0) df <- df[-n,]
n <- which(df[,"pident"] > 89.9999)
or.y <- n_resolved(df[n,"order"])
un.or.y <- n_unique_resolved(df1[,"order"])

df <- df1 # restore the full taxonomy table

# Save results to table to compare to Zeale primers later
tabx <- data.frame(primer = NA,
                   order = NA, unique.orders = NA,
                   family = NA, unique.families = NA,
                   genus = NA, unique.genus = NA,
                   species = NA, unique.species = NA,
                   total.taxa = NA,
                   total.reads = NA)
tabx[1,] <- c("Gillet", or.y, un.or.y, fm.y, un.fm.y,
              gn.y, un.gn.y, sp.y, un.sp.y,
              ntaxa(final.diet.g), sum(sample_sums(final.diet.g)))
### 2) Load and filter MOTUs and Samples in Zeale dataset ###
# Loads `zeale.phylo`, renames MOTUs, strips the obitools "sample." prefix,
# inspects the negative controls and removes MOTUs for which the blanks hold
# > 2 % of that MOTU's total reads. Produces `phy.obj` and `blnk.df`.
load("../zeale_dataset/sumaclust98/phyloseq_object_zeale_clust_iden98_taxa_assigned_no_singletons.RData")
taxa_names(zeale.phylo) <- paste("zMOTU", seq(nrow(tax_table(zeale.phylo))), sep="_")
zeale.phylo
# Remove 'sample.' part of sample names that obitools adds.
# fixed() so the "." is matched literally rather than as a regex wildcard.
chck <- sample_names(zeale.phylo)
chck <- str_remove(chck, fixed("sample."))
sample_names(zeale.phylo) <- chck
# Examine the blanks?
# MOTUs with any reads in either negative control
n <- which(otu_table(zeale.phylo)[,"Z_NEG"] > 0)
m <- which(otu_table(zeale.phylo)[,"Z_Neg"] > 0)
l <- unique(c(n,m))
blnk.df <- as.data.frame(as.matrix(tax_table(zeale.phylo))[l,4:7])
blnk.df$total.reads <- taxa_sums(zeale.phylo)[l]
# Reads and % of each MOTU's total reads found in each blank
# (vectorised; replaces the original per-row loops)
blnk.df$Neg1.reads <- as.numeric(otu_table(zeale.phylo)[l, "Z_NEG"])
blnk.df$Neg1.prop <- 100 * blnk.df$Neg1.reads / blnk.df$total.reads
blnk.df$Neg2.reads <- as.numeric(otu_table(zeale.phylo)[l, "Z_Neg"])
blnk.df$Neg2.prop <- 100 * blnk.df$Neg2.reads / blnk.df$total.reads
blnk.df$perc <- blnk.df$Neg1.prop + blnk.df$Neg2.prop
# Keep the real MOTU names BEFORE the cosmetic renumbering below; they are
# needed to actually remove taxa from the phyloseq object.
motu.ids <- rownames(blnk.df)
rownames(blnk.df) <- seq_len(nrow(blnk.df))
kable(blnk.df, caption = "MOTUs identified in the blanks")
# Remove taxa of which the blanks hold over 2% of the total reads for that MOTU
tab.nam <- blnk.df[,"perc"] > 2 # 2 is for 2%
tab.df <- blnk.df[tab.nam,]
# BUGFIX: the original used rownames(tab.df), i.e. the renumbered "1","2",...
# labels, which never match taxa_names() ("zMOTU_i"), so no MOTU was removed.
removeTaxa <- motu.ids[tab.nam] # Lists the MOTUs to remove
phy.obj <- subset_taxa(zeale.phylo, !(taxa_names(zeale.phylo) %in% removeTaxa))
phy.obj
## Visualise any vertebrate amplification
# Same visual check as for the Gillet dataset, applied to the Zeale primers.
# NOTE(review): this first assignment is immediately overwritten on the next
# line and has no effect
samples.phylo <- zeale.phylo
samples.phylo <- tax_glom(zeale.phylo, taxrank = "order")
# Keep host orders (bats, shrews) labelled; collapse everything else to "Other"
n <- grep("Chiroptera", tax_table(samples.phylo)[,"order"])
m <- grep("Eulipotyphla", tax_table(samples.phylo)[,"order"])
tax_table(samples.phylo)[-c(n, m), 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"order"])
mm.oth <- tax_glom(samples.phylo, taxrank = "order")
mm.oth
# Convert counts to per-sample relative abundance (%)
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
ra.samples.bar <- phyloseq::plot_bar(mm.oth.ra) # extracts information needed for barplots
ra.samples.bar.data <- ra.samples.bar$data
p1.2 <- ggplot(ra.samples.bar.data, aes(x= Sample, y=Abundance, fill = order))
p1.2 + geom_bar(stat="identity", color="black") +
  scale_fill_manual(values = pal.o) +
  facet_wrap(~ mammal, scale = "free_x") +
  theme_classic() +
  theme(legend.position = "right") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Check range of vertebrate amplification in samples
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK")
n <- grep("Chordata", tax_table(samples.phylo)[,"phylum"])
tax_table(samples.phylo)[-n, 1:4] <- "Other"
unique(tax_table(samples.phylo)[,"phylum"])
mm.oth <- tax_glom(samples.phylo, taxrank = "phylum")
mm.oth
mm.oth.ra = transform_sample_counts(mm.oth, function(x) 100 * x/sum(x))
tax_table(mm.oth.ra)[,1:2]
otu_table(mm.oth.ra)
# Range of Vertebrate amplification across all samples
# NOTE(review): row 1 is assumed to be the Chordata bin after tax_glom --
# confirm against the tax_table printed above
range(otu_table(mm.oth.ra)[1,])
# Range of Vertebrate amplification across GWTS
gwts.prop <- subset_samples(mm.oth.ra, mammal == "GWTS")
range(otu_table(gwts.prop)[1,])
# Range of Vertebrate amplification across Pygmies
pyg.prop <- subset_samples(mm.oth.ra, mammal == "Pygmy")
range(otu_table(pyg.prop)[1,])
# Range of Vertebrate amplification across Bats
bat.prop <- subset_samples(mm.oth.ra, mammal == "Bat")
range(otu_table(bat.prop)[1,])
#### Filtering ####
# Remove blanks, non-prey taxa, low-depth samples and low-abundance MOTUs;
# result is `final.diet` (the filtered Zeale dataset).
samples.phylo <- subset_samples(phy.obj, mammal != "BLANK") # Remove negative controls
# Remove non-prey
diet.prey <- subset_taxa(samples.phylo, !(class %in% c("Mammalia",
                                                       "none",
                                                       "Actinopteri",
                                                       "Bdelloidea",
                                                       "Udeonychophora", # velvet worms
                                                       "Merostomata", # horse shoe crabs
                                                       "Gammaproteobacteria", # bacteria
                                                       "Magnoliopsida", # plants
                                                       "Monogononta", # rotifers
                                                       "Dothideomycetes", # fungi
                                                       "Trebouxiophyceae", # green algae
                                                       "Chondrichthyes", # Cartilaginous fish
                                                       "Mucoromycetes", # fungi
                                                       "Phylum_Endomyxa", # micro things
                                                       "Eutardigrada", # tartigrades!!
                                                       "Elardia", # Amoebas
                                                       "Cephalopoda", # Cephalopods
                                                       "Amphibia", # Amphibians
                                                       "Aves", # Birds
                                                       "Chromadorea", # roundworms
                                                       "Hexanauplia", # parasitic crustaceans
                                                       "Kingdom_Metazoa",
                                                       "Kingdom_",
                                                       "Phylum_Discosea", # amoebas
                                                       "Branchiopoda", # marine crustaceans
                                                       "Phylum_Nematoda")))
# Remove samples with less than 1000 reads
sampl.filt <- prune_samples(sample_sums(diet.prey) > 1000, diet.prey)
otu.tab <- as.data.frame(otu_table(sampl.filt))
# copy_filt() is from the hilldiv package (loaded above)
new.otu.tab <- copy_filt(otu.tab, 0.0001) # Remove MOTUs with less than 0.01% reads in each sample
new.otu.tab <- as.matrix(new.otu.tab)
otu_table(sampl.filt) <- otu_table(new.otu.tab, taxa_are_rows = TRUE)
sampl.filt
# Remove any remaining taxa with less than 5 reads in total from dataset
final.diet <- prune_taxa(taxa_sums(sampl.filt) > 4, sampl.filt)
final.diet
# Check range of read depth of samples
range(sample_sums(final.diet))
# Check average read depth
mean(sample_sums(final.diet))
# Check range of total reads per taxa
range(taxa_sums(final.diet))
# hill_div packages assessment of read depth per sample, according to a shannon diversity equivilent
depth_cov(new.otu.tab,
          qvalue = 1)
## Rarefaction analysis
# One MOTU table per host species (samples as rows for vegan::rarecurve)
Bat_Z <- prune_samples(sample_data(final.diet)$mammal == "Bat", final.diet)
df1 <- as.data.frame(t(as.matrix(otu_table(Bat_Z))))
gwts_Z <- prune_samples(sample_data(final.diet)$mammal == "GWTS", final.diet)
df2 <- as.data.frame(t(as.matrix(otu_table(gwts_Z))))
pyg_Z <- prune_samples(sample_data(final.diet)$mammal == "Pygmy", final.diet)
df3 <- as.data.frame(t(as.matrix(otu_table(pyg_Z))))
# Seed for reproducible rarefaction subsampling
set.seed(57)
r1 <- rarecurve(df1[,])
r2 <- rarecurve(df2[,])
r3 <- rarecurve(df3[,])
out <- r1 # change to the rarefaction curve to plot (r1, r2 or r3 - see above)
par(mar = c(4.5, 4.5, 1, 1)) # bottom, left, top, right
# Empty canvas; each sample's curve is drawn onto it below
plot(c(1, 15000), c(1, 120), xlab = "Reads",
     ylab = "MOTUs", type = "n", cex.axis = 2, cex.lab = 2, las = 1)
#abline(v = 1000)
#abline(v = 5000)
for (i in seq_along(out)) {
  N <- attr(out[[i]], "Subsample")
  lines(N, out[[i]], col = "black")
}
par(mar=c(5.1, 4.1, 4.1, 2.1)) # back to default plot parameters
# Save Zeale dataset phyloseq object
final.diet.z <- final.diet
### Identify how many MOTUs are identified to Order, Family, Genus and Species level
#
# Same rank-resolution tally as for the Gillet dataset, applied to the Zeale
# dataset (final.diet.z); fills row 2 of `tabx`. The helper definitions are
# repeated here so this section is self-contained, and are identical to the
# Gillet section. Fixes relative to the original inline version:
#  * "Kindgom_" typo corrected to "Kingdom_";
#  * "_gen._" matches are excluded from the unique-name counts;
#  * overlapping pattern matches are de-duplicated;
#  * empty match sets no longer wipe the table via x[-integer(0)].

# Indices of labels that are placeholders or open nomenclature.
# Patterns are regular expressions, as in the original grep() calls.
bad_label_idx <- function(labs, include.gen = TRUE) {
  pats <- c("Genus_", "Family_", "Order_", "Class_", "Phylum_", "Kingdom_",
            "none", "_sp._", "_nr._", "_cf._")
  if (include.gen) pats <- c(pats, "_gen._")
  unique(unlist(lapply(pats, grep, x = labs)))
}
# Number of labels cleanly identified at this rank
n_resolved <- function(labs, include.gen = TRUE) {
  length(labs) - length(bad_label_idx(labs, include.gen))
}
# Number of unique cleanly identified names
n_unique_resolved <- function(labs, include.gen = TRUE) {
  bad <- bad_label_idx(labs, include.gen)
  if (length(bad) > 0) labs <- labs[-bad] # guard against empty negative index
  length(unique(labs))
}

df1 <- as.data.frame(as.matrix(tax_table(final.diet.z)))
df <- df1
df$pident <- as.numeric(as.character(df$pident)) # factor-safe conversion

## Species level: pident > 98 %
n <- which(df[,"pident"] > 97.9999)
# species labels are never screened for "_gen._" (as in the original)
sp.y <- n_resolved(df[n,"species"], include.gen = FALSE)
# unique species among the confidently assigned MOTUs only
un.sp.y <- n_unique_resolved(df[n,"species"], include.gen = FALSE)

## Genus level: pident > 95 % among MOTUs not already resolved to species
if (length(n) > 0) df <- df[-n,]
n <- which(df[,"pident"] > 94.9999)
gn.y <- n_resolved(df[n,"genus"])
# unique genus names are counted over the FULL table (df1), as originally
un.gn.y <- n_unique_resolved(df1[,"genus"])

## Family level: pident > 93 %
if (length(n) > 0) df <- df[-n,]
n <- which(df[,"pident"] > 92.999)
fm.y <- n_resolved(df[n,"family"])
un.fm.y <- n_unique_resolved(df1[,"family"])

## Order level: pident > 90 %
if (length(n) > 0) df <- df[-n,]
n <- which(df[,"pident"] > 89.9999)
or.y <- n_resolved(df[n,"order"])
un.or.y <- n_unique_resolved(df1[,"order"])

df <- df1 # restore the full taxonomy table

# Fill row 2 of the primer comparison table with the Zeale counts
tabx[2,] <- c("Zeale", or.y, un.or.y, fm.y, un.fm.y,
              gn.y, un.gn.y, sp.y, un.sp.y,
              ntaxa(final.diet.z), sum(sample_sums(final.diet.z)))
# View table
# landscape() is a kableExtra page-orientation wrapper around the kable table
landscape(knitr::kable(tabx))
## Visualise the differences between taxa identification between primers ##
library(reshape2)
tabx
# melt() reshapes the wide summary table to long format for ggplot;
# note this OVERWRITES `df` (previously the taxonomy table)
df <- melt(data = tabx, id.vars = "primer",
           measure.vars = c("unique.orders",
                            "unique.families",
                            "unique.genus",
                            "unique.species"))
df$value <- as.numeric(df$value)
# Fix the legend/fill order: Zeale first, Gillet second
df$primer <- factor(df$primer, levels = c("Zeale", "Gillet"))
# Grouped barplot: unique taxa identified per rank, split by primer
p <- ggplot(df, aes(x = variable, y=value)) +
  geom_bar(aes(fill = primer),
           stat = "identity",
           color = "black",
           position = position_dodge()) +
  theme_classic() +
  scale_fill_manual(values = c("darkblue", "pink")) +
  scale_x_discrete(labels=c("unique.orders" = "Identified\nOrders",
                            "unique.families" = "Identified\nFamilies",
                            "unique.genus" = "Identified\nGenera",
                            "unique.species" = "Identified\nSpecies")) +
  theme(axis.text.x = element_text(size = 12, angle = 0, hjust = 0.5,
                                   face = "bold"),
        axis.text.y = element_text(size = 12, face = "bold")) +
  labs(y = "Number Identified") +
  theme(legend.position = c(0.15,0.8)) +
  theme(legend.title = element_blank(),
        legend.text = element_text(size = 20)) +
  #theme(legend.position = "bottom") +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(size = 15, face = "bold"))
p
### 3) Merge The Gillet and Zeale Primer Datasets ###
final.diet.g
final.diet.z
# Tag every sample with its primer before merging so provenance is kept.
sample_data(final.diet.g)$primer <- rep("Gillet", nsamples(final.diet.g))
sample_data(final.diet.z)$primer <- rep("Zeale", nsamples(final.diet.z))
mrg <- merge_phyloseq(final.diet.g, final.diet.z)
# Replace the merged sample metadata with the curated sample sheet,
# keyed by the "id" column.
new.df <- read.csv("./samplesheet.csv")
rownames(new.df) <- new.df$id
new.df <- new.df[,-1]
sample_data(mrg) <- new.df
# Combined factor (e.g. "bat_Zeale") for downstream group comparisons.
sample_data(mrg)$mammal_primer <- paste(sample_data(mrg)$mammal,
                                        sample_data(mrg)$primer, sep="_")
save(mrg, file = "./merged_primer_dataset.RData")
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 6.54842673946017e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615836307-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 6.54842673946017e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# mcp_s_age helpers.R
## This function is the displayed version in the UI ##
# Hard-coded release version string displayed in the application UI.
mcp_s_age_version <- function() {
  "1.00"
}
## Checks that not all of the entries are NA ##
# TRUE when `x` contains at least one non-missing value; FALSE for an
# all-NA or zero-length vector.
not_all_na <- function(x) {
  any(!is.na(x))
}
## Selects only columns that are not empty (using not_all_na) ##
# Drop every column of `data` whose values are entirely NA
# (columns are kept when not_all_na() is TRUE for them).
remove_empty_cols <- function(data) {
  dplyr::select_if(data, not_all_na)
}
## 7 and 4 stage EF options
# Selectable epiphyseal-fusion stage choices for the UI dropdowns.
# Names are the labels shown to the user; values are numeric stage codes
# (12 and 23 encode the intermediate "1/2" and "2/3" stages; NA is the
# unscored placeholder for each side).
seven_stages_L <- c("(Left)" = NA, "Absent (0)" = 0, "Present (1)" = 1,
                    "Early Active union (1/2)" = 12, "Active union (2)" = 2,
                    "Active/Adv. union (2/3)" = 23, "Advanced union (3)" = 3,
                    "Complete fusion (4)" = 4)
seven_stages_R <- c("(Right)" = NA, "Absent (0)" = 0, "Present (1)" = 1,
                    "Early Active union (1/2)" = 12, "Active union (2)" = 2,
                    "Active/Adv. union (2/3)" = 23, "Advanced union (3)" = 3,
                    "Complete fusion (4)" = 4)
# Reduced 4-stage scoring variants.
four_stages_L <- c("(Left)" = NA, "Absent (0)" = 0, "Present Unfused (1)" = 1,
                   "Active Fusion (2)" = 2, "Complete Fusion (3)" = 3)
four_stages_R <- c("(Right)" = NA, "Absent (0)" = 0, "Present Unfused (1)" = 1,
                   "Active Fusion (2)" = 2, "Complete Fusion (3)" = 3)
# UI Extensions: Functions used to add specific HTML attributes to UI elements #
# column_xs
# Bootstrap extra-small ("col-xs-") grid column, mirroring shiny::column()
# (which only emits "col-sm-" classes).
#
# @param width Column width, an integer from 1 to 12.
# @param ... UI elements to place inside the column.
# @param offset Number of grid columns to offset this column by.
column_xs <- function(width, ..., offset = 0)
{
  if (!is.numeric(width) || (width < 1) || (width > 12))
    stop("column width must be between 1 and 12")
  colClass <- paste0("col-xs-", width)
  if (offset > 0)
    # Bug fix: the offset value was never appended, so every offset > 0
    # produced the invalid CSS class "col-xs-offset-" with no number.
    colClass <- paste0(colClass, " col-xs-offset-", offset)
  div(class = colClass, ...)
}
# add attributes to inputs
# Append HTML attributes (supplied via ...) to a Shiny input tag.
#
# The node of the tag tree that carries the attributes depends on the
# widget kind given by `type`; an unrecognised type leaves the tag
# untouched.
#
# @param tag A shiny tag object (a nested list with $attribs/$children).
# @param ... Named attributes to append.
# @param type Kind of input the tag represents.
add_attribs <- function(tag, ..., type = "input") {
  switch(type,
    button = {
      tag$attribs <- c(tag$attribs, list(...))
    },
    select = {
      tag$children[[2]]$children[[1]]$attribs <-
        c(tag$children[[2]]$children[[1]]$attribs, list(...))
    },
    # input, numeric, date and radio widgets all keep their attributes on
    # the second child, so they share one branch via switch fall-through.
    input = ,
    numeric = ,
    date = ,
    radio = {
      tag$children[[2]]$attribs <- c(tag$children[[2]]$attribs, list(...))
    }
  )
  tag
}
######## Collapsing sides ###########
collapse_sides <- function(data, var_info) {
  # Collapse paired left/right ("_L"/"_R") columns of `data` into a single
  # column per variable, merging values according to the rules recorded in
  # `var_info` (via yada::merge_multiple_lr_var). Columns present for only
  # one side are kept as-is, renamed without the side suffix.
  #
  # Extract full column names and final column names
  full_vec <- names(data)
  final_vec <- suppressWarnings(unique(stringr::str_remove_all(full_vec, "_L|_R")))
  no_lr_idx <- c()
  # Rename columns with only one side, switch approach to ""
  for(i in 1:length(final_vec)) {
    # NOTE(review): grep() does regex/substring matching, so a variable
    # name that is a prefix of another could over-match -- confirm the
    # variable names are unambiguous.
    cur_idx <- grep(final_vec[i], full_vec)
    if(length(cur_idx) < 2) {
      names(data)[cur_idx] <- final_vec[i]
      no_lr_idx <- c(no_lr_idx, i)
    }
  }
  # Separate data into keep and merge sets
  keep_idx <- which(names(data) %in% final_vec[no_lr_idx])
  if(length(keep_idx) > 0) {
    keep_df <- data[keep_idx]
    merge_df <- data[-keep_idx]
  } else {
    keep_df <- c()
    merge_df <- data
  }
  if(length(merge_df) > 0){
    # Subset var_info to only merging variables
    if(length(no_lr_idx) > 0) {
      var_info_sub <- var_info[match(final_vec[-no_lr_idx], var_info$Variable),]
    } else {
      var_info_sub <- var_info[match(final_vec, var_info$Variable),]
    }
    # assumes one shared side-label convention across all merged variables
    # (unique() would return length > 1 otherwise) -- TODO confirm
    left_label <- unique(var_info_sub$Left_Label)
    right_label <- unique(var_info_sub$Right_Label)
    label_side <- unique(var_info_sub$Left_Right_Side)
    # Apply yada function to merge
    merge_df <- yada::merge_multiple_lr_var(input_df=merge_df,
                                            base_var=var_info_sub$Variable,
                                            side_loc=label_side,
                                            side_labels=c(left_label, right_label),
                                            approach=var_info_sub$Left_Right_Approach)
    if(length(keep_df)>0) {
      temp_df <- cbind(keep_df, merge_df)
    } else {
      temp_df <- merge_df
    }
    # Restore the expected column order.
    result <- temp_df[match(final_vec, names(temp_df))]
  } else {
    result <- keep_df
  }
  return(result)
}
###### FILL IN NA FOR MULTIVARIATE MODELS ######
# Build a one-row data frame with columns `var_names`, filled from the
# matching columns of `case_data` and NA where no match exists.
#
# @param case_data A one-row data frame of observed values.
# @param var_names Character vector of column names the model expects.
# @return A 1 x length(var_names) data frame in `var_names` order.
reorder_df <- function(case_data, var_names){
  new_df <- data.frame(matrix(NA, 1, length(var_names)))
  names(new_df) <- var_names
  for(i in var_names) {
    cur_var <- grep(i, names(case_data), value=TRUE)
    # Bug fix: the original used the vectorized ifelse() on a scalar test
    # and indexed with the full match vector, which errors when the grep
    # pattern matches more than one column. Use plain if/else and take the
    # first match.
    if (length(cur_var) > 0) {
      new_df[i] <- case_data[[cur_var[1]]]
    } else {
      new_df[i] <- NA
    }
  }
  return(new_df)
}
| /R/helpers.R | permissive | ElaineYChu/MCP-S-Age | R | false | false | 4,694 | r | # mcp_s_age helpers.R
## This function is the displayed version in the UI ##
mcp_s_age_version <- function(){
  # Hard-coded release version string shown in the UI.
  return("1.00")
}
## Checks that not all of the entries are NA ##
not_all_na <- function(x) {
  # TRUE if x has at least one non-NA entry; FALSE for all-NA or empty x.
  !all(is.na(x))
}
## Selects only columns that are not empty (using not_all_na) ##
remove_empty_cols <- function(data) {
  # Keep only the columns of `data` with at least one non-missing value.
  result <- dplyr::select_if(data, not_all_na)
  return(result)
}
## 7 and 4 stage EF options
# UI dropdown choices for epiphyseal-fusion scoring; names are the labels
# shown to the user, values the numeric stage codes (12 and 23 encode the
# intermediate "1/2" and "2/3" stages; NA is the unscored placeholder).
seven_stages_L <- c("(Left)" = NA, "Absent (0)" = 0, "Present (1)" = 1,
                    "Early Active union (1/2)" = 12, "Active union (2)" = 2,
                    "Active/Adv. union (2/3)" = 23, "Advanced union (3)" = 3,
                    "Complete fusion (4)" = 4)
seven_stages_R <- c("(Right)" = NA, "Absent (0)" = 0, "Present (1)" = 1,
                    "Early Active union (1/2)" = 12, "Active union (2)" = 2,
                    "Active/Adv. union (2/3)" = 23, "Advanced union (3)" = 3,
                    "Complete fusion (4)" = 4)
# Reduced 4-stage scoring variants.
four_stages_L <- c("(Left)" = NA, "Absent (0)" = 0, "Present Unfused (1)" = 1,
                   "Active Fusion (2)" = 2, "Complete Fusion (3)" = 3)
four_stages_R <- c("(Right)" = NA, "Absent (0)" = 0, "Present Unfused (1)" = 1,
                   "Active Fusion (2)" = 2, "Complete Fusion (3)" = 3)
# UI Extensions: Functions used to add specific HTML attributes to UI elements #
# column_xs
# Bootstrap extra-small ("col-xs-") grid column, mirroring shiny::column()
# (which only emits "col-sm-" classes).
#
# @param width Column width, an integer from 1 to 12.
# @param ... UI elements to place inside the column.
# @param offset Number of grid columns to offset this column by.
column_xs <- function(width, ..., offset = 0)
{
  if (!is.numeric(width) || (width < 1) || (width > 12))
    stop("column width must be between 1 and 12")
  colClass <- paste0("col-xs-", width)
  if (offset > 0)
    # Bug fix: the offset value was never appended, so every offset > 0
    # produced the invalid CSS class "col-xs-offset-" with no number.
    colClass <- paste0(colClass, " col-xs-offset-", offset)
  div(class = colClass, ...)
}
# add attributes to inputs
# Append the attributes in ... to the node of the shiny tag tree that
# carries them for the given input `type`; unmatched types return the tag
# unchanged. Note that input/numeric/date/radio all modify the same node.
add_attribs <- function(tag, ..., type = "input") {
  switch(type,
         button = {
           tag$attribs <- c(tag$attribs, list(...))
         },
         input = {
           tag$children[[2]]$attribs <- c(tag$children[[2]]$attribs, list(...))
         },
         select = {
           tag$children[[2]]$children[[1]]$attribs <- c(tag$children[[2]]$children[[1]]$attribs, list(...))
         },
         numeric = {
           tag$children[[2]]$attribs <- c(tag$children[[2]]$attribs, list(...))
         },
         date = {
           tag$children[[2]]$attribs <- c(tag$children[[2]]$attribs, list(...))
         },
         radio = {
           tag$children[[2]]$attribs <- c(tag$children[[2]]$attribs, list(...))
         }
  )
  tag
}
######## Collapsing sides ###########
collapse_sides <- function(data, var_info) {
  # Collapse paired left/right ("_L"/"_R") columns of `data` into one
  # column per variable using the merge rules in `var_info`; single-sided
  # columns are kept and renamed without the side suffix.
  #
  # Extract full column names and final column names
  full_vec <- names(data)
  final_vec <- suppressWarnings(unique(stringr::str_remove_all(full_vec, "_L|_R")))
  no_lr_idx <- c()
  # Rename columns with only one side, switch approach to ""
  for(i in 1:length(final_vec)) {
    # NOTE(review): grep() matches substrings -- confirm variable names
    # are unambiguous (no name a prefix of another).
    cur_idx <- grep(final_vec[i], full_vec)
    if(length(cur_idx) < 2) {
      names(data)[cur_idx] <- final_vec[i]
      no_lr_idx <- c(no_lr_idx, i)
    }
  }
  # Separate data into keep and merge sets
  keep_idx <- which(names(data) %in% final_vec[no_lr_idx])
  if(length(keep_idx) > 0) {
    keep_df <- data[keep_idx]
    merge_df <- data[-keep_idx]
  } else {
    keep_df <- c()
    merge_df <- data
  }
  if(length(merge_df) > 0){
    # Subset var_info to only merging variables
    if(length(no_lr_idx) > 0) {
      var_info_sub <- var_info[match(final_vec[-no_lr_idx], var_info$Variable),]
    } else {
      var_info_sub <- var_info[match(final_vec, var_info$Variable),]
    }
    # assumes one shared side-label convention across variables -- TODO confirm
    left_label <- unique(var_info_sub$Left_Label)
    right_label <- unique(var_info_sub$Right_Label)
    label_side <- unique(var_info_sub$Left_Right_Side)
    # Apply yada function to merge
    merge_df <- yada::merge_multiple_lr_var(input_df=merge_df,
                                            base_var=var_info_sub$Variable,
                                            side_loc=label_side,
                                            side_labels=c(left_label, right_label),
                                            approach=var_info_sub$Left_Right_Approach)
    if(length(keep_df)>0) {
      temp_df <- cbind(keep_df, merge_df)
    } else {
      temp_df <- merge_df
    }
    # Restore the expected column order.
    result <- temp_df[match(final_vec, names(temp_df))]
  } else {
    result <- keep_df
  }
  return(result)
}
###### FILL IN NA FOR MULTIVARIATE MODELS ######
# Build a one-row data frame with columns `var_names`, filled from the
# matching columns of `case_data` and NA where no match exists.
#
# @param case_data A one-row data frame of observed values.
# @param var_names Character vector of column names the model expects.
# @return A 1 x length(var_names) data frame in `var_names` order.
reorder_df <- function(case_data, var_names){
  new_df <- data.frame(matrix(NA, 1, length(var_names)))
  names(new_df) <- var_names
  for(i in var_names) {
    cur_var <- grep(i, names(case_data), value=TRUE)
    # Bug fix: the original used the vectorized ifelse() on a scalar test
    # and indexed with the full match vector, which errors when the grep
    # pattern matches more than one column. Use plain if/else and take the
    # first match.
    if (length(cur_var) > 0) {
      new_df[i] <- case_data[[cur_var[1]]]
    } else {
      new_df[i] <- NA
    }
  }
  return(new_df)
}
|
# Test case 216
# Fixture: 3 DMUs with 2 inputs and 2 outputs; the Output matrix
# deliberately contains negative values so negative_zero_value() and the
# constraint construction below are exercised on translated data.
Data_1 <- matrix(c(3, 5, -8, 1,
                   7, 1, 4, -5,
                   2, 5, 3, 2),byrow=TRUE, nrow=3);
Input = matrix(c(3, 5,
                 7, 1,
                 2, 5), byrow = TRUE, nrow = 3);
Output = matrix(c(-8, 1,
                  4, -5,
                  2, 2), byrow = TRUE, nrow = 3);
# Model configuration: no link variables, one division (K), three DMUs
# (N), two inputs (sum_m) and two outputs (sum_r), non-oriented, VRS.
Link = NaN;
weights = c(1);
K = 1;
N = 3;
sum_m = 2;
sum_r = 2;
sum_l = NaN;
Amount = c(2,2);
Amount_Input = c(2);
Amount_Output = c(2);
Amount_Link = NaN;
direction = "non" ;
link_con = NaN;
return_to_scale = "VRS";
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
#Loading all the functioN:
# NOTE(review): this setwd() chain assumes the working directory is the
# package "tests" folder when the file is run; sourcing via a relative
# file.path() would be less fragile -- confirm before changing.
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 216",{
#projection_frontier
Output_vec_1 <- matrix(c(-8,
4,
2), byrow = TRUE, nrow = 3);
y_head <- 4;
y_bottom <- 2;
Output_vec_res_1 <- matrix(c(1/3,
4,
2), byrow = TRUE, nrow = 3);
Output_vec_2 <- matrix(c(1,
-5,
2), byrow = TRUE, nrow = 3);
y_head <- 2;
y_bottom <- 1;
Output_vec_res_2 <- matrix(c(1,
1/7,
2), byrow = TRUE, nrow = 3);
# Adjustment Output for the obj function and constraint1:
Output_N <- matrix(c(1/3,1,
4,1/7,
2,2), byrow = TRUE, nrow = 3);
#objective function and Constraint1:
Const_1_i1 <- matrix(c(1,rep(0,N),(0.5/(1/3)),(0.5/1),rep(0,2)), byrow = TRUE, nrow = 1);
Const_1_i2 <- matrix(c(1,rep(0,N),(0.5/(4)), (0.5/(1/7)),rep(0,2)), byrow = TRUE, nrow = 1);
Const_1_i3 <- matrix(c(1,rep(0,N),(0.5/(2)), (0.5/2),rep(0,2)), byrow = TRUE, nrow = 1);
obj_func_i1 <- matrix(c(1,rep(0,N),rep(0,2),-(0.5/3),-(0.5/5)), byrow = TRUE, nrow = 1);
obj_func_i2 <- matrix(c(1,rep(0,N),rep(0,2),-(0.5/7),-(0.5/1)), byrow = TRUE, nrow = 1);
obj_func_i3 <- matrix(c(1,rep(0,N),rep(0,2),-(0.5/2),-(0.5/5)), byrow = TRUE, nrow = 1);
Obj_func_and_Const_i1 <- list( "FOBJ" = obj_func_i1, "FCON" = Const_1_i1);
Obj_func_and_Const_i2 <- list( "FOBJ" = obj_func_i2, "FCON" = Const_1_i2);
Obj_func_and_Const_i3 <- list( "FOBJ" = obj_func_i3, "FCON" = Const_1_i3);
#########################################
#########################################
#########################################
#Object.Function and Constraint 1 and direction="non" and i=1:
expect_equal(Obj.func_and_con(1, direction, Input, Output_N, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, sum_l, weights, Link_obj), Obj_func_and_Const_i1, check.attributes = FALSE);
#Object.Function and Constraint 1 and direction="non" and i=2:
expect_equal(Obj.func_and_con(2, direction, Input, Output_N, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, sum_l, weights, Link_obj), Obj_func_and_Const_i2, check.attributes = FALSE) ;
#Object.Function and Constraint 1 and direction="non" and i=3:
expect_equal(Obj.func_and_con(3, direction, Input, Output_N, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, sum_l, weights, Link_obj), Obj_func_and_Const_i3, check.attributes = FALSE) ;
#########################################
#negative_zero_value:
expect_equal(negative_zero_value(Output_vec_1, N), Output_vec_res_1, check.attributes = FALSE)
expect_equal(negative_zero_value(Output_vec_2, N), Output_vec_res_2, check.attributes = FALSE)
})
| /2_nsbm_approach/Nsbm.function/tests/Test_case_216.R | no_license | thomaskrupa/thesis | R | false | false | 3,817 | r | # Test case 216
# Fixture: 3 DMUs with 2 inputs and 2 outputs; Output deliberately
# contains negative values to exercise negative_zero_value() below.
Data_1 <- matrix(c(3, 5, -8, 1,
                   7, 1, 4, -5,
                   2, 5, 3, 2),byrow=TRUE, nrow=3);
Input = matrix(c(3, 5,
                 7, 1,
                 2, 5), byrow = TRUE, nrow = 3);
Output = matrix(c(-8, 1,
                  4, -5,
                  2, 2), byrow = TRUE, nrow = 3);
# Model configuration: no link variables, one division (K), three DMUs
# (N), two inputs (sum_m) and two outputs (sum_r), non-oriented, VRS.
Link = NaN;
weights = c(1);
K = 1;
N = 3;
sum_m = 2;
sum_r = 2;
sum_l = NaN;
Amount = c(2,2);
Amount_Input = c(2);
Amount_Output = c(2);
Amount_Link = NaN;
direction = "non" ;
link_con = NaN;
return_to_scale = "VRS";
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
#Loading all the functioN:
# NOTE(review): this setwd() chain assumes the test is launched from the
# package "tests" folder -- confirm before changing.
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 216",{
#projection_frontier
Output_vec_1 <- matrix(c(-8,
4,
2), byrow = TRUE, nrow = 3);
y_head <- 4;
y_bottom <- 2;
Output_vec_res_1 <- matrix(c(1/3,
4,
2), byrow = TRUE, nrow = 3);
Output_vec_2 <- matrix(c(1,
-5,
2), byrow = TRUE, nrow = 3);
y_head <- 2;
y_bottom <- 1;
Output_vec_res_2 <- matrix(c(1,
1/7,
2), byrow = TRUE, nrow = 3);
# Adjustment Output for the obj function and constraint1:
Output_N <- matrix(c(1/3,1,
4,1/7,
2,2), byrow = TRUE, nrow = 3);
#objective function and Constraint1:
Const_1_i1 <- matrix(c(1,rep(0,N),(0.5/(1/3)),(0.5/1),rep(0,2)), byrow = TRUE, nrow = 1);
Const_1_i2 <- matrix(c(1,rep(0,N),(0.5/(4)), (0.5/(1/7)),rep(0,2)), byrow = TRUE, nrow = 1);
Const_1_i3 <- matrix(c(1,rep(0,N),(0.5/(2)), (0.5/2),rep(0,2)), byrow = TRUE, nrow = 1);
obj_func_i1 <- matrix(c(1,rep(0,N),rep(0,2),-(0.5/3),-(0.5/5)), byrow = TRUE, nrow = 1);
obj_func_i2 <- matrix(c(1,rep(0,N),rep(0,2),-(0.5/7),-(0.5/1)), byrow = TRUE, nrow = 1);
obj_func_i3 <- matrix(c(1,rep(0,N),rep(0,2),-(0.5/2),-(0.5/5)), byrow = TRUE, nrow = 1);
Obj_func_and_Const_i1 <- list( "FOBJ" = obj_func_i1, "FCON" = Const_1_i1);
Obj_func_and_Const_i2 <- list( "FOBJ" = obj_func_i2, "FCON" = Const_1_i2);
Obj_func_and_Const_i3 <- list( "FOBJ" = obj_func_i3, "FCON" = Const_1_i3);
#########################################
#########################################
#########################################
#Object.Function and Constraint 1 and direction="non" and i=1:
expect_equal(Obj.func_and_con(1, direction, Input, Output_N, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, sum_l, weights, Link_obj), Obj_func_and_Const_i1, check.attributes = FALSE);
#Object.Function and Constraint 1 and direction="non" and i=2:
expect_equal(Obj.func_and_con(2, direction, Input, Output_N, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, sum_l, weights, Link_obj), Obj_func_and_Const_i2, check.attributes = FALSE) ;
#Object.Function and Constraint 1 and direction="non" and i=3:
expect_equal(Obj.func_and_con(3, direction, Input, Output_N, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, sum_l, weights, Link_obj), Obj_func_and_Const_i3, check.attributes = FALSE) ;
#########################################
#negative_zero_value:
expect_equal(negative_zero_value(Output_vec_1, N), Output_vec_res_1, check.attributes = FALSE)
expect_equal(negative_zero_value(Output_vec_2, N), Output_vec_res_2, check.attributes = FALSE)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callCNV.R
\name{outputTable}
\alias{outputTable}
\alias{outputVcf}
\title{Export CNVs result.}
\usage{
outputTable(x, filepath, info.vec, threshold = 0.5, min.probes = 30)
outputVcf(x, filepath, id, info.vec, threshold = 0.5, min.probes = 30,
addinfo = NULL, addformat = NULL)
}
\arguments{
\item{x}{A data frame which is result of calling CNVs.}
\item{filepath}{A character string of file path to write to.}
\item{info.vec}{A character vector of column names in x to export.}
\item{threshold}{A non-negative numeric value. A CNV of which log2 ratio is
out of range from inverse \emph{threshold} to \emph{threshold} will not
export.}
\item{min.probes}{A non-negative integer. A CNV of which probe is under
\code{min.probes} will not export.}
\item{id}{A character string of the sample id in VCF file.}
\item{addinfo}{A character string or vector of column names in x to add to
VCF file header and \emph{INFO} column. Default header can be created by
\code{makeVcfHeader()}.}
\item{addformat}{A character string or vector of column names in x to add to
VCF file header, \emph{FORMAT} and sample column. Default header can be
created by running \code{makeVcfHeader}.}
}
\description{
Write CNVs result to tabular or VCF format file
}
\examples{
loss <- callLossCNV(system.file("extdata", 'testSample.state', package = "kfltCNV"), 0)
gene <- callCNVGene(system.file("extdata", 'testSample.anno.state', package = "kfltCNV"), 0)
# tabular
outputTable(loss, filepath = 'cnv.result.txt',
info.vec = c("svtype", "svlen", "end", "log2ratio", "probe", "depth", "baseline"))
# vcf file
outputVcf(loss, filepath = 'cnv.result.vcf', id = 'testSample',
info.vec = c("svtype", "svlen", "end", "log2ratio", "probe", "depth", "baseline"))
# vcf file adding gene
outputVcf(gene, filepath = 'cnv.result.vcf', id = 'testSample',
info.vec = c("svtype", "end", 'gene', "log2ratio", "probe", "depth", "baseline"),
addinfo = '##INFO=<ID=GENE,Number=1,Type=String,Description="Gene symbol">')
}
\author{
Zhan-Ni Chen
}
| /man/outputTable.Rd | no_license | sanadamakomi/kfltCNV | R | false | true | 2,096 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callCNV.R
\name{outputTable}
\alias{outputTable}
\alias{outputVcf}
\title{Export CNVs result.}
\usage{
outputTable(x, filepath, info.vec, threshold = 0.5, min.probes = 30)
outputVcf(x, filepath, id, info.vec, threshold = 0.5, min.probes = 30,
addinfo = NULL, addformat = NULL)
}
\arguments{
\item{x}{A data frame which is result of calling CNVs.}
\item{filepath}{A character string of file path to write to.}
\item{info.vec}{A character vector of column names in x to export.}
\item{threshold}{A non-negative numeric value. A CNV of which log2 ratio is
out of range from inverse \emph{threshold} to \emph{threshold} will not
export.}
\item{min.probes}{A non-negative integer. A CNV of which probe is under
\code{min.probes} will not export.}
\item{id}{A character string of the sample id in VCF file.}
\item{addinfo}{A character string or vector of column names in x to add to
VCF file header and \emph{INFO} column. Default header can be created by
\code{makeVcfHeader()}.}
\item{addformat}{A character string or vector of column names in x to add to
VCF file header, \emph{FORMAT} and sample column. Default header can be
created by running \code{makeVcfHeader}.}
}
\description{
Write CNVs result to tabular or VCF format file
}
\examples{
loss <- callLossCNV(system.file("extdata", 'testSample.state', package = "kfltCNV"), 0)
gene <- callCNVGene(system.file("extdata", 'testSample.anno.state', package = "kfltCNV"), 0)
# tabular
outputTable(loss, filepath = 'cnv.result.txt',
info.vec = c("svtype", "svlen", "end", "log2ratio", "probe", "depth", "baseline"))
# vcf file
outputVcf(loss, filepath = 'cnv.result.vcf', id = 'testSample',
info.vec = c("svtype", "svlen", "end", "log2ratio", "probe", "depth", "baseline"))
# vcf file adding gene
outputVcf(gene, filepath = 'cnv.result.vcf', id = 'testSample',
info.vec = c("svtype", "end", 'gene', "log2ratio", "probe", "depth", "baseline"),
addinfo = '##INFO=<ID=GENE,Number=1,Type=String,Description="Gene symbol">')
}
\author{
Zhan-Ni Chen
}
|
###############################################################################################
##### df for reg.linreg()
###############################################################################################
#' @name reg.dof
#' @aliases reg.dof
#' @title Degree of freedom for lm.fit
#' @description Calculate degree of freedom for the outcome of lm.fit(). This function is built for reg.linreg for higher efficiency only.
#' It can't be used for calculating degree of freedom in general operation.
#' @usage reg.dof(fit)
#' @param fit :outcome of lm.fit()
#' @examples X <- as.matrix(cbind(1,EuStockMarkets[,1:2])) # create the design matrix
#' Y <- as.data.frame(EuStockMarkets)$FTSE
#' fit <- lm.fit(x = X, y = Y)
#' reg.dof(fit)
reg.dof <- function(fit) {
  # Residual degrees of freedom for an lm.fit() result:
  # number of observations minus the rank of the model matrix.
  n_obs <- length(fit$fitted.values)
  n_obs - fit$rank
}
| /R/reg.dof.R | no_license | cran/YRmisc | R | false | false | 846 | r | ###############################################################################################
##### df for reg.linreg()
###############################################################################################
#' @name reg.dof
#' @aliases reg.dof
#' @title Degree of freedom for lm.fit
#' @description Calculate degree of freedom for the outcome of lm.fit(). This function is built for reg.linreg for higher efficiency only.
#' It can't be used for calculating degree of freedom in general operation.
#' @usage reg.dof(fit)
#' @param fit :outcome of lm.fit()
#' @examples X <- as.matrix(cbind(1,EuStockMarkets[,1:2])) # create the design matrix
#' Y <- as.data.frame(EuStockMarkets)$FTSE
#' fit <- lm.fit(x = X, y = Y)
#' reg.dof(fit)
reg.dof <- function(fit) {
  # Residual degrees of freedom for an lm.fit() result:
  # number of observations minus the rank of the model matrix.
  n_obs <- length(fit$fitted.values)
  n_obs - fit$rank
}
|
#' Draw color legend.
#'
#' @param colbar Vector, color of colbar.
#' @param labels Vector, numeric or character to be written.
#' @param at Numeric vector (quantile), the position to put labels. See examples
#' for details.
#' @param xlim See in \code{\link{plot}}
#' @param ylim See in \code{\link{plot}}
#' @param vertical Logical, whether the colorlegend is vertical or horizon.
#' @param ratio.colbar The width ratio of colorbar to the total colorlegend
#' (including colorbar, segments and labels).
#' @param lim.segment Vector (quantile) of length 2, the elements should be in
#' [0,1], giving segments coordinates ranges. If the value is NULL or "auto",
#' then the ranges are derived automatically.
#' @param align Character, alignment type of labels, \code{"l"} means left,
#' \code{"c"} means center and \code{"r"} right.
#' @param addlabels Logical, whether add text label or not.
#' @param \dots Additional arguments, passed to \code{\link{plot}}
#'
#' @example vignettes/example-colorlegend.R
#' @keywords hplot
#' @author Taiyun Wei
#' @export
colorlegend <- function(
  colbar,
  labels,
  at = NULL,
  xlim = c(0, 1),
  ylim = c(0, 1),
  vertical = TRUE,
  ratio.colbar = 0.4,
  lim.segment = "auto", # NOTE: NULL treated as "auto"
  align = c("c", "l", "r"),
  addlabels = TRUE,
  ...)
{
  # Default label positions: evenly spaced quantiles over [0, 1].
  if (is.null(at) && addlabels) {
    at <- seq(0L, 1L, length = length(labels))
  }
  # "auto" (or NULL) tick range: start at the colorbar edge and extend
  # 20% of the colorbar width beyond it.
  if (any(is.null(lim.segment)) || any(lim.segment == "auto")) {
    lim.segment <- ratio.colbar + c(0, ratio.colbar * .2)
  }
  # Validate quantile-style inputs.
  if (any(at < 0L) || any(at > 1L)) {
    stop("at should be between 0 and 1")
  }
  if (length(lim.segment) != 2) {
    stop("lim.segment should be a vector of length 2")
  }
  if (any(lim.segment < 0L) || any(lim.segment > 1L)) {
    stop("lim.segment should be between 0 and 1")
  }
  align <- match.arg(align)
  xgap <- diff(xlim)  # total width of the legend region
  ygap <- diff(ylim)  # total height of the legend region
  len <- length(colbar)
  rat1 <- ratio.colbar
  rat2 <- lim.segment
  if (vertical) {
    # Map quantile positions into user y-coordinates.
    at <- at * ygap + ylim[1]
    yyy <- seq(ylim[1], ylim[2], length = len + 1)
    # One filled rectangle per color, stacked bottom to top, plus an
    # outer black frame.
    rect(rep(xlim[1], len), yyy[1:len],
         rep(xlim[1] + xgap * rat1, len), yyy[-1],
         col = colbar, border = colbar)
    rect(xlim[1], ylim[1], xlim[1] + xgap * rat1, ylim[2], border = "black")
    # Tick marks from the colorbar toward the labels.
    segments(xlim[1] + xgap * rat2[1], at, xlim[1] + xgap * rat2[2], at)
    if (addlabels) {
      pos.xlabel <- rep(xlim[1] + xgap * max(rat2, rat1), length(at))
      switch(align,
             l = text(pos.xlabel, y = at, labels = labels, pos = 4, ...),
             r = text(xlim[2], y = at, labels = labels, pos = 2, ...),
             c = text((pos.xlabel + xlim[2]) / 2, y = at, labels = labels, ...),
             stop("programming error - should not have reached this line!")
      )
    }
  } else {
    # Horizontal layout: colorbar along the top edge, labels below.
    at <- at * xgap + xlim[1]
    xxx <- seq(xlim[1], xlim[2], length = len + 1)
    rect(xxx[1:len], rep(ylim[2] - rat1 * ygap, len),
         xxx[-1], rep(ylim[2], len),
         col = colbar, border = colbar)
    rect(xlim[1], ylim[2] - rat1 * ygap, xlim[2], ylim[2], border = "black")
    segments(at, ylim[2] - ygap * rat2[1], at, ylim[2] - ygap * rat2[2])
    if (addlabels) {
      pos.ylabel <- rep(ylim[2] - ygap * max(rat2, rat1), length(at))
      switch(align,
             l = text(x = at, y = pos.ylabel, labels = labels, pos = 1, ...),
             r = text(x = at, y = ylim[1], labels = labels, pos = 2, ...),
             c = text(x = at, y = (pos.ylabel + ylim[1]) / 2, labels = labels, ...),
             stop("programming error - should not have reached this line!")
      )
    }
  }
}
| /R/colorlegend.R | permissive | douglaswhitaker/corrplot | R | false | false | 3,584 | r | #' Draw color legend.
#'
#' @param colbar Vector, color of colbar.
#' @param labels Vector, numeric or character to be written.
#' @param at Numeric vector (quantile), the position to put labels. See examples
#' for details.
#' @param xlim See in \code{\link{plot}}
#' @param ylim See in \code{\link{plot}}
#' @param vertical Logical, whether the colorlegend is vertical or horizon.
#' @param ratio.colbar The width ratio of colorbar to the total colorlegend
#' (including colorbar, segments and labels).
#' @param lim.segment Vector (quantile) of length 2, the elements should be in
#' [0,1], giving segments coordinates ranges. If the value is NULL or "auto",
#' then the ranges are derived automatically.
#' @param align Character, alignment type of labels, \code{"l"} means left,
#' \code{"c"} means center and \code{"r"} right.
#' @param addlabels Logical, whether add text label or not.
#' @param \dots Additional arguments, passed to \code{\link{plot}}
#'
#' @example vignettes/example-colorlegend.R
#' @keywords hplot
#' @author Taiyun Wei
#' @export
colorlegend <- function(
  colbar,
  labels,
  at = NULL,
  xlim = c(0, 1),
  ylim = c(0, 1),
  vertical = TRUE,
  ratio.colbar = 0.4,
  lim.segment = "auto", # NOTE: NULL treated as "auto"
  align = c("c", "l", "r"),
  addlabels = TRUE,
  ...)
{
  # Default label positions: evenly spaced quantiles over [0, 1].
  if (is.null(at) && addlabels) {
    at <- seq(0L, 1L, length = length(labels))
  }
  # "auto" (or NULL) tick range: colorbar edge plus 20% of its width.
  if (any(is.null(lim.segment)) || any(lim.segment == "auto")) {
    lim.segment <- ratio.colbar + c(0, ratio.colbar * .2)
  }
  # Validate quantile-style inputs.
  if (any(at < 0L) || any(at > 1L)) {
    stop("at should be between 0 and 1")
  }
  if (length(lim.segment) != 2) {
    stop("lim.segment should be a vector of length 2")
  }
  if (any(lim.segment < 0L) || any(lim.segment > 1L)) {
    stop("lim.segment should be between 0 and 1")
  }
  align <- match.arg(align)
  xgap <- diff(xlim)  # total width of the legend region
  ygap <- diff(ylim)  # total height of the legend region
  len <- length(colbar)
  rat1 <- ratio.colbar
  rat2 <- lim.segment
  if (vertical) {
    # Map quantile positions into user y-coordinates; draw one filled
    # rectangle per color stacked bottom to top, a black frame, and ticks.
    at <- at * ygap + ylim[1]
    yyy <- seq(ylim[1], ylim[2], length = len + 1)
    rect(rep(xlim[1], len), yyy[1:len],
         rep(xlim[1] + xgap * rat1, len), yyy[-1],
         col = colbar, border = colbar)
    rect(xlim[1], ylim[1], xlim[1] + xgap * rat1, ylim[2], border = "black")
    segments(xlim[1] + xgap * rat2[1], at, xlim[1] + xgap * rat2[2], at)
    if (addlabels) {
      pos.xlabel <- rep(xlim[1] + xgap * max(rat2, rat1), length(at))
      switch(align,
             l = text(pos.xlabel, y = at, labels = labels, pos = 4, ...),
             r = text(xlim[2], y = at, labels = labels, pos = 2, ...),
             c = text((pos.xlabel + xlim[2]) / 2, y = at, labels = labels, ...),
             stop("programming error - should not have reached this line!")
      )
    }
  } else {
    # Horizontal layout: colorbar along the top edge, labels below.
    at <- at * xgap + xlim[1]
    xxx <- seq(xlim[1], xlim[2], length = len + 1)
    rect(xxx[1:len], rep(ylim[2] - rat1 * ygap, len),
         xxx[-1], rep(ylim[2], len),
         col = colbar, border = colbar)
    rect(xlim[1], ylim[2] - rat1 * ygap, xlim[2], ylim[2], border = "black")
    segments(at, ylim[2] - ygap * rat2[1], at, ylim[2] - ygap * rat2[2])
    if (addlabels) {
      pos.ylabel <- rep(ylim[2] - ygap * max(rat2, rat1), length(at))
      switch(align,
             l = text(x = at, y = pos.ylabel, labels = labels, pos = 1, ...),
             r = text(x = at, y = ylim[1], labels = labels, pos = 2, ...),
             c = text(x = at, y = (pos.ylabel + ylim[1]) / 2, labels = labels, ...),
             stop("programming error - should not have reached this line!")
      )
    }
  }
}
|
# WD Specific Liver DMR HOMER TF Analysis ####
# Charles Mordaunt
# 12/14/17
# Packages ####
library(ggplot2)
library(reshape2)
library(reshape)
library(scales)
# Known Motif Enrichment Results
# All DMRs
# Load HOMER known-motif enrichment results for all DMRs and tidy them:
# split the composite motif name into TF / family / cell type / antibody /
# reference fields, convert percentage columns to numeric, and recompute
# FDR-adjusted q-values.
homerAll <- read.delim("Homer/All DMRs/knownResults.txt", sep="\t", stringsAsFactors=FALSE)
colnames(homerAll) <- c("MotifName", "Consensus", "Pvalue", "LogPvalue", "qValue", "NumTargetSeq", "PerTargetSeq",
                        "NumBackgroundSeq", "PerBackgroundSeq")
# MotifName appears to encode "TF(Type)/CellType-Antibody-...-Seq(Ref)/..."
# given the splits below -- confirm against the HOMER output format.
MotifName <- strsplit(homerAll$MotifName, "/")
MotifName1 <- sapply(MotifName, function(x) x[1])
MotifName1 <- strsplit(MotifName1, "(", fixed=TRUE)
TF <- sapply(MotifName1, function(x) x[1])
TFtype <- sapply(MotifName1, function(x) x[2])
TFtype <- as.character(sapply(TFtype, function(x) gsub(")", "", x, fixed=TRUE)))
TFtype <- as.character(sapply(TFtype, function(x) gsub("?", "", x, fixed=TRUE)))
TFtype[TFtype == ",Zf"] <- "Zf"
MotifName2 <- sapply(MotifName, function(x) x[2])
MotifName2 <- strsplit(MotifName2, "-")
CellType <- sapply(MotifName2, function(x) x[1])
Antibody <- sapply(MotifName2, function(x) x[2])
Reference <- sapply(MotifName2, function(x) x[4])
Reference <- as.character(sapply(Reference, function(x) gsub("Seq(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Chip(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub(")", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Seq", "", x, fixed=TRUE)))
homerAll <- cbind(TF, TFtype, CellType, Antibody, Reference, homerAll)
# Strip "%" and convert enrichment percentages to numeric.
homerAll$PerTargetSeq <- as.numeric(sapply(homerAll$PerTargetSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerAll$PerBackgroundSeq <- as.numeric(sapply(homerAll$PerBackgroundSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerAll$LogPvalue <- -homerAll$LogPvalue
homerAll$Pvalue <- 10^-homerAll$LogPvalue  # NOTE(review): this assumes LogPvalue is log10, but HOMER's "Log P-value" column is the natural log -- confirm the intended base
homerAll$qValue <- p.adjust(homerAll$Pvalue, "fdr")
homerAll$LogQvalue <- -log10(homerAll$qValue)
homerAll$FoldEnrichment <- homerAll$PerTargetSeq/homerAll$PerBackgroundSeq
homerAll$Enriched <- sapply(homerAll$qValue, function(x) ifelse(x < 0.05, TRUE, FALSE))
homerAll <- homerAll[order(homerAll$MotifName),]
homerAll$ID <- 1:dim(homerAll)[1]
table(homerAll$Enriched) #258 Enriched Factors at FDR < 0.05
# Hyper DMRs
homerHyper <- read.delim("Homer/Hyper DMRs/knownResults.txt", sep="\t", stringsAsFactors=FALSE)
colnames(homerHyper) <- c("MotifName", "Consensus", "Pvalue", "LogPvalue", "qValue", "NumTargetSeq", "PerTargetSeq",
"NumBackgroundSeq", "PerBackgroundSeq")
MotifName <- strsplit(homerHyper$MotifName, "/")
MotifName1 <- sapply(MotifName, function(x) x[1])
MotifName1 <- strsplit(MotifName1, "(", fixed=TRUE)
TF <- sapply(MotifName1, function(x) x[1])
TFtype <- sapply(MotifName1, function(x) x[2])
TFtype <- as.character(sapply(TFtype, function(x) gsub(")", "", x, fixed=TRUE)))
TFtype <- as.character(sapply(TFtype, function(x) gsub("?", "", x, fixed=TRUE)))
TFtype[TFtype == ",Zf"] <- "Zf"
MotifName2 <- sapply(MotifName, function(x) x[2])
MotifName2 <- strsplit(MotifName2, "-")
CellType <- sapply(MotifName2, function(x) x[1])
Antibody <- sapply(MotifName2, function(x) x[2])
Reference <- sapply(MotifName2, function(x) x[4])
Reference <- as.character(sapply(Reference, function(x) gsub("Seq(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Chip(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub(")", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Seq", "", x, fixed=TRUE)))
homerHyper <- cbind(TF, TFtype, CellType, Antibody, Reference, homerHyper)
homerHyper$PerTargetSeq <- as.numeric(sapply(homerHyper$PerTargetSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHyper$PerBackgroundSeq <- as.numeric(sapply(homerHyper$PerBackgroundSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHyper$LogPvalue <- -homerHyper$LogPvalue
homerHyper$Pvalue <- 10^-homerHyper$LogPvalue
homerHyper$qValue <- p.adjust(homerHyper$Pvalue, "fdr")
homerHyper$LogQvalue <- -log10(homerHyper$qValue)
homerHyper$FoldEnrichment <- homerHyper$PerTargetSeq/homerHyper$PerBackgroundSeq
homerHyper$Enriched <- sapply(homerHyper$qValue, function(x) ifelse(x < 0.05, TRUE, FALSE))
homerHyper <- homerHyper[order(homerHyper$MotifName),]
homerHyper$ID <- 1:dim(homerHyper)[1]
table(homerHyper$Enriched) #205 Enriched Factors at FDR < 0.05
# Hypo DMRs
homerHypo <- read.delim("Homer/Hypo DMRs/knownResults.txt", sep="\t", stringsAsFactors=FALSE)
colnames(homerHypo) <- c("MotifName", "Consensus", "Pvalue", "LogPvalue", "qValue", "NumTargetSeq", "PerTargetSeq",
"NumBackgroundSeq", "PerBackgroundSeq")
MotifName <- strsplit(homerHypo$MotifName, "/")
MotifName1 <- sapply(MotifName, function(x) x[1])
MotifName1 <- strsplit(MotifName1, "(", fixed=TRUE)
TF <- sapply(MotifName1, function(x) x[1])
TFtype <- sapply(MotifName1, function(x) x[2])
TFtype <- as.character(sapply(TFtype, function(x) gsub(")", "", x, fixed=TRUE)))
TFtype <- as.character(sapply(TFtype, function(x) gsub("?", "", x, fixed=TRUE)))
TFtype[TFtype == ",Zf"] <- "Zf"
MotifName2 <- sapply(MotifName, function(x) x[2])
MotifName2 <- strsplit(MotifName2, "-")
CellType <- sapply(MotifName2, function(x) x[1])
Antibody <- sapply(MotifName2, function(x) x[2])
Reference <- sapply(MotifName2, function(x) x[4])
Reference <- as.character(sapply(Reference, function(x) gsub("Seq(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Chip(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub(")", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Seq", "", x, fixed=TRUE)))
homerHypo <- cbind(TF, TFtype, CellType, Antibody, Reference, homerHypo)
homerHypo$PerTargetSeq <- as.numeric(sapply(homerHypo$PerTargetSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHypo$PerBackgroundSeq <- as.numeric(sapply(homerHypo$PerBackgroundSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHypo$LogPvalue <- -homerHypo$LogPvalue
homerHypo$Pvalue <- 10^-homerHypo$LogPvalue
homerHypo$qValue <- p.adjust(homerHypo$Pvalue, "fdr")
homerHypo$LogQvalue <- -log10(homerHypo$qValue)
homerHypo$FoldEnrichment <- homerHypo$PerTargetSeq/homerHypo$PerBackgroundSeq
homerHypo$Enriched <- sapply(homerHypo$qValue, function(x) ifelse(x < 0.05, TRUE, FALSE))
homerHypo <- homerHypo[order(homerHypo$MotifName),]
homerHypo$ID <- 1:dim(homerHypo)[1]
table(homerHypo$Enriched) #169 Enriched Factors at FDR < 0.05
# Volcano Plot All DMRs ----
# Label the 20 motifs with the largest -log(q-value), using the upper-case TF
# symbols shown in the figure.
top <- data.frame(TF = homerAll$TF,
                  FoldEnrichment = homerAll$FoldEnrichment,
                  LogQvalue = homerAll$LogQvalue)
top <- top[rank(-top$LogQvalue) <= 20, ]
top$TF <- as.character(top$TF)
# Recode HOMER's mixed-case motif names to figure-style symbols.
tf.recode <- c("Erra" = "ERRa", "Etv2" = "ETV2", "Rfx6" = "RFX6",
               "Foxa2" = "FOXA2", "Fli1" = "FLI1", "AP-2gamma" = "AP-2g")
needs.recode <- top$TF %in% names(tf.recode)
top$TF[needs.recode] <- tf.recode[top$TF[needs.recode]]
top$TF
#[1] "NF1" "THRb" "ERRa" "ERG" "ZNF711" "GABPA" "NF1-halfsite" "ETS1"
#[9] "ETV1" "ZFX" "FOXM1" "COUP-TFII" "ETV2" "PPARE" "HNF4a" "RFX6"
#[17] "FOXA2" "FLI1" "STAT4" "AP-2g"
allVolcano <- ggplot(homerAll) +
  geom_point(aes(x = FoldEnrichment, y = LogQvalue), color = "#3366CC", size = 2) +
  geom_text(data = top, aes(x = FoldEnrichment, y = LogQvalue, label = TF),
            size = 3.5, check_overlap = TRUE, hjust = 0, nudge_x = 0.05,
            nudge_y = 0) +
  theme_bw(base_size = 24) +
  labs(x = "Fold Enrichment", y = "-log(q-value)") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(color = "black", size = 1.25),
        axis.ticks = element_line(size = 1.25),
        legend.key = element_blank(),
        legend.position = c(1.2, 0.84),
        legend.background = element_blank(),
        plot.margin = unit(c(1, 1, 1, 1), "lines"),
        axis.text = element_text(size = 15, color = "Black"),
        axis.title = element_text(size = 15, color = "Black"),
        legend.title = element_text(size = 18),
        plot.title = element_text(size = 18)) +
  scale_x_continuous(breaks = pretty_breaks(n = 4), limits = c(0, 3))
allVolcano
# ggsave() writes the most recently displayed plot.
ggsave("Figures/Homer WD Specific DMR TF Motif Enrichment.png", dpi = 600,
       width = 7, height = 5, units = "in")
# Volcano Plot Hyper DMRs ----
# Same layout as the all-DMR volcano plot, restricted to hyper-DMR motifs.
topHyper <- data.frame(TF = homerHyper$TF,
                       FoldEnrichment = homerHyper$FoldEnrichment,
                       LogQvalue = homerHyper$LogQvalue)
topHyper <- topHyper[rank(-topHyper$LogQvalue) <= 20, ]
topHyper$TF <- as.character(topHyper$TF)
# Recode HOMER motif names to figure-style symbols (Nr5a2 is shown as LRH1).
hyper.recode <- c("Erra" = "ERRa", "Rfx6" = "RFX6", "Foxa2" = "FOXA2",
                  "Foxa3" = "FOXA3", "Nr5a2" = "LRH1", "Tlx?" = "TLX",
                  "Esrrb" = "ESRRb")
needs.recode <- topHyper$TF %in% names(hyper.recode)
topHyper$TF[needs.recode] <- hyper.recode[topHyper$TF[needs.recode]]
topHyper$TF
#[1] "HNF4a" "NF1-halfsite" "ERRa" "THRb" "NF1" "FOXA2" "COUP-TFII" "FOXM1"
#[9] "ZNF711" "FOXA1" "RXR" "PPARE" "ZNF322" "ZFX" "FOXA3" "FOXA1"
#[17] "LRH1" "TLX" "RFX6" "ESRRb"
hyperVolcano <- ggplot(homerHyper) +
  geom_point(aes(x = FoldEnrichment, y = LogQvalue), color = "#3366CC", size = 2) +
  geom_text(data = topHyper, aes(x = FoldEnrichment, y = LogQvalue, label = TF),
            size = 3.5, check_overlap = TRUE, hjust = 0, nudge_x = 0.05,
            nudge_y = 0) +
  theme_bw(base_size = 24) +
  labs(x = "Fold Enrichment", y = "-log(q-value)") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(color = "black", size = 1.25),
        axis.ticks = element_line(size = 1.25),
        legend.key = element_blank(),
        legend.position = c(1.2, 0.84),
        legend.background = element_blank(),
        plot.margin = unit(c(1, 1, 1, 1), "lines"),
        axis.text = element_text(size = 15, color = "Black"),
        axis.title = element_text(size = 15, color = "Black"),
        legend.title = element_text(size = 18),
        plot.title = element_text(size = 18)) +
  scale_x_continuous(breaks = pretty_breaks(n = 4), limits = c(0, 3))
hyperVolcano
ggsave("Figures/Homer WD Specific Hyper DMR TF Motif Enrichment.png", dpi = 600,
       width = 7, height = 5, units = "in")
# Volcano Plot Hypo DMRs ----
# Build the top-20 label table for the hypo-DMR volcano plot (the plot itself
# is drawn further below, after the overlap summaries).
topHypo <- data.frame(TF = homerHypo$TF,
                      FoldEnrichment = homerHypo$FoldEnrichment,
                      LogQvalue = homerHypo$LogQvalue)
topHypo <- topHypo[rank(-topHypo$LogQvalue) <= 20, ]
topHypo$TF <- as.character(topHypo$TF)
# Recode HOMER motif names to figure-style symbols.
hypo.recode <- c("Atf3" = "ATF3", "Ets1-distal" = "ETS1-distal",
                 "Etv2" = "ETV2", "Fli1" = "FLI1", "Fosl2" = "FOSL2",
                 "Fra1" = "FRA1", "Fra2" = "FRA2", "Jun-AP1" = "JUN-AP1",
                 "JunB" = "JUNB")
needs.recode <- topHypo$TF %in% names(hypo.recode)
topHypo$TF[needs.recode] <- hypo.recode[topHypo$TF[needs.recode]]
topHypo$TF
# [1] "ETS1" "ERG" "ETV2" "FLI1" "ETV1" "EHF" "GABPA"
# [8] "EWS:ERG-fusion" "ELF3" "EWS:FLI1-fusion" "ETS1-distal" "PU.1" "FRA2" "BATF"
# [15] "FRA1" "AP-1" "JUNB" "FOSL2" "ATF3" "JUN-AP1"
# Overlap between the labelled top-20 TF lists (counts noted from the run).
table(topHypo$TF %in% top$TF) #6
table(topHyper$TF %in% top$TF) #12
table(topHypo$TF %in% topHyper$TF) #0
# Motifs passing a stringent q < 1e-10 cutoff in each DMR set (of 364 motifs tested).
enrichedAll <- subset(homerAll, qValue < 1e-10) #50 / 364
enrichedHyper <- subset(homerHyper, qValue < 1e-10) #28 / 364
enrichedHypo <- subset(homerHypo, qValue < 1e-10) #25 / 364
# Overlap of stringently enriched motifs between DMR sets: hyper and hypo
# share none, so each direction has a distinct enriched-motif set.
table(enrichedAll$MotifName %in% enrichedHyper$MotifName) #20
table(enrichedAll$MotifName %in% enrichedHypo$MotifName) #22
table(enrichedHyper$MotifName %in% enrichedHypo$MotifName) #0 None Overlap between hyper and hypo
# Hypo-DMR volcano plot: every motif as a point, top 20 labelled via topHypo.
hypoVolcano <- ggplot(homerHypo) +
  geom_point(aes(x = FoldEnrichment, y = LogQvalue), color = "#3366CC", size = 2) +
  geom_text(data = topHypo, aes(x = FoldEnrichment, y = LogQvalue, label = TF),
            size = 3.5, check_overlap = TRUE, hjust = 0, nudge_x = 0.05,
            nudge_y = 0) +
  theme_bw(base_size = 24) +
  labs(x = "Fold Enrichment", y = "-log(q-value)") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(color = "black", size = 1.25),
        axis.ticks = element_line(size = 1.25),
        legend.key = element_blank(),
        legend.position = c(1.2, 0.84),
        legend.background = element_blank(),
        plot.margin = unit(c(1, 1, 1, 1), "lines"),
        axis.text = element_text(size = 15, color = "Black"),
        axis.title = element_text(size = 15, color = "Black"),
        legend.title = element_text(size = 18),
        plot.title = element_text(size = 18))
# Unlike the other volcano plots, the x axis is deliberately left free here
# (the scale_x_continuous(limits = c(0, 3)) step was disabled in the original).
hypoVolcano
ggsave("Figures/Homer WD Specific Hypo DMR TF Motif Enrichment.png", dpi = 600,
       width = 7, height = 5, units = "in")
# Scatter Plot Hyper vs Hypo DMR log q-value
# Compare each motif's significance in hyper- vs hypo-DMRs.
# NOTE(review): this assumes homerAll, homerHyper, and homerHypo contain the
# same motifs in the same row order (all three were sorted by MotifName
# above); if any motif set differed, the columns would be silently
# misaligned -- confirm before reusing.
LogQ <- data.frame(TF=homerAll$TF, LogQvalueHyper=homerHyper$LogQvalue, LogQvalueHypo=homerHypo$LogQvalue)
# Label motifs in the top 10 by significance on either axis.
topLogQ <- subset(LogQ, rank(-LogQ$LogQvalueHyper) <= 10 | rank(-LogQ$LogQvalueHypo) <= 10)
topLogQ$TF <- as.character(topLogQ$TF)
# Recode HOMER motif names to figure-style symbols.
topLogQ$TF[topLogQ$TF == "Erra"] <- "ERRa"
topLogQ$TF[topLogQ$TF == "Foxa2"] <- "FOXA2"
topLogQ$TF[topLogQ$TF == "Etv2"] <- "ETV2"
topLogQ$TF[topLogQ$TF == "Fli1"] <- "FLI1"
topLogQ$TF
# [1] "COUP-TFII" "EHF" "ELF3" "ERG" "ERRa" "ETS1" "ETV1"
# [8] "ETV2" "EWS:ERG-fusion" "EWS:FLI1-fusion" "FLI1" "FOXA1" "FOXA2" "FOXM1"
# [15] "GABPA" "HNF4a" "NF1-halfsite" "NF1" "THRb" "ZNF711"
gg <- ggplot(LogQ)
gg +
  geom_point(aes(x = LogQvalueHyper, y = LogQvalueHypo), color="#3366CC", size=2) +
  geom_text(data=topLogQ, aes(x=LogQvalueHyper, y=LogQvalueHypo, label=TF),
            size=3.5, check_overlap = TRUE, hjust=0, nudge_x=0.5, nudge_y=0) +
  theme_bw(base_size = 24) +
  labs(x="Hyper DMR -log(q-value)", y="Hypo DMR -log(q-value)") +
  theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
        axis.ticks = element_line(size = 1.25), legend.key = element_blank(),
        panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
        legend.background = element_blank(),
        plot.margin = unit(c(1,1,1,1), "lines"),
        axis.text = element_text(size = 15, color = "Black"),
        axis.title = element_text(size=15, color="Black"),
        legend.title = element_text(size = 18),
        plot.title = element_text(size = 18)) +
  # Square axes so equal significance falls on the diagonal.
  coord_cartesian(xlim=c(0,46), ylim=c(0,46))
ggsave("Figures/Homer WD Specific Hyper vs Hypo DMR TF Motif logqvalue.png", dpi = 600, width = 7, height = 5, units = "in")
# LOLA Enriched TFs Hyper DMRs ----
# Bar plot of HOMER motif significance for the TFs that LOLA found enriched,
# restricted to hyper-DMR motifs.
lolaTFs <- c("HNF4a", "RXR", "Foxa2", "FOXA1", "Max", "Atf3", "JunD",
             "REST-NRSF", "YY1", "COUP-TFII", "GABPA", "ZBTB33", "CTCF")
homerHyper_lola <- subset(homerHyper, TF %in% lolaTFs)
# Atf3 COUP-TFII CTCF FOXA1 FOXA1 Foxa2 GABPA HNF4a JunD Max REST-NRSF RXR YY1
# ZBTB33
# FOXA1 appears twice (two cell lines); keep one by dropping the MCF7 motif.
homerHyper_lola <- subset(homerHyper_lola, !(TF == "FOXA1" & CellType == "MCF7"))
homerHyper_lola$TF <- as.character(homerHyper_lola$TF)
# Recode HOMER motif names to official gene symbols.
symbol.recode <- c("HNF4a" = "HNF4A", "Foxa2" = "FOXA2",
                   "COUP-TFII" = "NR2F2", "RXR" = "RXRA", "Max" = "MAX",
                   "JunD" = "JUND", "Atf3" = "ATF3", "REST-NRSF" = "REST")
needs.recode <- homerHyper_lola$TF %in% names(symbol.recode)
homerHyper_lola$TF[needs.recode] <- symbol.recode[homerHyper_lola$TF[needs.recode]]
# Order bars by significance; an ordered factor fixes the plotting order.
homerHyper_lola <- homerHyper_lola[order(homerHyper_lola$LogQvalue), ]
homerHyper_lola$TF <- factor(homerHyper_lola$TF,
                             levels = unique(homerHyper_lola$TF), ordered = TRUE)
lolaHyperBar <- ggplot(homerHyper_lola) +
  geom_col(aes(x = TF, y = LogQvalue), color = "#3366CC", fill = "white",
           size = 1.2, position = "dodge") +
  coord_flip(ylim = c(0, 45)) +
  theme_bw(base_size = 24) +
  labs(y = "-log(q-value)") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(color = "black", size = 1.25),
        axis.ticks = element_line(size = 1.25),
        legend.key = element_blank(),
        legend.position = c(1.2, 0.84),
        legend.background = element_blank(),
        plot.margin = unit(c(1, 1, 1, 1), "lines"),
        axis.text = element_text(size = 15, color = "Black"),
        axis.title.x = element_text(size = 15, color = "Black"),
        axis.title.y = element_blank(),
        legend.title = element_text(size = 18),
        plot.title = element_text(size = 18)) +
  scale_y_continuous(breaks = pretty_breaks(n = 4))
lolaHyperBar
ggsave("Figures/Homer WD Specific Hyper DMR LOLA Liver TF Motif Enrichment.png",
       dpi = 600, width = 5, height = 7, units = "in")
# LOLA Enriched TFs Hypo DMRs
# Same bar plot as above, for hypo-DMR motifs. Rows are reordered with match()
# against the hyper-DMR table so both figures list TFs in the same order.
lolaTFs <- c("HNF4a", "RXR", "Foxa2", "FOXA1", "Max", "Atf3", "JunD", "REST-NRSF", "YY1", "COUP-TFII", "GABPA", "ZBTB33", "CTCF")
homerHypo_lola <- subset(homerHypo, TF %in% lolaTFs)
# Atf3 COUP-TFII CTCF FOXA1 FOXA1 Foxa2 GABPA HNF4a JunD Max REST-NRSF RXR YY1
# ZBTB33
homerHypo_lola <- subset(homerHypo_lola, !(TF == "FOXA1" & CellType == "MCF7")) #Remove duplicate FOXA1
homerHypo_lola$TF <- as.character(homerHypo_lola$TF)
# Recode to official gene symbols; must mirror the hyper-DMR recoding exactly,
# otherwise the match() below would produce NA rows.
homerHypo_lola$TF[homerHypo_lola$TF == "HNF4a"] <- "HNF4A"
homerHypo_lola$TF[homerHypo_lola$TF == "Foxa2"] <- "FOXA2"
homerHypo_lola$TF[homerHypo_lola$TF == "COUP-TFII"] <- "NR2F2"
homerHypo_lola$TF[homerHypo_lola$TF =="RXR"] <- "RXRA"
homerHypo_lola$TF[homerHypo_lola$TF =="Max"] <- "MAX"
homerHypo_lola$TF[homerHypo_lola$TF =="JunD"] <- "JUND"
homerHypo_lola$TF[homerHypo_lola$TF =="Atf3"] <- "ATF3"
homerHypo_lola$TF[homerHypo_lola$TF =="REST-NRSF"] <- "REST"
# Align rows to homerHyper_lola$TF so bar order matches the hyper-DMR figure.
# NOTE(review): assumes every hyper-DMR TF is also present here; a missing TF
# would become an all-NA row -- confirm against the data.
homerHypo_lola <- homerHypo_lola[match(homerHyper_lola$TF, homerHypo_lola$TF),]
homerHypo_lola$TF <- factor(homerHypo_lola$TF, levels=unique(homerHypo_lola$TF), ordered=TRUE)
gg <- ggplot(data = homerHypo_lola)
gg +
  geom_col(aes(x = TF, y = LogQvalue), color="#3366CC", fill="white", size=1.2, position="dodge") +
  coord_flip(ylim=c(0,45)) +
  theme_bw(base_size = 24) +
  labs(y="-log(q-value)") +
  # y tick marks (TF labels after the flip) are suppressed in this panel.
  theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
        axis.ticks.x = element_line(size = 1.25), legend.key = element_blank(), axis.ticks.y=element_blank(),
        panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
        legend.background = element_blank(),
        plot.margin = unit(c(1,1,1,1), "lines"),
        axis.text = element_text(size = 15, color = "Black"),
        axis.title.x = element_text(size=15, color="Black"),
        axis.title.y = element_blank(),
        legend.title = element_text(size = 18),
        plot.title = element_text(size = 18)) +
  scale_y_continuous(breaks=pretty_breaks(n=4))
ggsave("Figures/Homer WD Specific Hypo DMR LOLA Liver TF Motif Enrichment.png", dpi = 600, width = 5, height = 7, units = "in")
| /Liver/R/WD_Specific_Liver_DMR_HOMER.R | permissive | cemordaunt/WilsonDiseaseEpigenome | R | false | false | 20,258 | r | # WD Specific Liver DMR HOMER TF Analysis ####
# Charles Mordaunt
# 12/14/17
# Packages ####
library(ggplot2)
library(reshape2)
library(reshape)
library(scales)
# Known Motif Enrichment Results
# All DMRs
homerAll <- read.delim("Homer/All DMRs/knownResults.txt", sep="\t", stringsAsFactors=FALSE)
colnames(homerAll) <- c("MotifName", "Consensus", "Pvalue", "LogPvalue", "qValue", "NumTargetSeq", "PerTargetSeq",
"NumBackgroundSeq", "PerBackgroundSeq")
MotifName <- strsplit(homerAll$MotifName, "/")
MotifName1 <- sapply(MotifName, function(x) x[1])
MotifName1 <- strsplit(MotifName1, "(", fixed=TRUE)
TF <- sapply(MotifName1, function(x) x[1])
TFtype <- sapply(MotifName1, function(x) x[2])
TFtype <- as.character(sapply(TFtype, function(x) gsub(")", "", x, fixed=TRUE)))
TFtype <- as.character(sapply(TFtype, function(x) gsub("?", "", x, fixed=TRUE)))
TFtype[TFtype == ",Zf"] <- "Zf"
MotifName2 <- sapply(MotifName, function(x) x[2])
MotifName2 <- strsplit(MotifName2, "-")
CellType <- sapply(MotifName2, function(x) x[1])
Antibody <- sapply(MotifName2, function(x) x[2])
Reference <- sapply(MotifName2, function(x) x[4])
Reference <- as.character(sapply(Reference, function(x) gsub("Seq(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Chip(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub(")", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Seq", "", x, fixed=TRUE)))
homerAll <- cbind(TF, TFtype, CellType, Antibody, Reference, homerAll)
homerAll$PerTargetSeq <- as.numeric(sapply(homerAll$PerTargetSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerAll$PerBackgroundSeq <- as.numeric(sapply(homerAll$PerBackgroundSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerAll$LogPvalue <- -homerAll$LogPvalue
homerAll$Pvalue <- 10^-homerAll$LogPvalue
homerAll$qValue <- p.adjust(homerAll$Pvalue, "fdr")
homerAll$LogQvalue <- -log10(homerAll$qValue)
homerAll$FoldEnrichment <- homerAll$PerTargetSeq/homerAll$PerBackgroundSeq
homerAll$Enriched <- sapply(homerAll$qValue, function(x) ifelse(x < 0.05, TRUE, FALSE))
homerAll <- homerAll[order(homerAll$MotifName),]
homerAll$ID <- 1:dim(homerAll)[1]
table(homerAll$Enriched) #258 Enriched Factors at FDR < 0.05
# Hyper DMRs
homerHyper <- read.delim("Homer/Hyper DMRs/knownResults.txt", sep="\t", stringsAsFactors=FALSE)
colnames(homerHyper) <- c("MotifName", "Consensus", "Pvalue", "LogPvalue", "qValue", "NumTargetSeq", "PerTargetSeq",
"NumBackgroundSeq", "PerBackgroundSeq")
MotifName <- strsplit(homerHyper$MotifName, "/")
MotifName1 <- sapply(MotifName, function(x) x[1])
MotifName1 <- strsplit(MotifName1, "(", fixed=TRUE)
TF <- sapply(MotifName1, function(x) x[1])
TFtype <- sapply(MotifName1, function(x) x[2])
TFtype <- as.character(sapply(TFtype, function(x) gsub(")", "", x, fixed=TRUE)))
TFtype <- as.character(sapply(TFtype, function(x) gsub("?", "", x, fixed=TRUE)))
TFtype[TFtype == ",Zf"] <- "Zf"
MotifName2 <- sapply(MotifName, function(x) x[2])
MotifName2 <- strsplit(MotifName2, "-")
CellType <- sapply(MotifName2, function(x) x[1])
Antibody <- sapply(MotifName2, function(x) x[2])
Reference <- sapply(MotifName2, function(x) x[4])
Reference <- as.character(sapply(Reference, function(x) gsub("Seq(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Chip(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub(")", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Seq", "", x, fixed=TRUE)))
homerHyper <- cbind(TF, TFtype, CellType, Antibody, Reference, homerHyper)
homerHyper$PerTargetSeq <- as.numeric(sapply(homerHyper$PerTargetSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHyper$PerBackgroundSeq <- as.numeric(sapply(homerHyper$PerBackgroundSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHyper$LogPvalue <- -homerHyper$LogPvalue
homerHyper$Pvalue <- 10^-homerHyper$LogPvalue
homerHyper$qValue <- p.adjust(homerHyper$Pvalue, "fdr")
homerHyper$LogQvalue <- -log10(homerHyper$qValue)
homerHyper$FoldEnrichment <- homerHyper$PerTargetSeq/homerHyper$PerBackgroundSeq
homerHyper$Enriched <- sapply(homerHyper$qValue, function(x) ifelse(x < 0.05, TRUE, FALSE))
homerHyper <- homerHyper[order(homerHyper$MotifName),]
homerHyper$ID <- 1:dim(homerHyper)[1]
table(homerHyper$Enriched) #205 Enriched Factors at FDR < 0.05
# Hypo DMRs
homerHypo <- read.delim("Homer/Hypo DMRs/knownResults.txt", sep="\t", stringsAsFactors=FALSE)
colnames(homerHypo) <- c("MotifName", "Consensus", "Pvalue", "LogPvalue", "qValue", "NumTargetSeq", "PerTargetSeq",
"NumBackgroundSeq", "PerBackgroundSeq")
MotifName <- strsplit(homerHypo$MotifName, "/")
MotifName1 <- sapply(MotifName, function(x) x[1])
MotifName1 <- strsplit(MotifName1, "(", fixed=TRUE)
TF <- sapply(MotifName1, function(x) x[1])
TFtype <- sapply(MotifName1, function(x) x[2])
TFtype <- as.character(sapply(TFtype, function(x) gsub(")", "", x, fixed=TRUE)))
TFtype <- as.character(sapply(TFtype, function(x) gsub("?", "", x, fixed=TRUE)))
TFtype[TFtype == ",Zf"] <- "Zf"
MotifName2 <- sapply(MotifName, function(x) x[2])
MotifName2 <- strsplit(MotifName2, "-")
CellType <- sapply(MotifName2, function(x) x[1])
Antibody <- sapply(MotifName2, function(x) x[2])
Reference <- sapply(MotifName2, function(x) x[4])
Reference <- as.character(sapply(Reference, function(x) gsub("Seq(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Chip(", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub(")", "", x, fixed=TRUE)))
Reference <- as.character(sapply(Reference, function(x) gsub("Seq", "", x, fixed=TRUE)))
homerHypo <- cbind(TF, TFtype, CellType, Antibody, Reference, homerHypo)
homerHypo$PerTargetSeq <- as.numeric(sapply(homerHypo$PerTargetSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHypo$PerBackgroundSeq <- as.numeric(sapply(homerHypo$PerBackgroundSeq, function(x) gsub("%", "", x, fixed=TRUE)))
homerHypo$LogPvalue <- -homerHypo$LogPvalue
homerHypo$Pvalue <- 10^-homerHypo$LogPvalue
homerHypo$qValue <- p.adjust(homerHypo$Pvalue, "fdr")
homerHypo$LogQvalue <- -log10(homerHypo$qValue)
homerHypo$FoldEnrichment <- homerHypo$PerTargetSeq/homerHypo$PerBackgroundSeq
homerHypo$Enriched <- sapply(homerHypo$qValue, function(x) ifelse(x < 0.05, TRUE, FALSE))
homerHypo <- homerHypo[order(homerHypo$MotifName),]
homerHypo$ID <- 1:dim(homerHypo)[1]
table(homerHypo$Enriched) #169 Enriched Factors at FDR < 0.05
# Volcano Plot All DMRs
top <- data.frame(TF=homerAll$TF, FoldEnrichment=homerAll$FoldEnrichment, LogQvalue=homerAll$LogQvalue)
top <- subset(top, rank(-top$LogQvalue) <= 20)
top$TF <- as.character(top$TF)
top$TF[top$TF == "Erra"] <- "ERRa"
top$TF[top$TF == "Etv2"] <- "ETV2"
top$TF[top$TF == "Rfx6"] <- "RFX6"
top$TF[top$TF == "Foxa2"] <- "FOXA2"
top$TF[top$TF == "Fli1"] <- "FLI1"
top$TF[top$TF == "AP-2gamma"] <- "AP-2g"
top$TF
#[1] "NF1" "THRb" "ERRa" "ERG" "ZNF711" "GABPA" "NF1-halfsite" "ETS1"
#[9] "ETV1" "ZFX" "FOXM1" "COUP-TFII" "ETV2" "PPARE" "HNF4a" "RFX6"
#[17] "FOXA2" "FLI1" "STAT4" "AP-2g"
gg <- ggplot(data = homerAll)
gg +
geom_point(aes(x = FoldEnrichment, y = LogQvalue), color="#3366CC", size=2) +
geom_text(data=top, aes(x=FoldEnrichment, y=LogQvalue, label=TF),
size=3.5, check_overlap = TRUE, hjust=0, nudge_x=0.05, nudge_y=0) +
theme_bw(base_size = 24) +
labs(x="Fold Enrichment", y="-log(q-value)") +
theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
axis.ticks = element_line(size = 1.25), legend.key = element_blank(),
panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
legend.background = element_blank(),
plot.margin = unit(c(1,1,1,1), "lines"),
axis.text = element_text(size = 15, color = "Black"),
axis.title = element_text(size=15, color="Black"),
legend.title = element_text(size = 18),
plot.title = element_text(size = 18)) +
scale_x_continuous(breaks=pretty_breaks(n=4), limits=c(0,3))
ggsave("Figures/Homer WD Specific DMR TF Motif Enrichment.png", dpi = 600, width = 7, height = 5, units = "in")
# Volcano Plot Hyper DMRs
topHyper <- data.frame(TF=homerHyper$TF, FoldEnrichment=homerHyper$FoldEnrichment, LogQvalue=homerHyper$LogQvalue)
topHyper <- subset(topHyper, rank(-topHyper$LogQvalue) <= 20)
topHyper$TF <- as.character(topHyper$TF)
topHyper$TF[topHyper$TF == "Erra"] <- "ERRa"
topHyper$TF[topHyper$TF == "Rfx6"] <- "RFX6"
topHyper$TF[topHyper$TF == "Foxa2"] <- "FOXA2"
topHyper$TF[topHyper$TF == "Foxa3"] <- "FOXA3"
topHyper$TF[topHyper$TF == "Nr5a2"] <- "LRH1"
topHyper$TF[topHyper$TF == "Tlx?"] <- "TLX"
topHyper$TF[topHyper$TF == "Esrrb"] <- "ESRRb"
topHyper$TF
#[1] "HNF4a" "NF1-halfsite" "ERRa" "THRb" "NF1" "FOXA2" "COUP-TFII" "FOXM1"
#[9] "ZNF711" "FOXA1" "RXR" "PPARE" "ZNF322" "ZFX" "FOXA3" "FOXA1"
#[17] "LRH1" "TLX" "RFX6" "ESRRb"
gg <- ggplot(data = homerHyper)
gg +
geom_point(aes(x = FoldEnrichment, y = LogQvalue), color="#3366CC", size=2) +
geom_text(data=topHyper, aes(x=FoldEnrichment, y=LogQvalue, label=TF),
size=3.5, check_overlap = TRUE, hjust=0, nudge_x=0.05, nudge_y=0) +
theme_bw(base_size = 24) +
labs(x="Fold Enrichment", y="-log(q-value)") +
theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
axis.ticks = element_line(size = 1.25), legend.key = element_blank(),
panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
legend.background = element_blank(),
plot.margin = unit(c(1,1,1,1), "lines"),
axis.text = element_text(size = 15, color = "Black"),
axis.title = element_text(size=15, color="Black"),
legend.title = element_text(size = 18),
plot.title = element_text(size = 18)) +
scale_x_continuous(breaks=pretty_breaks(n=4), limits=c(0,3))
ggsave("Figures/Homer WD Specific Hyper DMR TF Motif Enrichment.png", dpi = 600, width = 7, height = 5, units = "in")
# Volcano Plot Hypo DMRs
topHypo <- data.frame(TF=homerHypo$TF, FoldEnrichment=homerHypo$FoldEnrichment, LogQvalue=homerHypo$LogQvalue)
topHypo <- subset(topHypo, rank(-topHypo$LogQvalue) <= 20)
topHypo$TF <- as.character(topHypo$TF)
topHypo$TF[topHypo$TF == "Atf3"] <- "ATF3"
topHypo$TF[topHypo$TF == "Ets1-distal"] <- "ETS1-distal"
topHypo$TF[topHypo$TF == "Etv2"] <- "ETV2"
topHypo$TF[topHypo$TF == "Fli1"] <- "FLI1"
topHypo$TF[topHypo$TF == "Fosl2"] <- "FOSL2"
topHypo$TF[topHypo$TF == "Fra1"] <- "FRA1"
topHypo$TF[topHypo$TF == "Fra2"] <- "FRA2"
topHypo$TF[topHypo$TF == "Jun-AP1"] <- "JUN-AP1"
topHypo$TF[topHypo$TF == "JunB"] <- "JUNB"
topHypo$TF
# [1] "ETS1" "ERG" "ETV2" "FLI1" "ETV1" "EHF" "GABPA"
# [8] "EWS:ERG-fusion" "ELF3" "EWS:FLI1-fusion" "ETS1-distal" "PU.1" "FRA2" "BATF"
# [15] "FRA1" "AP-1" "JUNB" "FOSL2" "ATF3" "JUN-AP1"
table(topHypo$TF %in% top$TF) #6
table(topHyper$TF %in% top$TF) #12
table(topHypo$TF %in% topHyper$TF) #0
enrichedAll <- subset(homerAll, qValue < 1e-10) #50 / 364
enrichedHyper <- subset(homerHyper, qValue < 1e-10) #28 / 364
enrichedHypo <- subset(homerHypo, qValue < 1e-10) #25 / 364
table(enrichedAll$MotifName %in% enrichedHyper$MotifName) #20
table(enrichedAll$MotifName %in% enrichedHypo$MotifName) #22
table(enrichedHyper$MotifName %in% enrichedHypo$MotifName) #0 None Overlap between hyper and hypo
# Fold-enrichment vs -log(q-value) scatter for hypo-DMR motifs, with the
# top-20 TFs (topHypo) labeled; saved as a 7x5in PNG.
# NOTE(review): element_rect(size=)/element_line(size=) were deprecated in
# favor of linewidth in ggplot2 >= 3.4 -- works but warns on newer versions.
gg <- ggplot(data = homerHypo)
gg +
        geom_point(aes(x = FoldEnrichment, y = LogQvalue), color="#3366CC", size=2) +
        geom_text(data=topHypo, aes(x=FoldEnrichment, y=LogQvalue, label=TF),
                  size=3.5, check_overlap = TRUE, hjust=0, nudge_x=0.05, nudge_y=0) +
        theme_bw(base_size = 24) +
        labs(x="Fold Enrichment", y="-log(q-value)") +
        theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
              axis.ticks = element_line(size = 1.25), legend.key = element_blank(),
              panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
              legend.background = element_blank(),
              plot.margin = unit(c(1,1,1,1), "lines"),
              axis.text = element_text(size = 15, color = "Black"),
              axis.title = element_text(size=15, color="Black"),
              legend.title = element_text(size = 18),
              plot.title = element_text(size = 18)) #+
        #scale_x_continuous(breaks=pretty_breaks(n=4), limits=c(0,3))
ggsave("Figures/Homer WD Specific Hypo DMR TF Motif Enrichment.png", dpi = 600, width = 7, height = 5, units = "in")
# Scatter Plot Hyper vs Hypo DMR log q-value
# Pair each motif's hyper- and hypo-DMR -log(q-value).
# NOTE(review): this pairs columns purely by row position -- it assumes
# homerAll, homerHyper and homerHypo list motifs in the same order; confirm
# upstream that the three tables are row-aligned.
LogQ <- data.frame(TF=homerAll$TF, LogQvalueHyper=homerHyper$LogQvalue, LogQvalueHypo=homerHypo$LogQvalue)
# Label the union of the top-10 motifs on either axis.
topLogQ <- subset(LogQ, rank(-LogQ$LogQvalueHyper) <= 10 | rank(-LogQ$LogQvalueHypo) <= 10)
topLogQ$TF <- as.character(topLogQ$TF)
topLogQ$TF[topLogQ$TF == "Erra"] <- "ERRa"
topLogQ$TF[topLogQ$TF == "Foxa2"] <- "FOXA2"
topLogQ$TF[topLogQ$TF == "Etv2"] <- "ETV2"
topLogQ$TF[topLogQ$TF == "Fli1"] <- "FLI1"
topLogQ$TF
# Recorded output of the line above:
# [1] "COUP-TFII"       "EHF"             "ELF3"            "ERG"             "ERRa"            "ETS1"            "ETV1"           
# [8] "ETV2"            "EWS:ERG-fusion"  "EWS:FLI1-fusion" "FLI1"            "FOXA1"           "FOXA2"           "FOXM1"          
# [15] "GABPA"           "HNF4a"           "NF1-halfsite"    "NF1"             "THRb"            "ZNF711"  
# Hyper vs hypo significance scatter on a fixed square window; 7x5in PNG.
gg <- ggplot(LogQ)
gg +
        geom_point(aes(x = LogQvalueHyper, y = LogQvalueHypo), color="#3366CC", size=2) +
        geom_text(data=topLogQ, aes(x=LogQvalueHyper, y=LogQvalueHypo, label=TF),
                  size=3.5, check_overlap = TRUE, hjust=0, nudge_x=0.5, nudge_y=0) +
        theme_bw(base_size = 24) +
        labs(x="Hyper DMR -log(q-value)", y="Hypo DMR -log(q-value)") +
        theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
              axis.ticks = element_line(size = 1.25), legend.key = element_blank(),
              panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
              legend.background = element_blank(),
              plot.margin = unit(c(1,1,1,1), "lines"),
              axis.text = element_text(size = 15, color = "Black"),
              axis.title = element_text(size=15, color="Black"),
              legend.title = element_text(size = 18),
              plot.title = element_text(size = 18)) +
        coord_cartesian(xlim=c(0,46), ylim=c(0,46))
ggsave("Figures/Homer WD Specific Hyper vs Hypo DMR TF Motif logqvalue.png", dpi = 600, width = 7, height = 5, units = "in")
# LOLA Enriched TFs Hyper DMRs
# Horizontal bar chart of Homer -log(q-values) for the TFs that LOLA flagged
# as enriched; names are recoded to official gene symbols.
lolaTFs <- c("HNF4a", "RXR", "Foxa2", "FOXA1", "Max", "Atf3", "JunD", "REST-NRSF", "YY1", "COUP-TFII", "GABPA", "ZBTB33", "CTCF")
homerHyper_lola <- subset(homerHyper, TF %in% lolaTFs)
# Matched motifs (FOXA1 appears twice, once per cell type):
# Atf3 COUP-TFII CTCF FOXA1 FOXA1 Foxa2 GABPA HNF4a JunD Max REST-NRSF RXR YY1
# ZBTB33
homerHyper_lola <- subset(homerHyper_lola, !(TF == "FOXA1" & CellType == "MCF7")) #Remove duplicate FOXA1
# Recode Homer motif names to official (upper-case) gene symbols.
homerHyper_lola$TF <- as.character(homerHyper_lola$TF)
homerHyper_lola$TF[homerHyper_lola$TF == "HNF4a"] <- "HNF4A"
homerHyper_lola$TF[homerHyper_lola$TF == "Foxa2"] <- "FOXA2"
homerHyper_lola$TF[homerHyper_lola$TF == "COUP-TFII"] <- "NR2F2"
homerHyper_lola$TF[homerHyper_lola$TF =="RXR"] <- "RXRA"
homerHyper_lola$TF[homerHyper_lola$TF =="Max"] <- "MAX"
homerHyper_lola$TF[homerHyper_lola$TF =="JunD"] <- "JUND"
homerHyper_lola$TF[homerHyper_lola$TF =="Atf3"] <- "ATF3"
homerHyper_lola$TF[homerHyper_lola$TF =="REST-NRSF"] <- "REST"
# Order bars by significance (ordered factor drives the axis order).
homerHyper_lola <- homerHyper_lola[order(homerHyper_lola$LogQvalue),]
homerHyper_lola$TF <- factor(homerHyper_lola$TF, levels=unique(homerHyper_lola$TF), ordered=TRUE)
gg <- ggplot(data = homerHyper_lola)
gg +
        geom_col(aes(x = TF, y = LogQvalue), color="#3366CC", fill="white", size=1.2, position="dodge") +
        coord_flip(ylim=c(0,45)) +
        theme_bw(base_size = 24) +
        labs(y="-log(q-value)") +
        theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
              axis.ticks = element_line(size = 1.25), legend.key = element_blank(),
              panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
              legend.background = element_blank(),
              plot.margin = unit(c(1,1,1,1), "lines"),
              axis.text = element_text(size = 15, color = "Black"),
              axis.title.x = element_text(size=15, color="Black"),
              axis.title.y = element_blank(),
              legend.title = element_text(size = 18),
              plot.title = element_text(size = 18)) +
        scale_y_continuous(breaks=pretty_breaks(n=4))
ggsave("Figures/Homer WD Specific Hyper DMR LOLA Liver TF Motif Enrichment.png", dpi = 600, width = 5, height = 7, units = "in")
# LOLA Enriched TFs Hypo DMRs
# Same bar chart for hypo DMRs; bars are reordered with match() so the TF
# order mirrors the hyper-DMR figure for side-by-side comparison.
lolaTFs <- c("HNF4a", "RXR", "Foxa2", "FOXA1", "Max", "Atf3", "JunD", "REST-NRSF", "YY1", "COUP-TFII", "GABPA", "ZBTB33", "CTCF")
homerHypo_lola <- subset(homerHypo, TF %in% lolaTFs)
# Matched motifs (FOXA1 appears twice, once per cell type):
# Atf3 COUP-TFII CTCF FOXA1 FOXA1 Foxa2 GABPA HNF4a JunD Max REST-NRSF RXR YY1
# ZBTB33
homerHypo_lola <- subset(homerHypo_lola, !(TF == "FOXA1" & CellType == "MCF7")) #Remove duplicate FOXA1
# Recode Homer motif names to official (upper-case) gene symbols.
homerHypo_lola$TF <- as.character(homerHypo_lola$TF)
homerHypo_lola$TF[homerHypo_lola$TF == "HNF4a"] <- "HNF4A"
homerHypo_lola$TF[homerHypo_lola$TF == "Foxa2"] <- "FOXA2"
homerHypo_lola$TF[homerHypo_lola$TF == "COUP-TFII"] <- "NR2F2"
homerHypo_lola$TF[homerHypo_lola$TF =="RXR"] <- "RXRA"
homerHypo_lola$TF[homerHypo_lola$TF =="Max"] <- "MAX"
homerHypo_lola$TF[homerHypo_lola$TF =="JunD"] <- "JUND"
homerHypo_lola$TF[homerHypo_lola$TF =="Atf3"] <- "ATF3"
homerHypo_lola$TF[homerHypo_lola$TF =="REST-NRSF"] <- "REST"
# Reorder rows to the hyper-DMR bar order; assumes homerHyper_lola exists and
# was already recoded above (NOTE(review): depends on running the hyper block first).
homerHypo_lola <- homerHypo_lola[match(homerHyper_lola$TF, homerHypo_lola$TF),]
homerHypo_lola$TF <- factor(homerHypo_lola$TF, levels=unique(homerHypo_lola$TF), ordered=TRUE)
gg <- ggplot(data = homerHypo_lola)
gg +
        geom_col(aes(x = TF, y = LogQvalue), color="#3366CC", fill="white", size=1.2, position="dodge") +
        coord_flip(ylim=c(0,45)) +
        theme_bw(base_size = 24) +
        labs(y="-log(q-value)") +
        theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
              axis.ticks.x = element_line(size = 1.25), legend.key = element_blank(), axis.ticks.y=element_blank(),
              panel.grid.minor = element_blank(), legend.position = c(1.2, 0.84),
              legend.background = element_blank(),
              plot.margin = unit(c(1,1,1,1), "lines"),
              axis.text = element_text(size = 15, color = "Black"),
              axis.title.x = element_text(size=15, color="Black"),
              axis.title.y = element_blank(),
              legend.title = element_text(size = 18),
              plot.title = element_text(size = 18)) +
        scale_y_continuous(breaks=pretty_breaks(n=4))
ggsave("Figures/Homer WD Specific Hypo DMR LOLA Liver TF Motif Enrichment.png", dpi = 600, width = 5, height = 7, units = "in")
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.3) on the upper
# aerodigestive tract training set and append the coefficient-path summary
# to the model log file.
library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/upper_aerodigestive_tract.csv",head=T,sep=",")
# Column 1 is the response; predictors start at column 4
# (columns 2-3 presumably metadata -- confirm against the CSV layout).
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)  # fixed seed so the CV fold assignment is reproducible
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.3,family="gaussian",standardize=TRUE)
# Append the printed glmnet fit (df / %dev / lambda path) to the log file.
sink('./Model/EN/Lasso/upper_aerodigestive_tract/upper_aerodigestive_tract_043.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Lasso/upper_aerodigestive_tract/upper_aerodigestive_tract_043.R | no_license | leon1003/QSMART | R | false | false | 407 | r | library(glmnet)
# Duplicate copy of the elastic-net fitting script (library(glmnet) call sits
# on the preceding metadata row of this dump).
mydata = read.table("./TrainingSet/LassoBIC/upper_aerodigestive_tract.csv",head=T,sep=",")
# Column 1 is the response; predictors start at column 4.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)  # fixed seed so the CV fold assignment is reproducible
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.3,family="gaussian",standardize=TRUE)
# Append the printed glmnet fit to the log file.
sink('./Model/EN/Lasso/upper_aerodigestive_tract/upper_aerodigestive_tract_043.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
library(readr)
library(dplyr)
library(data.table)
# load in FNN outliers
fnn_outliers <- as.character(read.csv("thesis/outliers.csv")$x)
##################################
############## LIAR ##############
##################################
loadLIARTrain <- function() {
  # Read the LIAR training split, name its columns, and order the label
  # factor from least to most truthful.
  # NOTE(review): this reads "../data/LIAR/..." while loadLIARTest() reads
  # "data/LIAR/..." -- the two loaders assume different working directories;
  # confirm which path is correct.
  liar_cols <- c("ID","label","text","subject","speaker","speaker.title","state",
                 "party","bt.count","f.count","ht.count","mt.count","pof.count","context")
  train <- as_tibble(fread(file="../data/LIAR/dataset/train.TSV", sep = '\t', quote="", header = FALSE))
  names(train) <- liar_cols
  # Relevel each label to the front, last first, yielding the order
  # pants-fire < false < ... < true.
  train$label <- as.factor(train$label)
  truth_levels <- c("pants-fire","false","barely-true","half-true","mostly-true","true")
  for (lvl in rev(truth_levels)) {
    train$label <- relevel(train$label, lvl)
  }
  train
}
loadLIARTest <- function() {
  # Read the LIAR test split, name its columns, and order the label factor
  # from least to most truthful (mirrors loadLIARTrain()).
  liar_cols <- c("ID","label","text","subject","speaker","speaker.title","state",
                 "party","bt.count","f.count","ht.count","mt.count","pof.count","context")
  test <- as_tibble(fread(file="data/LIAR/dataset/test.TSV", sep = '\t', quote="", header = FALSE))
  names(test) <- liar_cols
  # Relevel each label to the front, last first, yielding the order
  # pants-fire < false < ... < true.
  test$label <- as.factor(test$label)
  truth_levels <- c("pants-fire","false","barely-true","half-true","mostly-true","true")
  for (lvl in rev(truth_levels)) {
    test$label <- relevel(test$label, lvl)
  }
  test
}
##################################
########## FAKENEWSNET ###########
##################################
#splitDataFNN()
## load FakeNewsNet dataset
splitDataFNN <- function() {
  # Split the full FakeNewsNet table into a 75/25 train/test partition and
  # write both splits to TSV.
  # NOTE(review): no set.seed() here, so the partition differs on every call;
  # seed before calling if a reproducible split is required.
  ffn<-fread(file="data/FakeNewsNet/dataset/fnn_data.TSV",sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
    as_tibble()
  train_ind <- sample(seq_len(nrow(ffn)), size = floor(nrow(ffn) * .75))
  train <- ffn[train_ind,]
  test <- ffn[-train_ind,]
  # Fix: write under "data/..." so loadFNNTrain()/loadFNNTest(), which read
  # "data/FakeNewsNet/dataset/fnn_train.tsv" etc., can find the output
  # (the original wrote to "FakeNewsNet/dataset/...").
  write_tsv(data.frame(train), "data/FakeNewsNet/dataset/fnn_train.tsv")
  write_tsv(data.frame(test), "data/FakeNewsNet/dataset/fnn_test.tsv")
}
loadFNNTrain <- function() {
  # Read the FakeNewsNet training split, coerce the label to a factor, and
  # drop rows listed in the file-global `fnn_outliers` vector.
  train <- as_tibble(fread(file="data/FakeNewsNet/dataset/fnn_train.tsv", sep = '\t',
                           quote="", header = TRUE, encoding="UTF-8"))
  train$label <- as.factor(train$label)
  train[!(train$ID %in% fnn_outliers), ]
}
loadFNNTest <- function() {
  # Read the FakeNewsNet test split with the label as a factor.
  # (No outlier filter here, matching the original -- presumably intentional
  # so the evaluation set stays untouched; confirm.)
  test <- as_tibble(fread(file="data/FakeNewsNet/dataset/fnn_test.tsv", sep = '\t',
                          quote="", header = TRUE, encoding="UTF-8"))
  test$label <- as.factor(test$label)
  test
}
loadFNN <- function() {
  # Read the complete FakeNewsNet table, excluding known outlier rows.
  fnn <- as_tibble(fread(file="data/FakeNewsNet/dataset/fnn_data.TSV", sep = '\t',
                         quote="", header = TRUE, encoding="UTF-8"))
  fnn[!(fnn$ID %in% fnn_outliers), ]
}
loadFNNComplexity <- function(str){
  # Load precomputed text-complexity features for one FNN split.
  #
  # str: split name used in the file path (e.g. "train"/"test" -- confirm
  #      against the files under features/).
  # Returns a tibble keyed by (ID, label) with the raw `text` column dropped
  # and outlier rows removed.
  path <- paste0("features/fnn_", str, "_complexity.tsv")  # paste0 over paste(sep='')
  dat_complexity <- fread(file=path,sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
    as_tibble() %>%
    mutate(ID = as.character(ID), label = as.factor(label)) %>%
    dplyr::select(-text) %>%
    filter(!(ID %in% fnn_outliers))
  return(dat_complexity)
}
loadLIWCGroups <- function(){
  # Map each LIWC 2015 output variable to its category group.
  #
  # Fix: the original built a one-row tibble of 90+ columns and reshaped it
  # with an unqualified gather() call, but tidyr is never attached by this
  # file, so the function failed unless the caller happened to load tidyr.
  # Building the two-column tibble directly removes that hidden dependency
  # (gather() is superseded anyway) and produces identical output.
  #
  # Returns a tibble with columns `var` (LIWC variable name) and `group`
  # (category), one row per variable, in LIWC output order.
  liwc_vars <- c(
    # summary-language measures
    "WC", "Analytic", "Clout", "Authentic", "Tone", "WPS", "Sixltr", "Dic",
    # function words
    "function", "pronoun", "ppron", "i", "we", "you", "shehe", "they",
    "ipron", "article", "prep", "auxverb", "adverb", "conj", "negate",
    # other grammar
    "verb", "adj", "compare", "interrog", "number", "quant",
    # affect
    "affect", "posemo", "negemo", "anx", "anger", "sad",
    # social
    "social", "family", "friend", "female", "male",
    # cognitive processes
    "cogproc", "insight", "cause", "discrep", "tentat", "certain", "differ",
    # perception
    "percept", "see", "hear", "feel",
    # biological processes
    "bio", "body", "health", "sexual", "ingest",
    # drives
    "drives", "affiliation", "achieve", "power", "reward", "risk",
    # time orientation
    "focuspast", "focuspresent", "focusfuture",
    # relativity
    "relativ", "motion", "space", "time",
    # personal concerns
    "work", "leisure", "home", "money", "relig", "death",
    # informal language
    "informal", "swear", "netspeak", "assent", "nonflu", "filler",
    # punctuation
    "AllPunc", "Period", "Comma", "Colon", "SemiC", "QMark", "Exclam",
    "Dash", "Quote", "Apostro", "Parenth", "OtherP"
  )
  # How many consecutive variables fall in each group (same order as above).
  group_sizes <- c(summary = 8, "function" = 15, othergram = 6, affect = 6,
                   social = 5, cogproc = 7, percept = 4, bio = 5, drives = 6,
                   timeorient = 3, relativ = 4, personc = 6, informal = 6,
                   punc = 12)
  LIWC_groups <- tibble(var = liwc_vars,
                        group = rep(names(group_sizes), times = group_sizes))
  return(LIWC_groups)
}
loadFNNLIWC <- function(str){
  # Load LIWC 2015 annotations for one FNN split. The raw CSV carries generic
  # column names A-D (ID, label, title, text) followed by the LIWC measures
  # WC..OtherP.
  #
  # Fix: the original select() mixed inclusions (ID, label, WC:OtherP) with
  # exclusions (-title, -text). Mixed selections error under current
  # tidyselect, and the exclusions were no-ops anyway (those columns were
  # never included), so dropping them preserves the result exactly.
  path <- paste0("annotations/LIWC/LIWC2015_fnn_", str, ".csv")
  dat_LIWC<-fread(file=path,header = TRUE, encoding="UTF-8") %>%
    as_tibble() %>%
    mutate(ID = as.character(A), label = as.factor(B), title = as.character(C), text = as.character(D)) %>%
    dplyr::select(ID, label, WC:OtherP) %>%
    filter(!(ID %in% fnn_outliers))
  return(dat_LIWC)
}
loadFNNPOS <- function(str){
  # Load CoreNLP part-of-speech tag counts for one FNN split; (ID, label)
  # first, outlier rows removed.
  pos_path <- paste("annotations/coreNLP/fnn_",str,"_POS.tsv", sep = '')
  pos_raw <- fread(file=pos_path,sep = '\t', quote="", header = TRUE, encoding="UTF-8")
  dat_POS <- pos_raw %>%
    as_tibble() %>%
    mutate(ID = as.character(ID), label = as.factor(label)) %>%
    dplyr::select(ID, label, everything()) %>%
    filter(!(ID %in% fnn_outliers))
  return(dat_POS)
}
loadFNNNER <- function(str){
  # Load CoreNLP named-entity counts for one FNN split; (ID, label) first,
  # outlier rows removed. Mirrors loadFNNPOS().
  ner_path <- paste("annotations/coreNLP/fnn_",str,"_NER.tsv", sep = '')
  ner_raw <- fread(file=ner_path,sep = '\t', quote="", header = TRUE, encoding="UTF-8")
  dat_NER <- ner_raw %>%
    as_tibble() %>%
    mutate(ID = as.character(ID), label=as.factor(label)) %>%
    dplyr::select(ID, label, everything()) %>%
    filter(!(ID %in% fnn_outliers))
  return(dat_NER)
}
loadFNNtxtfeat <- function(str){
  # Assemble the full text-feature matrix for one FNN split by joining the
  # complexity, LIWC, POS and NER feature tables on (ID, label), then
  # recoding the label and deduplicating by ID.
  # load complexity
  temp_complexity <- loadFNNComplexity(str)
  # load LWIC
  temp_LIWC<-loadFNNLIWC(str)
  # load POS
  temp_POS <- loadFNNPOS(str)
  # load NER
  temp_NER <- loadFNNNER(str)
  # merge
  temp_txtfeat <- temp_complexity %>%
    left_join(temp_LIWC, by = c("ID", "label")) %>%
    left_join(temp_POS, by = c("ID", "label")) %>%
    left_join(temp_NER, by = c("ID", "label")) %>%
    # 2 - unclass(label) flips the integer factor codes 1/2 to 1/0 --
    # presumably mapping the two-level label so the first level becomes 1;
    # NOTE(review): confirm the intended polarity of the resulting 0/1 factor.
    mutate(label = as.factor(2 - unclass(label))) %>%
    distinct(ID, .keep_all= TRUE)
  return(temp_txtfeat)
}
## write LIAR to fastText format
# file.create("LIAR/train.txt")
# #out<-file("liar_dataset/train.txt")
# for (i in 1:nrow(train)) {
# line<-paste("__label__", unclass(train$label[i]), " ",train$statement[i], sep="")
# write(line,file="LIAR/train.txt",append=TRUE)
# }
#close(out)
## write LIAR to output
# file.create("LIAR/test.txt")
# #out<-file("liar_dataset/test.txt")
# for (i in 1:nrow(test)) {
# line<-paste("__label__", unclass(test$label[i]), " ",test$statement[i], sep="")
# write(line,file="LIAR/test.txt",append=TRUE)
# }
#close(out)
## sentiment analysis
#library("sentimentr")
#test <- get_sentences(as.character(train$statement))
# library("syuzhet")
# sentiments<-get_nrc_sentiment(as.character(train$statement))
# train<-cbind(train,sentiments)
# #logistic
# mod.logit<-glm(label~anger+anticipation+disgust+fear+joy+sadness+surprise+
# trust+negative+positive,data=train,family = binomial(link = "logit"))
#
# summary(mod.logit)
# library(pscl)
# pR2(mod.logit)
# #linear
# mod.lm<-lm(unclass(label)~anger+anticipation+disgust+fear+joy+sadness+surprise+
# trust+negative+positive,data=train)
# summary(mod.lm)
| /helpers/loaddata.R | permissive | CaioBrighenti/fake-news | R | false | false | 8,301 | r | library(readr)
library(dplyr)
library(data.table)
# load in FNN outliers
fnn_outliers <- as.character(read.csv("thesis/outliers.csv")$x)
##################################
############## LIAR ##############
##################################
loadLIARTrain <- function() {
## load train data
train<-fread(file="../data/LIAR/dataset/train.TSV",sep = '\t', quote="", header = FALSE) %>%
as_tibble()
header<-c("ID","label","text","subject","speaker","speaker.title","state",
"party","bt.count","f.count","ht.count","mt.count","pof.count","context")
names(train)<-header
## reorder and number label
train$label <- as.factor(train$label)
labels<-c("pants-fire","false","barely-true","half-true","mostly-true","true")
for (num in 6:1) {
train$label <- relevel(train$label,labels[num])
}
return(train)
}
loadLIARTest <- function() {
## load train data
test<-fread(file="data/LIAR/dataset/test.TSV",sep = '\t', quote="", header = FALSE) %>%
as_tibble()
header<-c("ID","label","text","subject","speaker","speaker.title","state",
"party","bt.count","f.count","ht.count","mt.count","pof.count","context")
names(test)<-header
## reorder and number label
test$label <- as.factor(test$label)
labels<-c("pants-fire","false","barely-true","half-true","mostly-true","true")
for (num in 6:1) {
test$label <- relevel(test$label,labels[num])
}
return(test)
}
##################################
########## FAKENEWSNET ###########
##################################
#splitDataFNN()
## load FakeNewsNet dataset
splitDataFNN <- function() {
  # Split the full FakeNewsNet table into a 75/25 train/test partition and
  # write both splits to TSV.
  # NOTE(review): no set.seed() here, so the partition differs on every call;
  # seed before calling if a reproducible split is required.
  ffn<-fread(file="data/FakeNewsNet/dataset/fnn_data.TSV",sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
    as_tibble()
  train_ind <- sample(seq_len(nrow(ffn)), size = floor(nrow(ffn) * .75))
  train <- ffn[train_ind,]
  test <- ffn[-train_ind,]
  # Fix: write under "data/..." so loadFNNTrain()/loadFNNTest(), which read
  # "data/FakeNewsNet/dataset/fnn_train.tsv" etc., can find the output
  # (the original wrote to "FakeNewsNet/dataset/...").
  write_tsv(data.frame(train), "data/FakeNewsNet/dataset/fnn_train.tsv")
  write_tsv(data.frame(test), "data/FakeNewsNet/dataset/fnn_test.tsv")
}
loadFNNTrain <- function() {
train<-fread(file="data/FakeNewsNet/dataset/fnn_train.tsv",sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
as_tibble() %>%
mutate(label = as.factor(label)) %>%
filter(!(ID %in% fnn_outliers))
return(train)
}
loadFNNTest <- function() {
test<-fread(file="data/FakeNewsNet/dataset/fnn_test.tsv",sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
as_tibble() %>%
mutate(label = as.factor(label))
return(test)
}
loadFNN <- function() {
fnn<-fread(file="data/FakeNewsNet/dataset/fnn_data.TSV",sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
as_tibble() %>%
filter(!(ID %in% fnn_outliers))
return(fnn)
}
loadFNNComplexity <- function(str){
  # Load precomputed text-complexity features for one FNN split.
  #
  # str: split name used in the file path (e.g. "train"/"test" -- confirm
  #      against the files under features/).
  # Returns a tibble keyed by (ID, label) with the raw `text` column dropped
  # and outlier rows removed.
  path <- paste0("features/fnn_", str, "_complexity.tsv")  # paste0 over paste(sep='')
  dat_complexity <- fread(file=path,sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
    as_tibble() %>%
    mutate(ID = as.character(ID), label = as.factor(label)) %>%
    dplyr::select(-text) %>%
    filter(!(ID %in% fnn_outliers))
  return(dat_complexity)
}
loadLIWCGroups <- function(){
  # Map each LIWC 2015 output variable to its category group.
  #
  # Fix: the original built a one-row tibble of 90+ columns and reshaped it
  # with an unqualified gather() call, but tidyr is never attached by this
  # file, so the function failed unless the caller happened to load tidyr.
  # Building the two-column tibble directly removes that hidden dependency
  # (gather() is superseded anyway) and produces identical output.
  #
  # Returns a tibble with columns `var` (LIWC variable name) and `group`
  # (category), one row per variable, in LIWC output order.
  liwc_vars <- c(
    # summary-language measures
    "WC", "Analytic", "Clout", "Authentic", "Tone", "WPS", "Sixltr", "Dic",
    # function words
    "function", "pronoun", "ppron", "i", "we", "you", "shehe", "they",
    "ipron", "article", "prep", "auxverb", "adverb", "conj", "negate",
    # other grammar
    "verb", "adj", "compare", "interrog", "number", "quant",
    # affect
    "affect", "posemo", "negemo", "anx", "anger", "sad",
    # social
    "social", "family", "friend", "female", "male",
    # cognitive processes
    "cogproc", "insight", "cause", "discrep", "tentat", "certain", "differ",
    # perception
    "percept", "see", "hear", "feel",
    # biological processes
    "bio", "body", "health", "sexual", "ingest",
    # drives
    "drives", "affiliation", "achieve", "power", "reward", "risk",
    # time orientation
    "focuspast", "focuspresent", "focusfuture",
    # relativity
    "relativ", "motion", "space", "time",
    # personal concerns
    "work", "leisure", "home", "money", "relig", "death",
    # informal language
    "informal", "swear", "netspeak", "assent", "nonflu", "filler",
    # punctuation
    "AllPunc", "Period", "Comma", "Colon", "SemiC", "QMark", "Exclam",
    "Dash", "Quote", "Apostro", "Parenth", "OtherP"
  )
  # How many consecutive variables fall in each group (same order as above).
  group_sizes <- c(summary = 8, "function" = 15, othergram = 6, affect = 6,
                   social = 5, cogproc = 7, percept = 4, bio = 5, drives = 6,
                   timeorient = 3, relativ = 4, personc = 6, informal = 6,
                   punc = 12)
  LIWC_groups <- tibble(var = liwc_vars,
                        group = rep(names(group_sizes), times = group_sizes))
  return(LIWC_groups)
}
loadFNNLIWC <- function(str){
  # Load LIWC 2015 annotations for one FNN split. The raw CSV carries generic
  # column names A-D (ID, label, title, text) followed by the LIWC measures
  # WC..OtherP.
  #
  # Fix: the original select() mixed inclusions (ID, label, WC:OtherP) with
  # exclusions (-title, -text). Mixed selections error under current
  # tidyselect, and the exclusions were no-ops anyway (those columns were
  # never included), so dropping them preserves the result exactly.
  path <- paste0("annotations/LIWC/LIWC2015_fnn_", str, ".csv")
  dat_LIWC<-fread(file=path,header = TRUE, encoding="UTF-8") %>%
    as_tibble() %>%
    mutate(ID = as.character(A), label = as.factor(B), title = as.character(C), text = as.character(D)) %>%
    dplyr::select(ID, label, WC:OtherP) %>%
    filter(!(ID %in% fnn_outliers))
  return(dat_LIWC)
}
loadFNNPOS <- function(str){
path = paste("annotations/coreNLP/fnn_",str,"_POS.tsv", sep = '')
dat_POS <- fread(file=path,sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
as_tibble() %>%
mutate(ID = as.character(ID), label = as.factor(label)) %>%
dplyr::select(ID, label, everything()) %>%
filter(!(ID %in% fnn_outliers))
return(dat_POS)
}
loadFNNNER <- function(str){
path = paste("annotations/coreNLP/fnn_",str,"_NER.tsv", sep = '')
dat_NER <- fread(file=path,sep = '\t', quote="", header = TRUE, encoding="UTF-8") %>%
as_tibble() %>%
mutate(ID = as.character(ID), label=as.factor(label)) %>%
dplyr::select(ID, label, everything()) %>%
filter(!(ID %in% fnn_outliers))
return(dat_NER)
}
loadFNNtxtfeat <- function(str){
# load complexity
temp_complexity <- loadFNNComplexity(str)
# load LWIC
temp_LIWC<-loadFNNLIWC(str)
# load POS
temp_POS <- loadFNNPOS(str)
# load NER
temp_NER <- loadFNNNER(str)
# merge
temp_txtfeat <- temp_complexity %>%
left_join(temp_LIWC, by = c("ID", "label")) %>%
left_join(temp_POS, by = c("ID", "label")) %>%
left_join(temp_NER, by = c("ID", "label")) %>%
mutate(label = as.factor(2 - unclass(label))) %>%
distinct(ID, .keep_all= TRUE)
return(temp_txtfeat)
}
## write LIAR to fastText format
# file.create("LIAR/train.txt")
# #out<-file("liar_dataset/train.txt")
# for (i in 1:nrow(train)) {
# line<-paste("__label__", unclass(train$label[i]), " ",train$statement[i], sep="")
# write(line,file="LIAR/train.txt",append=TRUE)
# }
#close(out)
## write LIAR to output
# file.create("LIAR/test.txt")
# #out<-file("liar_dataset/test.txt")
# for (i in 1:nrow(test)) {
# line<-paste("__label__", unclass(test$label[i]), " ",test$statement[i], sep="")
# write(line,file="LIAR/test.txt",append=TRUE)
# }
#close(out)
## sentiment analysis
#library("sentimentr")
#test <- get_sentences(as.character(train$statement))
# library("syuzhet")
# sentiments<-get_nrc_sentiment(as.character(train$statement))
# train<-cbind(train,sentiments)
# #logistic
# mod.logit<-glm(label~anger+anticipation+disgust+fear+joy+sadness+surprise+
# trust+negative+positive,data=train,family = binomial(link = "logit"))
#
# summary(mod.logit)
# library(pscl)
# pR2(mod.logit)
# #linear
# mod.lm<-lm(unclass(label)~anger+anticipation+disgust+fear+joy+sadness+surprise+
# trust+negative+positive,data=train)
# summary(mod.lm)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\name{nElements}
\alias{nElements}
\title{Number of elements}
\usage{
nElements(object)
}
\arguments{
\item{object}{Object to be coerced or tested.}
}
\value{
A numeric value with the number of elements.
}
\description{
Check the number of elements of the TidySet.
}
\examples{
TS <- tidySet(list(A = letters[1:2], B = letters[5:7]))
nElements(TS)
}
\seealso{
Other count functions:
\code{\link{nRelations}()},
\code{\link{nSets}()}
Other methods:
\code{\link{TidySet-class}},
\code{\link{activate}()},
\code{\link{add_column}()},
\code{\link{add_relation}()},
\code{\link{arrange.TidySet}()},
\code{\link{cartesian}()},
\code{\link{complement_element}()},
\code{\link{complement_set}()},
\code{\link{complement}()},
\code{\link{element_size}()},
\code{\link{elements}()},
\code{\link{filter.TidySet}()},
\code{\link{group_by.TidySet}()},
\code{\link{group}()},
\code{\link{incidence}()},
\code{\link{intersection}()},
\code{\link{is.fuzzy}()},
\code{\link{is_nested}()},
\code{\link{move_to}()},
\code{\link{mutate.TidySet}()},
\code{\link{nRelations}()},
\code{\link{nSets}()},
\code{\link{name_elements<-}()},
\code{\link{name_sets<-}()},
\code{\link{name_sets}()},
\code{\link{power_set}()},
\code{\link{pull.TidySet}()},
\code{\link{relations}()},
\code{\link{remove_column}()},
\code{\link{remove_element}()},
\code{\link{remove_relation}()},
\code{\link{remove_set}()},
\code{\link{rename_elements}()},
\code{\link{rename_set}()},
\code{\link{select.TidySet}()},
\code{\link{set_size}()},
\code{\link{sets}()},
\code{\link{subtract}()},
\code{\link{union}()}
}
\concept{count functions}
\concept{methods}
| /man/nElements.Rd | permissive | ropensci/BaseSet | R | false | true | 1,707 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\name{nElements}
\alias{nElements}
\title{Number of elements}
\usage{
nElements(object)
}
\arguments{
\item{object}{Object to be coerced or tested.}
}
\value{
A numeric value with the number of elements.
}
\description{
Check the number of elements of the TidySet.
}
\examples{
TS <- tidySet(list(A = letters[1:2], B = letters[5:7]))
nElements(TS)
}
\seealso{
Other count functions:
\code{\link{nRelations}()},
\code{\link{nSets}()}
Other methods:
\code{\link{TidySet-class}},
\code{\link{activate}()},
\code{\link{add_column}()},
\code{\link{add_relation}()},
\code{\link{arrange.TidySet}()},
\code{\link{cartesian}()},
\code{\link{complement_element}()},
\code{\link{complement_set}()},
\code{\link{complement}()},
\code{\link{element_size}()},
\code{\link{elements}()},
\code{\link{filter.TidySet}()},
\code{\link{group_by.TidySet}()},
\code{\link{group}()},
\code{\link{incidence}()},
\code{\link{intersection}()},
\code{\link{is.fuzzy}()},
\code{\link{is_nested}()},
\code{\link{move_to}()},
\code{\link{mutate.TidySet}()},
\code{\link{nRelations}()},
\code{\link{nSets}()},
\code{\link{name_elements<-}()},
\code{\link{name_sets<-}()},
\code{\link{name_sets}()},
\code{\link{power_set}()},
\code{\link{pull.TidySet}()},
\code{\link{relations}()},
\code{\link{remove_column}()},
\code{\link{remove_element}()},
\code{\link{remove_relation}()},
\code{\link{remove_set}()},
\code{\link{rename_elements}()},
\code{\link{rename_set}()},
\code{\link{select.TidySet}()},
\code{\link{set_size}()},
\code{\link{sets}()},
\code{\link{subtract}()},
\code{\link{union}()}
}
\concept{count functions}
\concept{methods}
|
## Function for qunatification of Line Types
## First calculating the total length of line type per Aggregation area, write those to table
## secondly calculation of ratio of line Type against all selected line Types
##
## Option for selection of which line types to calc or to choose ALL the lines
library(RPostgreSQL)
library(rgdal)
library(RODBC)
##
## setting helper functions
##
########## FUNCTION ##########
## writing intersection table
##
## IMPORTANT NOTICE.Personal check WHERE clause if switching between urmo and localhost
## Additionally the setting of the WHERE col LIKE hw_ is necessary for calc of all, not for calc of bike ratio yes|no
##
# ####### Debugguing: this is working for a reduced lineNetwork the Intersect table is written correctly
# createInterSecTable( connection, "_qntfyLine_lines_R_test",
# "urmo.abz", "abz_id", "the_geom",
# "_qntfyLine_lines_test", "osm_type", "shape")
#
# ### testing for the name_id column
# createInterSecTable( connection, "_qntfyLine_lines_R_test",
# "urmo.bz", "bz_name", "the_geom",
# "_qntfyLine_lines_test", "osm_type", "shape")
#
#
#
# ### testing for the whole lineNetwork; it itersected perfectly.
# createInterSecTable( connection, "_qntfyLine_lines_R_test_II",
# "urmo.bz", "bz_id", "the_geom",
# "osm.berlin_network", "osm_type", "shape")
## Intersect a line layer with an aggregation-area layer inside PostGIS.
## Drops and recreates <resultTable_name>_InterSec holding, per row: a serial
## key, the aggregation-area id, the line type, and the clipped multiline
## geometry in EPSG:25833. Returns the dbGetQuery() result (DDL, so usually
## empty) invisibly to the caller.
##
## NOTE(review): table and column names are spliced into the SQL via sprintf
## with no quoting/escaping -- pass trusted identifiers only.
createInterSecTable <- function( connection,
                         resultTable_name,
                         Agg_Area, id_column, Agg_geom,
                         Ex_Area, label_column, Ex_geom
)
{
  intersectTable <- dbGetQuery(connection, sprintf(
    "DROP TABLE IF EXISTS %s_InterSec;
    SELECT * INTO %s_InterSec FROM
    (SELECT
      row_number() over (order by 1) as key,
      Agg_Area.%s AS Agg_ID,
      Ex_Area.%s AS LineType,
      ST_Multi(ST_Intersection(Agg_Area.%s, ST_Transform(Ex_Area.%s, 25833)))::geometry(multiLineString, 25833) as geom
    FROM
      %s AS Agg_Area
      LEFT JOIN %s AS Ex_Area
      ON (ST_INTERSECTS(Agg_Area.%s, ST_Transform(Ex_Area.%s, 25833)))
    WHERE
      ST_isValid(Agg_Area.%s) = TRUE AND ST_isValid(ST_Transform(Ex_Area.%s, 25833)) = TRUE
    ) as foo;
    ALTER TABLE %s_InterSec ADD PRIMARY KEY (key)
    ;"
    ,
    resultTable_name,    ## DROP
    resultTable_name,    ## SELECT INTO
    id_column,           ## Agg_Area      -- column with the unique Agg_Area_ID e.g. PLR-id
    label_column,        ## label_column. -- column with linetype specification
    Agg_geom, Ex_geom,   ## ST_Multi      -- geometry columns of both Agg and Ex objects
    Agg_Area,            ## FROM          -- table containing the Aggregation Area geometries
    Ex_Area,             ## LEFT JOIN     -- table containing the Examination Object geometries, here: lineTypes
    Agg_geom, Ex_geom,   ## ON            -- geometry columns of both Agg and Ex objects
    #label_column, "highway%",  ## WHERE  -- OSM-specific linetype filter (kept disabled; see header comments in the file)
    Agg_geom, Ex_geom,   ## WHERE         -- geometry columns of both Agg and Ex objects
    resultTable_name     ## ADD PRIMARY KEY
  ))
  return(intersectTable)
}
#--Ex_Area.%s LIKE '%s' AND
#qntfyLines(con, Agg_Area, id_column, Agg_geom, Ex_Area, label_column, Ex_geom)
#
#
########## FUNCTION ##########
## getting the vector of dictinct variables from Agg_Area table
## reminder: switch WHERE linetype to 'highway%' for OSM data, otherwise no WHERE is needed
## Return the distinct LineType values recorded in <resultTable_name>_InterSec
## as a plain vector (one entry per distinct type; may include NA).
getVDist <- function(connection, resultTable_name) {
  distinct_query <- sprintf(
    "SELECT DISTINCT Linetype
    FROM %s_InterSec
    ;"
    ,
    resultTable_name
  )
  distinct_df <- dbGetQuery(connection, distinct_query)
  # Single-column result; extract it as a vector.
  distinct_df[[1]]
}
## DEBUGGING - working
#vDist <- getVDist(con, resultTable_name = "bz_osm")
########## FUNCTION ##########
## writing results table
## create result table with Agg_Area_Id and its geom to select other results into
## Create the per-aggregation-area result table: one row per valid, simple
## Agg_Area polygon, carrying a serial key, the area id (Agg_Id) and its
## geometry. Later steps add sum_*/ratio_* columns to this table.
## NOTE(review): identifiers are spliced into the SQL via sprintf without
## escaping -- pass trusted identifiers only.
createResultTable <- function( connection,
                         resultTable_name, id_column,
                         Agg_geom, Agg_Area
)
{
  dbGetQuery(connection, sprintf(
    "DROP TABLE IF EXISTS %s;
    SELECT
      row_number() over (order by 1) as key,
      %s AS Agg_Id,
      %s AS geom
    INTO %s
    FROM %s AS Agg_Area
    WHERE ST_isValid(Agg_Area.%s) = TRUE AND ST_isSimple(Agg_Area.%s) = TRUE
    ;
    ALTER TABLE %s ADD PRIMARY KEY (key)
    ;"
    ,
    resultTable_name,    ## DROP IF EXISTS
    id_column,           ## SELECT #1 -- column with the unique Agg_Area_ID e.g. PLR-id
    Agg_geom,            ## SELECT #2 -- geometry column of Agg_Area
    resultTable_name,
    Agg_Area,            ## FROM      -- table containing the Aggregation Area geometries
    Agg_geom, Agg_geom,  ## WHERE     -- geometry column of Agg_Area (validity + simplicity checks)
    resultTable_name     ## ADD PRIMARY KEY
  ))
}
## DEBUGGING - working
#createResultTable( con, "bz_osm", "bz_id", "the_geom", "urmo.bz")
########## FUNCTION ##########
## updating a table (create and fill columns)
## containing the lengths of lines per aggregation Area
##
## TROUBLE cannot loop through the necessary VDIST renamed 2 way_whatever and the necessary way.whatever for selection
## of distinct values simultaneously, need to update the loop or else
##
## Add (or replace) a sum_<linetype> column on the result table holding, per
## aggregation area, the total length in km of the intersected lines of that
## type. `vDist` is the raw linetype value: it is used verbatim in the SQL
## WHERE comparison, while gsub('\\.','_',...) sanitizes dots so it is a
## legal column-name suffix.
updateTable <- function( connection,
                         vDist,
                         resultTable_name
)
{
  dbGetQuery(connection, sprintf(
    "ALTER TABLE %s DROP COLUMN IF EXISTS sum_%s
    ;
    ALTER TABLE %s ADD COLUMN sum_%s FLOAT
    ;
    UPDATE %s
    SET sum_%s = foo.sum_%s
    FROM (
      SELECT
        Agg_ID,
        SUM(ST_Length(geom))/1000 AS sum_%s
      FROM %s_InterSec
      WHERE lineType = '%s'
      GROUP BY Agg_ID
      ORDER BY Agg_ID
    ) as foo
    WHERE %s.Agg_ID = foo.Agg_ID
    ;"
    ,
    resultTable_name, gsub('\\.','_',vDist),      ## ALTER TABLE DROP
    resultTable_name, gsub('\\.','_',vDist),      ## ALTER TABLE ADD COL  -- sanitized linetype as column suffix
    resultTable_name,                             ## UPDATE
    gsub('\\.','_',vDist), gsub('\\.','_',vDist), ## SET                  -- sanitized linetype as column suffix
    gsub('\\.','_',vDist),                        ## SUM                  -- sanitized linetype as column suffix
    resultTable_name,                             ## FROM
    vDist,                                        ## WHERE -1- raw Linetype value (unsanitized on purpose)
    resultTable_name                              ## WHERE -2- resultTable
  ))
}
## DEBUGGING - working
#for (i in vDist) {updateTable(connection, i, "bz_osm")}
########## FUNCTION ##########
## inserting the total length into Table results
## calc the total length of selected line types
## Accumulate one sum_<linetype> column into the sum_length total column.
## Called once per linetype; COALESCE treats missing values as 0 so areas
## without that linetype do not nullify the running total.
## NOTE(review): despite the parameter name, callers pass the already
## sanitized column suffix (vDistName, dots replaced by underscores), not the
## raw linetype value.
sumLength <- function( connection,
                       vDist,
                       resultTable_name
)
{
  sumLength <- dbGetQuery(connection, sprintf(
    "UPDATE %s
    SET sum_length = COALESCE(sum_length,0)+COALESCE(sum_%s,0)   -- summation of all values listed in the V(Dist)
    ;"
    ,
    resultTable_name,
    vDist
  ))
}
## DEBUGGING - working
#addSumLengthCol <- dbGetQuery(connection, sprintf("ALTER TABLE %s DROP COLUMN IF EXISTS sum_length;
# ALTER TABLE %s ADD COLUMN sum_length FLOAT;", "bz_osm", "bz_osm"))
#for (i in vDistName) {sumLength(connection, i, "bz_osm")}
########## FUNCTION ##########
## setting Function for line quantification
## Add (or refresh) the ratio column for one line type.
##
## Creates ratio_<vDist> on `resultTable_name` and fills it with
## sum_<vDist>/sum_length, i.e. the share of this line type in the total
## line length of each aggregation area.  `vDist` must already be the
## column-safe name (dots replaced by underscores).
##
## Fix: divide by NULLIF(sum_length, 0) so aggregation areas that contain
## no lines at all (sum_length = 0) yield a NULL ratio instead of raising
## "division by zero" and aborting the whole UPDATE.
ratioLines2Table <- function( connection,
                              resultTable_name,
                              vDist
)
{
  calcRatios <- dbGetQuery(connection, sprintf(
    "ALTER TABLE %s DROP COLUMN IF EXISTS ratio_%s
    ;
    ALTER TABLE %s ADD COLUMN ratio_%s FLOAT
    ;
    UPDATE %s
    SET ratio_%s = sum_%s/NULLIF(sum_length,0)
    ;"
    ,
    resultTable_name, vDist, ## ALTER TABLE ... DROP COLUMN
    resultTable_name, vDist, ## ALTER TABLE ... ADD COLUMN
    resultTable_name,        ## UPDATE
    vDist, vDist             ## SET ratio_<type> = sum_<type>/total
  ))
}
## DEBUGGING -
#for (i in vDistName) {ratioLines2Table(connection, "bz_osm", i)}
########## FUNCTION ##########
## the complete Function
## Run the full line-quantification workflow for one examination layer:
## intersect the lines with the aggregation areas, list the distinct line
## types, build the result table, then fill in per-type lengths, the total
## length, and per-type ratios.  Called for its side effects on the database.
calculateHighwayPercentages <- function( connection,
                                         resultTable_name,
                                         Agg_Area, id_column, in_geom_column,
                                         label_column, mode, Ex_geom
)
{
  ## 1) intersect the line layer with the aggregation areas
  createInterSecTable(connection, resultTable_name, Agg_Area, id_column, in_geom_column, label_column, mode, Ex_geom)
  ## 2) distinct line types; drop NA entries (no-op when none are present)
  vDist <- getVDist(connection, resultTable_name)
  vDist <- vDist[!is.na(vDist)]
  ## column-safe variants of the labels ('.' is illegal in a column name)
  vDistName <- gsub('\\.', '_', vDist)
  ## 3) result-table skeleton: one row per aggregation area
  createResultTable(connection, resultTable_name, id_column, in_geom_column, Agg_Area)
  ## 4) one sum_<type> column per distinct line type
  for (lineType in vDist) {
    updateTable(connection, lineType, resultTable_name)
  }
  ## 5) total-length column, accumulated over all per-type columns
  dbGetQuery(connection, sprintf("ALTER TABLE %s DROP COLUMN IF EXISTS sum_length;
                                ALTER TABLE %s ADD COLUMN sum_length FLOAT;", resultTable_name, resultTable_name))
  for (colName in vDistName) {
    sumLength(connection, colName, resultTable_name)
  }
  ## 6) ratio_<type> = sum_<type> / sum_length
  for (colName in vDistName) {
    ratioLines2Table(connection, resultTable_name, colName)
  }
}
## DEBUGGING - with bikeusage YES|NO updated table
# qntfyLinesBike ( con, "a_test_qntyfyLines_Clean_bz_test",
# "urmo.bz", "bz_id", "the_geom",
# "osm.berlin_network", "bikeusage", "shape")
##qntfyLines <- function (connection,result_table_name,Agg_Area,id_column,Agg_geom, Ex_Area,label_column,Ex_geom)
#for bikes -->
#qntfyLinesBike(connection = con, result_table_name = "result_bike_hex_2000", Agg_Area = "grids.hex_2000", id_column = "gid", Agg_geom = "the_geom", Ex_Area = "osm.berlin_network", label_column = "bikeusage", Ex_geom = "shape")
# connection = con
# result_table_name = "result_bike_hex_2000"
# Agg_Area = "grids.hex_2000"
# id_column = "gid"
# Agg_geom = "the_geom"
# Ex_Area = "osm.berlin_network"
# label_column = "bikeusage"
# Ex_geom = "shape"
#qntfyLinesBike(con, 'public.abz_network_bike_ind', "urmo.abz", "abz_id" , "the_geom", "osm.berlin_network_old", "bikeusage", "shape")
| /indicator/osmIndicators.R | no_license | ManuelKienast/SpatCharIndicatorTool | R | false | false | 10,814 | r | ## Function for qunatification of Line Types
## First calculating the total length of line type per Aggregation area, write those to table
## secondly calculation of ratio of line Type against all selected line Types
##
## Option for selection of which line types to calc or to choose ALL the lines
library(RPostgreSQL)
library(rgdal)
library(RODBC)
##
## setting helper functions
##
########## FUNCTION ##########
## writing intersection table
##
## IMPORTANT NOTICE.Personal check WHERE clause if switching between urmo and localhost
## Additionally the setting of the WHERE col LIKE hw_ is necessary for calc of all, not for calc of bike ratio yes|no
##
# ####### Debugguing: this is working for a reduced lineNetwork the Intersect table is written correctly
# createInterSecTable( connection, "_qntfyLine_lines_R_test",
# "urmo.abz", "abz_id", "the_geom",
# "_qntfyLine_lines_test", "osm_type", "shape")
#
# ### testing for the name_id column
# createInterSecTable( connection, "_qntfyLine_lines_R_test",
# "urmo.bz", "bz_name", "the_geom",
# "_qntfyLine_lines_test", "osm_type", "shape")
#
#
#
# ### testing for the whole lineNetwork; it itersected perfectly.
# createInterSecTable( connection, "_qntfyLine_lines_R_test_II",
# "urmo.bz", "bz_id", "the_geom",
# "osm.berlin_network", "osm_type", "shape")
## Build the intersection table <resultTable_name>_InterSec.
##
## Intersects every line of the examination layer `Ex_Area` with every
## polygon of the aggregation layer `Agg_Area` (both reprojected/kept in
## EPSG:25833) and stores one MultiLineString per (aggregation area, line)
## pair together with the area id (Agg_ID) and the line-type label
## (LineType).  Invalid geometries on either side are skipped.
##
## Args:
##   connection:       open RPostgreSQL/DBI connection
##   resultTable_name: base name; the table written is <name>_InterSec
##   Agg_Area:         table with the aggregation-area polygons
##   id_column:        unique id column of Agg_Area (e.g. a PLR id)
##   Agg_geom:         geometry column of Agg_Area (assumed EPSG:25833)
##   Ex_Area:          table with the examination lines (e.g. OSM network)
##   label_column:     column of Ex_Area holding the line-type label
##   Ex_geom:          geometry column of Ex_Area (transformed to 25833)
##
## Returns: the (empty) query result; the function is used for its side
## effect of (re)creating the _InterSec table.
createInterSecTable <- function( connection,
                                 resultTable_name,
                                 Agg_Area, id_column, Agg_geom,
                                 Ex_Area, label_column, Ex_geom
)
{
  intersectTable <- dbGetQuery(connection, sprintf(
    "DROP TABLE IF EXISTS %s_InterSec;
    SELECT * INTO %s_InterSec FROM
    (SELECT
    row_number() over (order by 1) as key,
    Agg_Area.%s AS Agg_ID,
    Ex_Area.%s AS LineType,
    ST_Multi(ST_Intersection(Agg_Area.%s, ST_Transform(Ex_Area.%s, 25833)))::geometry(multiLineString, 25833) as geom
    FROM
    %s AS Agg_Area
    LEFT JOIN %s AS Ex_Area
    ON (ST_INTERSECTS(Agg_Area.%s, ST_Transform(Ex_Area.%s, 25833)))
    WHERE
    ST_isValid(Agg_Area.%s) = TRUE AND ST_isValid(ST_Transform(Ex_Area.%s, 25833)) = TRUE
    ) as foo;
    ALTER TABLE %s_InterSec ADD PRIMARY KEY (key)
    ;"
    ,
    resultTable_name, ## DROP TABLE
    resultTable_name, ## SELECT INTO
    id_column, ## Agg_Area -- column with the unique Agg_Area id, e.g. PLR-id
    label_column, ## label_column -- column with the line-type specification
    Agg_geom, Ex_geom, ## ST_Multi -- geometry columns of both Agg and Ex objects
    Agg_Area, ## FROM -- table containing the aggregation-area geometries
    Ex_Area, ## LEFT JOIN -- table containing the examination-object geometries and line types
    Agg_geom, Ex_geom, ## ON -- geometry columns of both Agg and Ex objects
    #label_column, "highway%", ## WHERE -- type of Line and query for highway in its description --> its an OSM-special
    Agg_geom, Ex_geom, ## WHERE -- geometry columns of both Agg and Ex objects
    resultTable_name ## ALTER TABLE -- add primary key
  ))
  return(intersectTable)
}
#--Ex_Area.%s LIKE '%s' AND
#qntfyLines(con, Agg_Area, id_column, Agg_geom, Ex_Area, label_column, Ex_geom)
#
#
########## FUNCTION ##########
## getting the vector of dictinct variables from Agg_Area table
## reminder: switch WHERE linetype to 'highway%' for OSM data, otherwise no WHERE is needed
## Return the distinct line-type labels found in the intersection table
## <resultTable_name>_InterSec, as a plain character vector (may contain NA
## for lines that carried no label).
getVDist <- function (
  connection,
  resultTable_name
)
{
  distinctQuery <- sprintf(
    "SELECT DISTINCT Linetype
    FROM %s_InterSec
    ;"
    ,
    resultTable_name
  )
  ## dbGetQuery returns a one-column data frame; hand back its only column.
  distinctTypes <- dbGetQuery(connection, distinctQuery)
  return(distinctTypes[, 1])
}
## DEBUGGING - working
#vDist <- getVDist(con, resultTable_name = "bz_osm")
########## FUNCTION ##########
## writing results table
## create result table with Agg_Area_Id and its geom to select other results into
## (Re)create the result table: one row per valid aggregation area.
##
## The table `resultTable_name` gets a running key (primary key), the
## aggregation-area id (Agg_Id) and its geometry (geom).  Areas whose
## geometry is invalid or non-simple are excluded.  The per-type sum/ratio
## columns are added later by updateTable()/ratioLines2Table().
##
## Args:
##   connection:       open RPostgreSQL/DBI connection
##   resultTable_name: name of the table to (re)create
##   id_column:        unique id column of Agg_Area (e.g. a PLR id)
##   Agg_geom:         geometry column of Agg_Area
##   Agg_Area:         table containing the aggregation-area geometries
createResultTable <- function( connection,
                               resultTable_name, id_column,
                               Agg_geom, Agg_Area
)
{
  dbGetQuery(connection, sprintf(
    "DROP TABLE IF EXISTS %s;
    SELECT
    row_number() over (order by 1) as key,
    %s AS Agg_Id,
    %s AS geom
    INTO %s
    FROM %s AS Agg_Area
    WHERE ST_isValid(Agg_Area.%s) = TRUE AND ST_isSimple(Agg_Area.%s) = TRUE
    ;
    ALTER TABLE %s ADD PRIMARY KEY (key)
    ;"
    ,
    resultTable_name, ## DROP TABLE IF EXISTS
    id_column, ## SELECT #1 -- column with the unique Agg_Area id, e.g. PLR-id
    Agg_geom, ## SELECT #2 -- geometry column of Agg_Area
    resultTable_name, ## INTO -- target result table
    Agg_Area, ## FROM -- table containing the aggregation-area geometries
    Agg_geom, Agg_geom, ## WHERE -- geometry column of Agg_Area (validity checks)
    resultTable_name ## ALTER TABLE -- add primary key
  ))
}
## DEBUGGING - working
#createResultTable( con, "bz_osm", "bz_id", "the_geom", "urmo.bz")
########## FUNCTION ##########
## updating a table (create and fill columns)
## containing the lengths of lines per aggregation Area
##
## TROUBLE cannot loop through the necessary VDIST renamed 2 way_whatever and the necessary way.whatever for selection
## of distinct values simultaneously, need to update the loop or else
##
## Add (or refresh) a per-linetype length column on the result table.
##
## For one distinct line type `vDist`, drops and re-creates the column
## sum_<vDist> on `resultTable_name`, then fills it per aggregation area with
## the summed length (ST_Length metres / 1000 = km) of all intersected line
## pieces of that type, read from the <resultTable_name>_InterSec table
## produced by createInterSecTable().
##
## Args:
##   connection:       open RPostgreSQL/DBI connection
##   vDist:            one distinct line-type label as stored in the
##                     InterSec table (may contain '.', which is not legal
##                     in a column name)
##   resultTable_name: name of the result table to update
##
## Called for its side effect on the database.
updateTable <- function( connection,
                         vDist,
                         resultTable_name
)
{
  ## Hoist the column-name sanitisation: '.' -> '_' computed once instead of
  ## six times, and the argument list below stays readable.
  vDistCol <- gsub('\\.', '_', vDist)
  dbGetQuery(connection, sprintf(
    "ALTER TABLE %s DROP COLUMN IF EXISTS sum_%s
    ;
    ALTER TABLE %s ADD COLUMN sum_%s FLOAT
    ;
    UPDATE %s
    SET sum_%s = foo.sum_%s
    FROM (
    SELECT
    Agg_ID,
    SUM(ST_Length(geom))/1000 AS sum_%s
    FROM %s_InterSec
    WHERE lineType = '%s'
    GROUP BY Agg_ID
    ORDER BY Agg_ID
    ) as foo
    WHERE %s.Agg_ID = foo.Agg_ID
    ;"
    ,
    resultTable_name, vDistCol,  ## ALTER TABLE ... DROP COLUMN
    resultTable_name, vDistCol,  ## ALTER TABLE ... ADD COLUMN
    resultTable_name,            ## UPDATE
    vDistCol, vDistCol,          ## SET sum_<type> = foo.sum_<type>
    vDistCol,                    ## SUM(...) AS sum_<type>
    resultTable_name,            ## FROM <result>_InterSec
    vDist,                       ## WHERE lineType = raw label as stored
    resultTable_name             ## WHERE join on Agg_ID
  ))
}
## DEBUGGING - working
#for (i in vDist) {updateTable(connection, i, "bz_osm")}
########## FUNCTION ##########
## inserting the total length into Table results
## calc the total length of selected line types
## Accumulate one per-linetype length column into the running total.
## Adds COALESCE(sum_<vDist>, 0) onto sum_length for every row of
## `resultTable_name`.  Calling this once per (already column-safe)
## line-type name leaves sum_length holding the total length over all
## selected types.  Called for its side effect on the database.
sumLength <- function( connection,
                       vDist,
                       resultTable_name
)
{
  ## Build the statement first, then execute it; keeps the sprintf template
  ## separate from the database call.
  updateStatement <- sprintf(
    "UPDATE %s
    SET sum_length = COALESCE(sum_length,0)+COALESCE(sum_%s,0) -- summation of all values listed in the V(Dist)
    ;"
    ,
    resultTable_name,
    vDist
  )
  queryResult <- dbGetQuery(connection, updateStatement)
}
## DEBUGGING - working
#addSumLengthCol <- dbGetQuery(connection, sprintf("ALTER TABLE %s DROP COLUMN IF EXISTS sum_length;
# ALTER TABLE %s ADD COLUMN sum_length FLOAT;", "bz_osm", "bz_osm"))
#for (i in vDistName) {sumLength(connection, i, "bz_osm")}
########## FUNCTION ##########
## setting Function for line quantification
## Add (or refresh) the ratio column for one line type.
##
## Creates ratio_<vDist> on `resultTable_name` and fills it with
## sum_<vDist>/sum_length, i.e. the share of this line type in the total
## line length of each aggregation area.  `vDist` must already be the
## column-safe name (dots replaced by underscores).
##
## Fix: divide by NULLIF(sum_length, 0) so aggregation areas that contain
## no lines at all (sum_length = 0) yield a NULL ratio instead of raising
## "division by zero" and aborting the whole UPDATE.
ratioLines2Table <- function( connection,
                              resultTable_name,
                              vDist
)
{
  calcRatios <- dbGetQuery(connection, sprintf(
    "ALTER TABLE %s DROP COLUMN IF EXISTS ratio_%s
    ;
    ALTER TABLE %s ADD COLUMN ratio_%s FLOAT
    ;
    UPDATE %s
    SET ratio_%s = sum_%s/NULLIF(sum_length,0)
    ;"
    ,
    resultTable_name, vDist, ## ALTER TABLE ... DROP COLUMN
    resultTable_name, vDist, ## ALTER TABLE ... ADD COLUMN
    resultTable_name,        ## UPDATE
    vDist, vDist             ## SET ratio_<type> = sum_<type>/total
  ))
}
## DEBUGGING -
#for (i in vDistName) {ratioLines2Table(connection, "bz_osm", i)}
########## FUNCTION ##########
## the complete Function
## Run the full line-quantification workflow for one examination layer:
## intersect the lines with the aggregation areas, list the distinct line
## types, build the result table, then fill in per-type lengths, the total
## length, and per-type ratios.  Called for its side effects on the database.
calculateHighwayPercentages <- function( connection,
                                         resultTable_name,
                                         Agg_Area, id_column, in_geom_column,
                                         label_column, mode, Ex_geom
)
{
  ## 1) intersect the line layer with the aggregation areas
  createInterSecTable(connection, resultTable_name, Agg_Area, id_column, in_geom_column, label_column, mode, Ex_geom)
  ## 2) distinct line types; drop NA entries (no-op when none are present)
  vDist <- getVDist(connection, resultTable_name)
  vDist <- vDist[!is.na(vDist)]
  ## column-safe variants of the labels ('.' is illegal in a column name)
  vDistName <- gsub('\\.', '_', vDist)
  ## 3) result-table skeleton: one row per aggregation area
  createResultTable(connection, resultTable_name, id_column, in_geom_column, Agg_Area)
  ## 4) one sum_<type> column per distinct line type
  for (lineType in vDist) {
    updateTable(connection, lineType, resultTable_name)
  }
  ## 5) total-length column, accumulated over all per-type columns
  dbGetQuery(connection, sprintf("ALTER TABLE %s DROP COLUMN IF EXISTS sum_length;
                                ALTER TABLE %s ADD COLUMN sum_length FLOAT;", resultTable_name, resultTable_name))
  for (colName in vDistName) {
    sumLength(connection, colName, resultTable_name)
  }
  ## 6) ratio_<type> = sum_<type> / sum_length
  for (colName in vDistName) {
    ratioLines2Table(connection, resultTable_name, colName)
  }
}
## DEBUGGING - with bikeusage YES|NO updated table
# qntfyLinesBike ( con, "a_test_qntyfyLines_Clean_bz_test",
# "urmo.bz", "bz_id", "the_geom",
# "osm.berlin_network", "bikeusage", "shape")
##qntfyLines <- function (connection,result_table_name,Agg_Area,id_column,Agg_geom, Ex_Area,label_column,Ex_geom)
#for bikes -->
#qntfyLinesBike(connection = con, result_table_name = "result_bike_hex_2000", Agg_Area = "grids.hex_2000", id_column = "gid", Agg_geom = "the_geom", Ex_Area = "osm.berlin_network", label_column = "bikeusage", Ex_geom = "shape")
# connection = con
# result_table_name = "result_bike_hex_2000"
# Agg_Area = "grids.hex_2000"
# id_column = "gid"
# Agg_geom = "the_geom"
# Ex_Area = "osm.berlin_network"
# label_column = "bikeusage"
# Ex_geom = "shape"
#qntfyLinesBike(con, 'public.abz_network_bike_ind', "urmo.abz", "abz_id" , "the_geom", "osm.berlin_network_old", "bikeusage", "shape")
|
## Light stemmer for Persian: strips prefixes/suffixes from every word of a
## whitespace-separated string.
##
## Args:
##   texts:   one character string of space-separated Persian words.
##   Context: logical.  FALSE = always keep the fully stemmed form.
##            TRUE = walk the stemming chain from the deepest stem back
##            towards the original word and keep the first form that also
##            occurs elsewhere in the input, i.e. use the text itself as
##            evidence for the right stemming depth.
##
## Returns: the re-joined, stemmed string with digits 0-5 removed and runs
## of blanks collapsed.
##
## NOTE(review): depends on external helpers fp1 (prefix removal) and
## fs1..fs7 (suffix-removal stages) plus trim(), all defined elsewhere in
## the package; their exact rules are assumed here, not verified.
RemovePreSuffix <-
function(texts,Context){
  ## No-Stem List: a list of words to not stem
  notstemlist <- c('\u0627\u06CC\u0631\u0627\u0646', #Iran
                   '\u0645\u0634\u0631\u0648\u0637\u06CC\u062A', #Mashrutiat
                   '\u0639\u0645\u0631\u0627\u0646', #omrAn
                   '\u0628\u0631\u062C\u0627\u0645') #Barjam
  ## Arabic broken plural / singular pairs; both members of every pair are
  ## protected from stemming (transliterations in the trailing comments).
  arabicplurals <- list(c('\u062F\u0648\u0644','\u062F\u0648\u0644\u062A'), #doval,dolat
                        c('\u0645\u0646\u0627\u0628\u0639','\u0645\u0646\u0628\u0639'), #manabe,manba
                        c('\u0627\u0645\u0648\u0631','\u0627\u0645\u0631'), #omur,amr
                        c('\u0627\u0641\u0631\u0627\u062F','\u0641\u0631\u062F'), #afrad,fard
                        c('\u0631\u0648\u0627\u0628\u0637','\u0631\u0627\u0628\u0637\u0647'), #ravabet,rabeteh
                        c('\u0636\u0648\u0627\u0628\u0637','\u0636\u0627\u0628\u0637\u0647'), #zavabet,zabeteh
                        c('\u0634\u0631\u0627\u06CC\u0637','\u0634\u0631\u0637'), #sharayet,shart
                        c('\u0642\u0648\u0627\u0646\u06CC\u0646','\u0642\u0627\u0646\u0648\u0646'), #ghavanin,ghanun
                        c('\u062D\u0642\u0648\u0642','\u062D\u0642'), #hoghuh,hagh
                        c('\u062D\u062F\u0648\u062F','\u062D\u062F'), #hodud,had
                        c('\u0645\u0631\u0627\u06A9\u0632','\u0645\u0631\u06A9\u0632'), #marakez,markaz
                        c('\u0627\u062D\u0632\u0627\u0628','\u062D\u0632\u0628'), #ahzab,hezb
                        c('\u0627\u0647\u062F\u0627\u0641','\u0647\u062F\u0641'), #ahdaf,hadaf
                        c('\u0645\u062F\u0627\u0631\u06A9','\u0645\u062F\u0631\u06A9'), #madarek,madrak
                        c('\u0639\u0644\u0648\u0645','\u0639\u0644\u0645'), #olum,elm
                        c('\u0645\u0642\u0627\u0644\u0627\u062A','\u0645\u0642\u0627\u0644\u0647'), #maghalat,maghaleh
                        c('\u0648\u0638\u0627\u06CC\u0641','\u0648\u0638\u06CC\u0641\u0647'), #vazayef,vazifeh
                        c('\u0648\u0632\u0631\u0627','\u0648\u0632\u06CC\u0631'), #vozara,vazir
                        c('\u0645\u0635\u0627\u062F\u06CC\u0642','\u0645\u0635\u062F\u0627\u0642'), #masadigh,mesdagh
                        c('\u0645\u0631\u0627\u062C\u0639','\u0645\u0631\u062C\u0639'), #maraje,marja
                        c('\u0627\u0634\u062E\u0627\u0635','\u0634\u062E\u0635'), #ashkhas,shakhs
                        c('\u062E\u0635\u0627\u06CC\u0635','\u062E\u0635\u06CC\u0635\u0647'), #khasayes,khasiseh
                        c('\u0634\u0648\u0627\u0647\u062F','\u0634\u0627\u0647\u062F')) #shavahed,shahed
  ## Tokenise on single blanks.
  textsSplit <- strsplit(texts," ")[[1]]
  ## Reference vocabulary `a` for Context mode: the tokens minus any token
  ## ending in an ASCII digit.  NOTE(review): "TRUE" %in% grepl(...) relies
  ## on logical-to-character coercion; it is equivalent to any(grepl(...)).
  if ("TRUE" %in% grepl("[0-9]$", textsSplit)){a <- textsSplit[-grep("[0-9]$", textsSplit)]}else{a <- textsSplit}
  if(length(textsSplit) > 0){
    for(i in 1:length(textsSplit)){
      word <- textsSplit[i]
      ## Skip protected words and very short (<= 2 character) words.
      if(!(word %in% notstemlist) & !(word %in% unlist(arabicplurals)) & nchar(word)>2){
        ## Work on the word as a vector of single characters; fp1/fs* take
        ## and return such vectors.  After each stage, stop early if the
        ## current form is itself a protected word.
        word0 <- strsplit(word,"")[[1]]
        word1 <- fp1(word0);if((paste(word1,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word1,collapse=""); next}
        word2 <- fs1(word1);if((paste(word2,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word2,collapse=""); next}
        word3 <- fs2(word2);if((paste(word3,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word3,collapse=""); next}
        word4 <- fs3(word3);if((paste(word4,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word4,collapse=""); next}
        word5 <- fs4(word4);if((paste(word5,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word5,collapse=""); next}
        word6 <- fs5(word5);if((paste(word6,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word6,collapse=""); next}
        word7 <- fs6(word6);if((paste(word7,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word7,collapse=""); next}
        ## fs4 and fs5 are intentionally applied a second time here.
        word8 <- fs4(word7);if((paste(word8,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word8,collapse=""); next}
        word9 <- fs5(word8);if((paste(word9,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word9,collapse=""); next}
        word10 <- fs7(word9);if((paste(word10,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
          textsSplit[i] <- paste(word10,collapse=""); next}
        ## Without context: always keep the fully stemmed form.
        if(!Context){textsSplit[i] <- paste(word10,collapse="")}
        if(Context){
          ## All distinct intermediate forms, shallowest (original) first.
          stems <- unique(c(word,
                            paste(word1,collapse=""),
                            paste(word2,collapse=""),
                            paste(word3,collapse=""),
                            paste(word4,collapse=""),
                            paste(word5,collapse=""),
                            paste(word6,collapse=""),
                            paste(word7,collapse=""),
                            paste(word8,collapse=""),
                            paste(word9,collapse=""),
                            paste(word10,collapse="")
          ))
          ## Walk from the deepest stem back towards the original word and
          ## keep the first form that occurs in the reference vocabulary
          ## `a`; if none does, the loop ends at j = 1 and the original
          ## word is kept.
          for (j in length(stems):1){
            b <- strsplit(stems[j],"")[[1]]
            ## Final-letter variant: if the stem ends in GAF (U+06AF), try
            ## the HEH-final (U+0647) spelling and prefer it when that
            ## spelling occurs in the text.
            if(b[length(b)] =="\u06AF"){
              b[length(b)] <- "\u0647"
              if(paste0(b,collapse="")%in%a){stems[j] <- paste0(b,collapse="")}
            }
            textsSplit[i] <- stems[j]
            if(stems[j]%in%a){break}
          }
        }
      }
    }
  }
  ## Re-join and clean up the result.
  texts <- paste(textsSplit,collapse=" ")
  ## Strip ASCII digits.  NOTE(review): the pattern covers only '0'-'5'
  ## (\u0030-\u0035); digits 6-9 are left in place -- confirm intended.
  texts <- gsub("\u0030|\u0031|\u0032|\u0033|\u0034|\u0035","",texts)
  ## Collapse runs of blanks and trim the ends (trim() is external).
  texts <- trim(gsub(" {2,}"," ", texts))
  return(texts)
}
| /R/RemovePreSuffix.R | no_license | cran/PersianStemmer | R | false | false | 5,891 | r | RemovePreSuffix <-
function(texts,Context){
## No-Stem List: a list of words to not stem
notstemlist <- c('\u0627\u06CC\u0631\u0627\u0646', #Iran
'\u0645\u0634\u0631\u0648\u0637\u06CC\u062A', #Mashrutiat
'\u0639\u0645\u0631\u0627\u0646', #omrAn
'\u0628\u0631\u062C\u0627\u0645') #Barjam
arabicplurals <- list(c('\u062F\u0648\u0644','\u062F\u0648\u0644\u062A'), #doval,dolat
c('\u0645\u0646\u0627\u0628\u0639','\u0645\u0646\u0628\u0639'), #manabe,manba
c('\u0627\u0645\u0648\u0631','\u0627\u0645\u0631'), #omur,amr
c('\u0627\u0641\u0631\u0627\u062F','\u0641\u0631\u062F'), #afrad,fard
c('\u0631\u0648\u0627\u0628\u0637','\u0631\u0627\u0628\u0637\u0647'), #ravabet,rabeteh
c('\u0636\u0648\u0627\u0628\u0637','\u0636\u0627\u0628\u0637\u0647'), #zavabet,zabeteh
c('\u0634\u0631\u0627\u06CC\u0637','\u0634\u0631\u0637'), #sharayet,shart
c('\u0642\u0648\u0627\u0646\u06CC\u0646','\u0642\u0627\u0646\u0648\u0646'), #ghavanin,ghanun
c('\u062D\u0642\u0648\u0642','\u062D\u0642'), #hoghuh,hagh
c('\u062D\u062F\u0648\u062F','\u062D\u062F'), #hodud,had
c('\u0645\u0631\u0627\u06A9\u0632','\u0645\u0631\u06A9\u0632'), #marakez,markaz
c('\u0627\u062D\u0632\u0627\u0628','\u062D\u0632\u0628'), #ahzab,hezb
c('\u0627\u0647\u062F\u0627\u0641','\u0647\u062F\u0641'), #ahdaf,hadaf
c('\u0645\u062F\u0627\u0631\u06A9','\u0645\u062F\u0631\u06A9'), #madarek,madrak
c('\u0639\u0644\u0648\u0645','\u0639\u0644\u0645'), #olum,elm
c('\u0645\u0642\u0627\u0644\u0627\u062A','\u0645\u0642\u0627\u0644\u0647'), #maghalat,maghaleh
c('\u0648\u0638\u0627\u06CC\u0641','\u0648\u0638\u06CC\u0641\u0647'), #vazayef,vazifeh
c('\u0648\u0632\u0631\u0627','\u0648\u0632\u06CC\u0631'), #vozara,vazir
c('\u0645\u0635\u0627\u062F\u06CC\u0642','\u0645\u0635\u062F\u0627\u0642'), #masadigh,mesdagh
c('\u0645\u0631\u0627\u062C\u0639','\u0645\u0631\u062C\u0639'), #maraje,marja
c('\u0627\u0634\u062E\u0627\u0635','\u0634\u062E\u0635'), #ashkhas,shakhs
c('\u062E\u0635\u0627\u06CC\u0635','\u062E\u0635\u06CC\u0635\u0647'), #khasayes,khasiseh
c('\u0634\u0648\u0627\u0647\u062F','\u0634\u0627\u0647\u062F')) #shavahed,shahed
textsSplit <- strsplit(texts," ")[[1]]
if ("TRUE" %in% grepl("[0-9]$", textsSplit)){a <- textsSplit[-grep("[0-9]$", textsSplit)]}else{a <- textsSplit}
if(length(textsSplit) > 0){
for(i in 1:length(textsSplit)){
word <- textsSplit[i]
if(!(word %in% notstemlist) & !(word %in% unlist(arabicplurals)) & nchar(word)>2){
word0 <- strsplit(word,"")[[1]]
word1 <- fp1(word0);if((paste(word1,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word1,collapse=""); next}
word2 <- fs1(word1);if((paste(word2,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word2,collapse=""); next}
word3 <- fs2(word2);if((paste(word3,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word3,collapse=""); next}
word4 <- fs3(word3);if((paste(word4,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word4,collapse=""); next}
word5 <- fs4(word4);if((paste(word5,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word5,collapse=""); next}
word6 <- fs5(word5);if((paste(word6,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word6,collapse=""); next}
word7 <- fs6(word6);if((paste(word7,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word7,collapse=""); next}
word8 <- fs4(word7);if((paste(word8,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word8,collapse=""); next}
word9 <- fs5(word8);if((paste(word9,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word9,collapse=""); next}
word10 <- fs7(word9);if((paste(word10,collapse="")) %in% c(notstemlist, unlist(arabicplurals))){
textsSplit[i] <- paste(word10,collapse=""); next}
if(!Context){textsSplit[i] <- paste(word10,collapse="")}
if(Context){
stems <- unique(c(word,
paste(word1,collapse=""),
paste(word2,collapse=""),
paste(word3,collapse=""),
paste(word4,collapse=""),
paste(word5,collapse=""),
paste(word6,collapse=""),
paste(word7,collapse=""),
paste(word8,collapse=""),
paste(word9,collapse=""),
paste(word10,collapse="")
))
for (j in length(stems):1){
b <- strsplit(stems[j],"")[[1]]
if(b[length(b)] =="\u06AF"){
b[length(b)] <- "\u0647"
if(paste0(b,collapse="")%in%a){stems[j] <- paste0(b,collapse="")}
}
textsSplit[i] <- stems[j]
if(stems[j]%in%a){break}
}
}
}
}
}
texts <- paste(textsSplit,collapse=" ")
texts <- gsub("\u0030|\u0031|\u0032|\u0033|\u0034|\u0035","",texts)
texts <- trim(gsub(" {2,}"," ", texts))
return(texts)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vision_core.R
\name{LabeledBBox}
\alias{LabeledBBox}
\title{LabeledBBox}
\usage{
LabeledBBox(...)
}
\arguments{
\item{items}{a list of bounding boxes in an image}
}
\description{
Basic type for a list of bounding boxes in an image
}
| /man/LabeledBBox.Rd | permissive | ysnghr/fastai | R | false | true | 280 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vision_core.R
\name{LabeledBBox}
\alias{LabeledBBox}
\title{LabeledBBox}
\usage{
LabeledBBox(...)
}
\arguments{
\item{items}{items}
}
\description{
Basic type for a list of bounding boxes in an image
}
|
#This code processes and plots annual output from the ED2 model spinup period
#Jaclyn Hatala Matthes, 1/30/14
#jaclyn.hatala.matthes@gmail.com
#Load libraries
## Required packages, each loaded from the user library and checked
## explicitly so a missing package produces a clear error.  Fixes two
## copy-paste bugs in the original: the `sp` and `colorspace` checks both
## reported "Package raster is not available...".
for (pkg in c("chron", "ncdf", "maps", "sp", "raster", "colorspace")) {
  ok <- require(pkg, lib.loc = "/usr4/spclpgm/jmatthes/", character.only = TRUE)
  if (!ok) stop("Package ", pkg, " is not available...")
}
#Set sites
## Site codes and display names -- presumably the six PalEON study sites;
## the two vectors are index-aligned.  TODO confirm site list.
sites <- c("PBL","PHA","PMB","PDL","PHO","PUN")
site.names <- c("Billy's Lake","Harvard Forest","Minden Bog","Demming Lake","Howland Forest","UNDERC")
#Names and colors for all PFTs, so it works for tropical and temperate.
## pft.names/pft.cols are index-aligned over all 18 ED2 PFT slots
## (slot 18 = "Total").
pft.names = c("C4 grass"          ,"Early tropical"    ,"Mid tropical"
              ,"Late tropical"     ,"Temperate C3 Grass","North Pine"
              ,"South Pine"        ,"Late conifer"      ,"Early hardwood"
              ,"Mid hardwood"      ,"Late hardwood"     ,"C3 crop"
              ,"C3 pasture"        ,"C4 crop"           ,"C4 pasture"
              ,"C3 grass"          ,"Araucaria"         ,"Total"           )
pft.cols = c("gold"               ,"chartreuse"        ,"chartreuse4"
             ,"#004E00"           ,"mediumpurple1"     ,"deepskyblue"
             ,"mediumturquoise"   ,"royalblue4"        , "darkorange"
             ,"orangered"         ,"firebrick4"        , "purple4"
             ,"darkorchid1"       ,"darkgoldenrod"     , "khaki"
             ,"lightgoldenrod3"   ,"steelblue3"        , "grey22"          )
## Number of real PFTs (excluding the "Total" entry).  NOTE(review): n.pft
## is not used later in this script.
n.pft = length(pft.names) - 1
## PFT ids actually tracked below (temperate grass, pines, hardwoods).
pft <- c(5,6,8,9,10,11)
#for(s in 1:length(sites)){
## Site loop disabled; process only site 2 (Harvard Forest).
s <- 2
#Set directories
dat.dir <- paste("/projectnb/cheas/paleon/ED_runs/p1a_spin_042214//",sites[s],"/analy/",sep="")
## Keep only the annual ("-Y-") output files.
match.files <- grep("-Y-",list.files(dat.dir))
files <- list.files(dat.dir)
ann.files <- files[match.files] #yearly files only
#Get time window
## Year is parsed from the third dash-separated field of the file name --
## assumed ED2 naming convention, TODO confirm.
yeara <- as.numeric(strsplit(ann.files,"-")[[1]][3]) #first year
yearz <- as.numeric(strsplit(ann.files,"-")[[length(ann.files)]][3]) #last year
# yearz <- 1940
## Preallocate one (year x PFT) matrix per summary variable, filled with NA.
agb.pft <- lai.pft <- bsa.pft <- dba.pft <- den.pft <- matrix(nrow=(yearz-yeara+1),ncol=length(pft))
## Read one annual ED2 NetCDF file per year and accumulate per-PFT
## summaries into the preallocated matrices (row = year index).
for (y in yeara:yearz){
  cat(" - Reading file :",ann.files[y-yeara+1],"...","\n")
  now <- open.ncdf(paste(dat.dir,ann.files[y-yeara+1],sep=""))
  #Grab cohort level variables.
  ipft <- get.var.ncdf(now,'PFT')        # PFT id of each cohort
  dbh <- get.var.ncdf(now,'DBH')         # diameter at breast height (unused below)
  nplant <- get.var.ncdf(now,'NPLANT')   # stem density per cohort
  lai <- get.var.ncdf(now,'LAI_CO')      # leaf area index per cohort
  agb <- get.var.ncdf(now,'AGB_CO')      # aboveground biomass per cohort
  bsa <- get.var.ncdf(now,'BA_CO')       # basal area per cohort
  dba_dt <- get.var.ncdf(now,'DBA_DT')   # basal-area growth rate per cohort
  #if any PFTs go extinct, make placeholders for averaging
  ## Pad with zero-valued dummy cohorts so tapply() below always returns
  ## one entry per tracked PFT.  NOTE(review): the zero placeholders are
  ## included in the dba_dt mean, which biases it for extinct PFTs.
  if(length(unique(ipft))<length(pft)){
    tmp <- (length(pft)-length(unique(ipft)))
    ipft <- c(ipft,pft[!(pft %in% ipft)])
    agb <- c(agb,rep(0,tmp))
    lai <- c(lai,rep(0,tmp))
    bsa <- c(bsa,rep(0,tmp))
    nplant <- c(nplant,rep(0,tmp))
    dba_dt <- c(dba_dt,rep(0,tmp))
  }
  #PFT summaries
  ## Sum (or average, for growth rate) the cohort values within each PFT.
  agb.pft[(y-yeara+1),] <- tapply(agb,ipft,sum)
  lai.pft[(y-yeara+1),] <- tapply(lai,ipft,sum)
  bsa.pft[(y-yeara+1),] <- tapply(bsa,ipft,sum)
  den.pft[(y-yeara+1),] <- tapply(nplant,ipft,sum)
  dba.pft[(y-yeara+1),] <- tapply(dba_dt,ipft,mean)
  close.ncdf(now)
}
#}
## Convert spin-up years to Date for the x axis.  The -1000 undoes the
## spin-up calendar offset -- presumably the runs are dated +1000 years,
## TODO confirm.
years <- as.character((yeara:yearz)-1000)
year.date <- as.Date(years,"%Y")
# png(paste(plot.path,sites[s],'_AGBbyPFT','.png',sep=''),width=900,height=600)
# pdf(paste(plot.path,sites[s],"_spinup",sep=''))
## One scatter panel per summary variable: first tracked PFT via plot(),
## remaining PFTs overlaid with points().  Colors pft.cols[5:10] match the
## tracked PFT ids in `pft`.  NOTE(review): the legends reuse `ipft` as left
## over from the last loop year.
plot(year.date,agb.pft[,1],col=pft.cols[5],pch=16,ylim=range(agb.pft),
     xlab="spin-up date",ylab="Annual aboveground biomass [kg/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(agb.pft)){
  points(year.date,agb.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(agb.pft)-mean(agb.pft),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
## Annual mean LAI per PFT.
plot(year.date,lai.pft[,1],col=pft.cols[5],pch=16,ylim=range(lai.pft),
     xlab="spin-up date",ylab="Annual mean LAI [m2/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(lai.pft)){
  points(year.date,lai.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(lai.pft,na.rm=TRUE)-mean(lai.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
## Annual summed basal area per PFT.
plot(year.date,bsa.pft[,1],col=pft.cols[5],pch=16,ylim=range(bsa.pft),
     xlab="spin-up date",ylab="Annual sum basal area [cm2/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(bsa.pft)){
  points(year.date,bsa.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(bsa.pft,na.rm=TRUE)-mean(bsa.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
## Annual summed stem density per PFT.
plot(year.date,den.pft[,1],col=pft.cols[5],pch=16,ylim=range(den.pft),
     xlab="spin-up date",ylab="Annual sum of density [nplant/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(den.pft)){
  points(year.date,den.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(den.pft,na.rm=TRUE)-mean(den.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
## Annual mean basal-area growth rate per PFT (fixed y range).
plot(year.date,dba.pft[,1],col=pft.cols[5],pch=16,ylim=c(-1,1),
     xlab="spin-up date",ylab="Annual mean dba_dt",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(dba.pft)){
  points(year.date,dba.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(dba.pft,na.rm=TRUE)-mean(dba.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
| /plot_spinup_quick.R | no_license | crollinson/ED_process_output | R | false | false | 6,056 | r | #This code processes and plots annual output from the ED2 model spinup period
#Jaclyn Hatala Matthes, 1/30/14
#jaclyn.hatala.matthes@gmail.com
#Load libraries
## Required packages, each loaded from the user library and checked
## explicitly so a missing package produces a clear error.  Fixes two
## copy-paste bugs in the original: the `sp` and `colorspace` checks both
## reported "Package raster is not available...".
for (pkg in c("chron", "ncdf", "maps", "sp", "raster", "colorspace")) {
  ok <- require(pkg, lib.loc = "/usr4/spclpgm/jmatthes/", character.only = TRUE)
  if (!ok) stop("Package ", pkg, " is not available...")
}
#Set sites
## Site codes and display names -- presumably the six PalEON study sites;
## the two vectors are index-aligned.  TODO confirm site list.
sites <- c("PBL","PHA","PMB","PDL","PHO","PUN")
site.names <- c("Billy's Lake","Harvard Forest","Minden Bog","Demming Lake","Howland Forest","UNDERC")
#Names and colors for all PFTs, so it works for tropical and temperate.
## pft.names/pft.cols are index-aligned over all 18 ED2 PFT slots
## (slot 18 = "Total").
pft.names = c("C4 grass"          ,"Early tropical"    ,"Mid tropical"
              ,"Late tropical"     ,"Temperate C3 Grass","North Pine"
              ,"South Pine"        ,"Late conifer"      ,"Early hardwood"
              ,"Mid hardwood"      ,"Late hardwood"     ,"C3 crop"
              ,"C3 pasture"        ,"C4 crop"           ,"C4 pasture"
              ,"C3 grass"          ,"Araucaria"         ,"Total"           )
pft.cols = c("gold"               ,"chartreuse"        ,"chartreuse4"
             ,"#004E00"           ,"mediumpurple1"     ,"deepskyblue"
             ,"mediumturquoise"   ,"royalblue4"        , "darkorange"
             ,"orangered"         ,"firebrick4"        , "purple4"
             ,"darkorchid1"       ,"darkgoldenrod"     , "khaki"
             ,"lightgoldenrod3"   ,"steelblue3"        , "grey22"          )
## Number of real PFTs (excluding the "Total" entry).  NOTE(review): n.pft
## is not used later in this script.
n.pft = length(pft.names) - 1
## PFT ids actually tracked below (temperate grass, pines, hardwoods).
pft <- c(5,6,8,9,10,11)
#for(s in 1:length(sites)){
## Site loop disabled; process only site 2 (Harvard Forest).
s <- 2
#Set directories
dat.dir <- paste("/projectnb/cheas/paleon/ED_runs/p1a_spin_042214//",sites[s],"/analy/",sep="")
## Keep only the annual ("-Y-") output files.
match.files <- grep("-Y-",list.files(dat.dir))
files <- list.files(dat.dir)
ann.files <- files[match.files] #yearly files only
#Get time window
## Year is parsed from the third dash-separated field of the file name --
## assumed ED2 naming convention, TODO confirm.
yeara <- as.numeric(strsplit(ann.files,"-")[[1]][3]) #first year
yearz <- as.numeric(strsplit(ann.files,"-")[[length(ann.files)]][3]) #last year
# yearz <- 1940
## Preallocate one (year x PFT) matrix per summary variable, filled with NA.
agb.pft <- lai.pft <- bsa.pft <- dba.pft <- den.pft <- matrix(nrow=(yearz-yeara+1),ncol=length(pft))
## Read one annual ED2 NetCDF file per year and accumulate per-PFT
## summaries into the preallocated matrices (row = year index).
for (y in yeara:yearz){
  cat(" - Reading file :",ann.files[y-yeara+1],"...","\n")
  now <- open.ncdf(paste(dat.dir,ann.files[y-yeara+1],sep=""))
  #Grab cohort level variables.
  ipft <- get.var.ncdf(now,'PFT')        # PFT id of each cohort
  dbh <- get.var.ncdf(now,'DBH')         # diameter at breast height (unused below)
  nplant <- get.var.ncdf(now,'NPLANT')   # stem density per cohort
  lai <- get.var.ncdf(now,'LAI_CO')      # leaf area index per cohort
  agb <- get.var.ncdf(now,'AGB_CO')      # aboveground biomass per cohort
  bsa <- get.var.ncdf(now,'BA_CO')       # basal area per cohort
  dba_dt <- get.var.ncdf(now,'DBA_DT')   # basal-area growth rate per cohort
  #if any PFTs go extinct, make placeholders for averaging
  ## Pad with zero-valued dummy cohorts so tapply() below always returns
  ## one entry per tracked PFT.  NOTE(review): the zero placeholders are
  ## included in the dba_dt mean, which biases it for extinct PFTs.
  if(length(unique(ipft))<length(pft)){
    tmp <- (length(pft)-length(unique(ipft)))
    ipft <- c(ipft,pft[!(pft %in% ipft)])
    agb <- c(agb,rep(0,tmp))
    lai <- c(lai,rep(0,tmp))
    bsa <- c(bsa,rep(0,tmp))
    nplant <- c(nplant,rep(0,tmp))
    dba_dt <- c(dba_dt,rep(0,tmp))
  }
  #PFT summaries
  ## Sum (or average, for growth rate) the cohort values within each PFT.
  agb.pft[(y-yeara+1),] <- tapply(agb,ipft,sum)
  lai.pft[(y-yeara+1),] <- tapply(lai,ipft,sum)
  bsa.pft[(y-yeara+1),] <- tapply(bsa,ipft,sum)
  den.pft[(y-yeara+1),] <- tapply(nplant,ipft,sum)
  dba.pft[(y-yeara+1),] <- tapply(dba_dt,ipft,mean)
  close.ncdf(now)
}
#}
# Diagnostic time series of the per-PFT summaries accumulated above: one plot
# each for AGB, LAI, basal area, stem density, and mean dDBA/dt.
# Spin-up years are shifted down by 1000 before conversion to Date objects --
# presumably the run uses a +1000 calendar offset; TODO confirm.
years <- as.character((yeara:yearz)-1000)
year.date <- as.Date(years,"%Y")
# png(paste(plot.path,sites[s],'_AGBbyPFT','.png',sep=''),width=900,height=600)
# pdf(paste(plot.path,sites[s],"_spinup",sep=''))
# Points for column p use pft.cols[4+p], i.e. colors 5:10 in COLUMN order, not
# by PFT number; the legends reuse the same slice so points and legend agree,
# but for PFT numbers >= 8 the drawn color is not pft.cols at that PFT's own
# index. NOTE(review): legend labels rely on `ipft` as left over from the
# final iteration of the read loop above.
plot(year.date,agb.pft[,1],col=pft.cols[5],pch=16,ylim=range(agb.pft),
     xlab="spin-up date",ylab="Annual aboveground biomass [kg/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(agb.pft)){
  points(year.date,agb.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(agb.pft)-mean(agb.pft),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
# Leaf area index by PFT.
plot(year.date,lai.pft[,1],col=pft.cols[5],pch=16,ylim=range(lai.pft),
     xlab="spin-up date",ylab="Annual mean LAI [m2/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(lai.pft)){
  points(year.date,lai.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(lai.pft,na.rm=TRUE)-mean(lai.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
# Basal area by PFT.
plot(year.date,bsa.pft[,1],col=pft.cols[5],pch=16,ylim=range(bsa.pft),
     xlab="spin-up date",ylab="Annual sum basal area [cm2/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(bsa.pft)){
  points(year.date,bsa.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(bsa.pft,na.rm=TRUE)-mean(bsa.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
# Stem density by PFT.
plot(year.date,den.pft[,1],col=pft.cols[5],pch=16,ylim=range(den.pft),
     xlab="spin-up date",ylab="Annual sum of density [nplant/m2]",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(den.pft)){
  points(year.date,den.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(den.pft,na.rm=TRUE)-mean(den.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
# Mean basal-area growth rate; y-axis fixed to [-1, 1] rather than data range.
plot(year.date,dba.pft[,1],col=pft.cols[5],pch=16,ylim=c(-1,1),
     xlab="spin-up date",ylab="Annual mean dba_dt",
     main=paste(site.names[s],": Spin-up",sep=""))
for(p in 2:ncol(dba.pft)){
  points(year.date,dba.pft[,p],col=pft.cols[4+p],pch=16)
}
legend(year.date[2],max(dba.pft,na.rm=TRUE)-mean(dba.pft,na.rm=TRUE),pft.names[sort(unique(ipft))],col=pft.cols[5:10],pch=16)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Max.Profit.PB.R
\name{Max.Profit.PB}
\alias{Max.Profit.PB}
\title{Find maximum profit for Pure Bundling strategy}
\usage{
Max.Profit.PB(r1.r2, pb.min.max, c.1, c.2, alfa, beta, teta, FC)
}
\arguments{
\item{r1.r2}{N x 2 matrix of reservation prices of the two goods}

\item{pb.min.max}{vector giving the minimum and maximum bundle price to consider}

\item{c.1}{good 1 parameter of production cost}

\item{c.2}{good 2 parameter of production cost}

\item{alfa}{parameter of scale economics: alfa = 0 --> CRS, alfa < 0 --> IRS, alfa > 0 --> DRS}

\item{beta}{parameter of scope economics: beta = 0 --> neutral, beta > 0 --> complementary, beta < 0 --> substitution}

\item{teta}{parameter of complementarity and substitution of goods: teta = 0 --> neutral, teta > 0 --> complementary, teta < 0 --> substitution}
\item{FC}{fixed Cost of production}
}
\value{
max.profit
}
\description{
Find maximum profit for Pure Bundling strategy
}
| /man/Max.Profit.PB.Rd | no_license | tomvar/bundling | R | false | true | 922 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Max.Profit.PB.R
\name{Max.Profit.PB}
\alias{Max.Profit.PB}
\title{Find maximum profit for Pure Bundling strategy}
\usage{
Max.Profit.PB(r1.r2, pb.min.max, c.1, c.2, alfa, beta, teta, FC)
}
\arguments{
\item{r1.r2}{N x 2 matrix of reservation prices of the two goods}

\item{pb.min.max}{vector giving the minimum and maximum bundle price to consider}

\item{c.1}{good 1 parameter of production cost}

\item{c.2}{good 2 parameter of production cost}

\item{alfa}{parameter of scale economics: alfa = 0 --> CRS, alfa < 0 --> IRS, alfa > 0 --> DRS}

\item{beta}{parameter of scope economics: beta = 0 --> neutral, beta > 0 --> complementary, beta < 0 --> substitution}

\item{teta}{parameter of complementarity and substitution of goods: teta = 0 --> neutral, teta > 0 --> complementary, teta < 0 --> substitution}
\item{FC}{fixed Cost of production}
}
\value{
max.profit
}
\description{
Find maximum profit for Pure Bundling strategy
}
|
\alias{gtkUIManagerAddUiFromString}
\name{gtkUIManagerAddUiFromString}
\title{gtkUIManagerAddUiFromString}
\description{Parses a string containing a UI definition and
merges it with the current contents of \code{self}. An enclosing <ui>
element is added if it is missing.}
\usage{gtkUIManagerAddUiFromString(object, buffer, length = -1, .errwarn = TRUE)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkUIManager}} object}
\item{\verb{buffer}}{the string to parse}
\item{\verb{length}}{the length of \code{buffer} (may be -1 if \code{buffer} is nul-terminated)}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Since 2.4}
\value{
A list containing the following elements:
\item{retval}{[numeric] The merge id for the merged UI. The merge id can be used
to unmerge the UI with \code{\link{gtkUIManagerRemoveUi}}. If an error occurred,
the return value is 0.}
\item{\verb{error}}{return location for an error}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkUIManagerAddUiFromString.Rd | no_license | lawremi/RGtk2 | R | false | false | 1,017 | rd | \alias{gtkUIManagerAddUiFromString}
\name{gtkUIManagerAddUiFromString}
\title{gtkUIManagerAddUiFromString}
\description{Parses a string containing a UI definition and
merges it with the current contents of \code{self}. An enclosing <ui>
element is added if it is missing.}
\usage{gtkUIManagerAddUiFromString(object, buffer, length = -1, .errwarn = TRUE)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkUIManager}} object}
\item{\verb{buffer}}{the string to parse}
\item{\verb{length}}{the length of \code{buffer} (may be -1 if \code{buffer} is nul-terminated)}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Since 2.4}
\value{
A list containing the following elements:
\item{retval}{[numeric] The merge id for the merged UI. The merge id can be used
to unmerge the UI with \code{\link{gtkUIManagerRemoveUi}}. If an error occurred,
the return value is 0.}
\item{\verb{error}}{return location for an error}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Course exercise: histogram of household global active power for 2007-02-01/02.
###Load SQLDF Library
library(sqldf)
####Set working directory to where original data file is
# NOTE(review): hard-coded setwd() ties this script to one machine.
setwd("~/IST/Dropbox/Dropbox/Minhas/Coursera/Data Analysis/Project 1")
myfile<-"household_power_consumption.txt"
####Read the necessary days
# read.csv.sql filters while reading, so only the two target dates (stored as
# d/m/yyyy strings in the file) are ever loaded into memory.
mySub<-read.csv.sql(myfile, sep=";",sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"')
####Create plot
# NOTE(review): "killowatts" in the axis label is a typo for "kilowatts";
# left untouched here since it is runtime output.
hist(mySub$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (killowatts)")
####Save to PNG
# dev.copy() re-renders the on-screen plot into a PNG file, then closes it.
dev.copy(png, file = "plot1.png")
dev.off()
| /plot1.r | no_license | mi-prata/ExData_Plotting1 | R | false | false | 535 | r | ###Load SQLDF Library
# (Duplicate copy of plot1.R from the dataset's text column.)
library(sqldf)
####Set working directory to where original data file is
# NOTE(review): hard-coded setwd() ties this script to one machine.
setwd("~/IST/Dropbox/Dropbox/Minhas/Coursera/Data Analysis/Project 1")
myfile<-"household_power_consumption.txt"
####Read the necessary days
# read.csv.sql filters while reading, so only the two target dates are loaded.
mySub<-read.csv.sql(myfile, sep=";",sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"')
####Create plot
# NOTE(review): "killowatts" is a typo for "kilowatts" (runtime string, untouched).
hist(mySub$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (killowatts)")
####Save to PNG
# dev.copy() re-renders the on-screen plot into a PNG file, then closes it.
dev.copy(png, file = "plot1.png")
dev.off()
|
# Cluster script: fit a Bayesian mixed model of species richness (rich) as a
# function of NPK treatment x years of treatment, with random slopes nested
# site/block/plot, and save the fit to the path in the OFILE env var
# (presumably set by the batch scheduler -- TODO confirm).
library(tidyverse)
library(brms)
path <- '/gpfs1/data/idiv_chase/emmala/NutNet'
# NOTE(review): header=T uses the reassignable T instead of TRUE; the data
# frame is named `plot`, masking base::plot for the rest of the session.
plot <- read.csv(paste0(path, '/plot.csv'), header=T,fill=TRUE,sep=",",na.strings=c(""," ","NA","NA ","na"))
plot$site_code<-as.factor(plot$site_code)
plot$block<-as.factor(plot$block)
plot$plot<-as.factor(plot$plot)
# Keep only sites with at least 3 treatment years. NOTE(review): the filter on
# year_max is row-wise, so the group_by/ungroup pair has no effect here.
plot <- plot %>% group_by(site_code) %>% filter(year_max >= 3) %>%
ungroup()
# 4 chains x 5000 iterations (1000 warmup); adapt_delta raised to 0.99 to
# suppress divergent transitions.
rich.3 <- brm(rich ~ trt * year_trt + (trt * year_trt | site_code/block/plot),
              data = plot, cores = 4, chains = 4,
              iter=5000, warmup = 1000,
              prior = c(
                prior(normal(8,5), class = Intercept),
                prior(normal(5,1), class = b, coef = trtNPK),
                prior(normal(0,1), class = b, coef = year_trt),
                prior(normal(0,1), class = b, coef = trtNPK:year_trt),
                prior(normal(0,1), class = sd),
                prior(normal(0,1), class = sigma)),
              control = list(adapt_delta = 0.99)
)
save(rich.3,
     file=Sys.getenv('OFILE'))
| /cluster/univariate_models/rich.R | no_license | emma-ladouceur/NutNet-CAFE | R | false | false | 1,062 | r |
library(tidyverse)
library(brms)
path <- '/gpfs1/data/idiv_chase/emmala/NutNet'
plot <- read.csv(paste0(path, '/plot.csv'), header=T,fill=TRUE,sep=",",na.strings=c(""," ","NA","NA ","na"))
plot$site_code<-as.factor(plot$site_code)
plot$block<-as.factor(plot$block)
plot$plot<-as.factor(plot$plot)
plot <- plot %>% group_by(site_code) %>% filter(year_max >= 3) %>%
ungroup()
rich.3 <- brm(rich ~ trt * year_trt + (trt * year_trt | site_code/block/plot),
data = plot, cores = 4, chains = 4,
iter=5000, warmup = 1000,
prior = c(
prior(normal(8,5), class = Intercept),
prior(normal(5,1), class = b, coef = trtNPK),
prior(normal(0,1), class = b, coef = year_trt),
prior(normal(0,1), class = b, coef = trtNPK:year_trt),
prior(normal(0,1), class = sd),
prior(normal(0,1), class = sigma)),
control = list(adapt_delta = 0.99)
)
save(rich.3,
file=Sys.getenv('OFILE'))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.